commit 1c02e48a8f

    Merge branch 'master' into feature/rank-eval

    Conflicts:
        test/framework/src/main/java/org/elasticsearch/test/TestCluster.java
@@ -117,7 +117,7 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
         if (false == test.continued) {
             current.println('---')
-            current.println("\"$test.start\":")
+            current.println("\"line_$test.start\":")
         }
         if (test.skipTest) {
             current.println(" - skip:")
@@ -97,8 +97,8 @@ public class PluginBuildPlugin extends BuildPlugin {
             // with a full elasticsearch server that includes optional deps
             provided "org.locationtech.spatial4j:spatial4j:${project.versions.spatial4j}"
             provided "com.vividsolutions:jts:${project.versions.jts}"
-            provided "log4j:log4j:${project.versions.log4j}"
-            provided "log4j:apache-log4j-extras:${project.versions.log4j}"
+            provided "org.apache.logging.log4j:log4j-api:${project.versions.log4j}"
+            provided "org.apache.logging.log4j:log4j-core:${project.versions.log4j}"
             provided "net.java.dev.jna:jna:${project.versions.jna}"
         }
     }
@@ -59,7 +59,8 @@ class PrecommitTasks {
          * use the NamingConventionsCheck we break the circular dependency
          * here.
          */
-        precommitTasks.add(configureLoggerUsage(project))
+        // https://github.com/elastic/elasticsearch/issues/20243
+        // precommitTasks.add(configureLoggerUsage(project))
    }
@@ -248,8 +248,6 @@
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]MappingMetaData.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]MetaData.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]MetaDataCreateIndexService.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]MetaDataDeleteIndexService.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]MetaDataIndexAliasesService.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]MetaDataIndexStateService.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]MetaDataIndexTemplateService.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]MetaDataIndexUpgradeService.java" checks="LineLength" />
@@ -388,7 +386,6 @@
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]MapperService.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]Mapping.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]MetadataFieldMapper.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]ParsedDocument.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]CompletionFieldMapper.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]LegacyDateFieldMapper.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]LegacyDoubleFieldMapper.java" checks="LineLength" />
@@ -629,7 +626,7 @@
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]SimulatePipelineResponseTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]WriteableIngestDocumentTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]SearchRequestBuilderTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]AutoCreateIndexTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]IndicesOptionsTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]TransportActionFilterChainTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]WaitActiveShardCountIT.java" checks="LineLength" />
@@ -867,7 +864,6 @@
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]CorruptedFileIT.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]CorruptedTranslogIT.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]DirectoryUtilsTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]ExceptionRetryIT.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]IndexStoreTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]StoreTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]suggest[/\\]stats[/\\]SuggestStatsIT.java" checks="LineLength" />
@@ -6,7 +6,7 @@ spatial4j = 0.6
 jts = 1.13
 jackson = 2.8.1
 snakeyaml = 1.15
-log4j = 1.2.17
+log4j = 2.6.2
 slf4j = 1.6.2
 jna = 4.2.2
@@ -18,13 +18,13 @@
  */
 package org.elasticsearch.client.benchmark.ops.bulk;

+import org.apache.logging.log4j.Logger;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.client.benchmark.BenchmarkTask;
 import org.elasticsearch.client.benchmark.metrics.Sample;
 import org.elasticsearch.client.benchmark.metrics.SampleRecorder;
 import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.io.PathUtils;
-import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.logging.ESLoggerFactory;

 import java.io.BufferedReader;
@@ -135,7 +135,7 @@ public class BulkBenchmarkTask implements BenchmarkTask {


     private static final class BulkIndexer implements Runnable {
-        private static final ESLogger logger = ESLoggerFactory.getLogger(BulkIndexer.class.getName());
+        private static final Logger logger = ESLoggerFactory.getLogger(BulkIndexer.class.getName());

         private final BlockingQueue<List<String>> bulkData;
         private final int warmupIterations;
@@ -89,17 +89,19 @@ public class RestClient implements Closeable {
     //we don't rely on default headers supported by HttpAsyncClient as those cannot be replaced
     private final Header[] defaultHeaders;
     private final long maxRetryTimeoutMillis;
+    private final String pathPrefix;
     private final AtomicInteger lastHostIndex = new AtomicInteger(0);
     private volatile Set<HttpHost> hosts;
     private final ConcurrentMap<HttpHost, DeadHostState> blacklist = new ConcurrentHashMap<>();
     private final FailureListener failureListener;

     RestClient(CloseableHttpAsyncClient client, long maxRetryTimeoutMillis, Header[] defaultHeaders,
-               HttpHost[] hosts, FailureListener failureListener) {
+               HttpHost[] hosts, String pathPrefix, FailureListener failureListener) {
         this.client = client;
         this.maxRetryTimeoutMillis = maxRetryTimeoutMillis;
         this.defaultHeaders = defaultHeaders;
         this.failureListener = failureListener;
+        this.pathPrefix = pathPrefix;
         setHosts(hosts);
     }

@@ -280,7 +282,7 @@ public class RestClient implements Closeable {
     public void performRequestAsync(String method, String endpoint, Map<String, String> params,
                                     HttpEntity entity, HttpAsyncResponseConsumer<HttpResponse> responseConsumer,
                                     ResponseListener responseListener, Header... headers) {
-        URI uri = buildUri(endpoint, params);
+        URI uri = buildUri(pathPrefix, endpoint, params);
         HttpRequestBase request = createHttpRequest(method, uri, entity);
         setHeaders(request, headers);
         FailureTrackingResponseListener failureTrackingResponseListener = new FailureTrackingResponseListener(responseListener);
@@ -360,12 +362,17 @@ public class RestClient implements Closeable {

     private void setHeaders(HttpRequest httpRequest, Header[] requestHeaders) {
         Objects.requireNonNull(requestHeaders, "request headers must not be null");
-        for (Header defaultHeader : defaultHeaders) {
-            httpRequest.setHeader(defaultHeader);
-        }
+        // request headers override default headers, so we don't add default headers if they exist as request headers
+        final Set<String> requestNames = new HashSet<>(requestHeaders.length);
         for (Header requestHeader : requestHeaders) {
             Objects.requireNonNull(requestHeader, "request header must not be null");
-            httpRequest.setHeader(requestHeader);
+            httpRequest.addHeader(requestHeader);
+            requestNames.add(requestHeader.getName());
         }
+        for (Header defaultHeader : defaultHeaders) {
+            if (requestNames.contains(defaultHeader.getName()) == false) {
+                httpRequest.addHeader(defaultHeader);
+            }
+        }
     }

@@ -501,10 +508,21 @@ public class RestClient implements Closeable {
         return httpRequest;
     }

-    private static URI buildUri(String path, Map<String, String> params) {
+    private static URI buildUri(String pathPrefix, String path, Map<String, String> params) {
         Objects.requireNonNull(params, "params must not be null");
         try {
-            URIBuilder uriBuilder = new URIBuilder(path);
+            String fullPath;
+            if (pathPrefix != null) {
+                if (path.startsWith("/")) {
+                    fullPath = pathPrefix + path;
+                } else {
+                    fullPath = pathPrefix + "/" + path;
+                }
+            } else {
+                fullPath = path;
+            }
+
+            URIBuilder uriBuilder = new URIBuilder(fullPath);
             for (Map.Entry<String, String> param : params.entrySet()) {
                 uriBuilder.addParameter(param.getKey(), param.getValue());
             }
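A minimal sketch of the path-joining rule the new buildUri(pathPrefix, path, params) applies, reduced to a self-contained example (the class and method names here are illustrative, not part of the client):

    public final class PathPrefixJoinDemo {
        // Mirrors the patched buildUri logic: an optional prefix is glued onto the
        // endpoint, inserting a '/' only when the endpoint doesn't already start with one.
        static String join(String pathPrefix, String path) {
            if (pathPrefix != null) {
                return path.startsWith("/") ? pathPrefix + path : pathPrefix + "/" + path;
            }
            return path;
        }

        public static void main(String[] args) {
            System.out.println(join("/base", "/_cluster/health")); // /base/_cluster/health
            System.out.println(join("/base", "_cluster/health"));  // /base/_cluster/health
            System.out.println(join(null, "/_cluster/health"));    // /_cluster/health
        }
    }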
@@ -51,12 +51,17 @@ public final class RestClientBuilder {
     private RestClient.FailureListener failureListener;
     private HttpClientConfigCallback httpClientConfigCallback;
     private RequestConfigCallback requestConfigCallback;
+    private String pathPrefix;

     /**
      * Creates a new builder instance and sets the hosts that the client will send requests to.
+     *
+     * @throws NullPointerException if {@code hosts} or any host is {@code null}.
+     * @throws IllegalArgumentException if {@code hosts} is empty.
      */
     RestClientBuilder(HttpHost... hosts) {
-        if (hosts == null || hosts.length == 0) {
+        Objects.requireNonNull(hosts, "hosts must not be null");
+        if (hosts.length == 0) {
             throw new IllegalArgumentException("no hosts provided");
         }
         for (HttpHost host : hosts) {
@@ -66,7 +71,11 @@ public final class RestClientBuilder {
     }

     /**
-     * Sets the default request headers, which will be sent along with each request
+     * Sets the default request headers, which will be sent along with each request.
+     * <p>
+     * Request-time headers will always overwrite any default headers.
+     *
+     * @throws NullPointerException if {@code defaultHeaders} or any header is {@code null}.
      */
     public RestClientBuilder setDefaultHeaders(Header[] defaultHeaders) {
         Objects.requireNonNull(defaultHeaders, "defaultHeaders must not be null");
@@ -79,6 +88,8 @@ public final class RestClientBuilder {

     /**
      * Sets the {@link RestClient.FailureListener} to be notified for each request failure
+     *
+     * @throws NullPointerException if {@code failureListener} is {@code null}.
      */
     public RestClientBuilder setFailureListener(RestClient.FailureListener failureListener) {
         Objects.requireNonNull(failureListener, "failureListener must not be null");
@@ -90,7 +101,7 @@ public final class RestClientBuilder {
      * Sets the maximum timeout (in milliseconds) to honour in case of multiple retries of the same request.
      * {@link #DEFAULT_MAX_RETRY_TIMEOUT_MILLIS} if not specified.
      *
-     * @throws IllegalArgumentException if maxRetryTimeoutMillis is not greater than 0
+     * @throws IllegalArgumentException if {@code maxRetryTimeoutMillis} is not greater than 0
      */
     public RestClientBuilder setMaxRetryTimeoutMillis(int maxRetryTimeoutMillis) {
         if (maxRetryTimeoutMillis <= 0) {
@@ -102,6 +113,8 @@ public final class RestClientBuilder {

     /**
      * Sets the {@link HttpClientConfigCallback} to be used to customize http client configuration
+     *
+     * @throws NullPointerException if {@code httpClientConfigCallback} is {@code null}.
      */
     public RestClientBuilder setHttpClientConfigCallback(HttpClientConfigCallback httpClientConfigCallback) {
         Objects.requireNonNull(httpClientConfigCallback, "httpClientConfigCallback must not be null");
@@ -111,6 +124,8 @@ public final class RestClientBuilder {

     /**
      * Sets the {@link RequestConfigCallback} to be used to customize http client configuration
+     *
+     * @throws NullPointerException if {@code requestConfigCallback} is {@code null}.
      */
     public RestClientBuilder setRequestConfigCallback(RequestConfigCallback requestConfigCallback) {
         Objects.requireNonNull(requestConfigCallback, "requestConfigCallback must not be null");
@@ -118,6 +133,43 @@ public final class RestClientBuilder {
         return this;
     }

+    /**
+     * Sets the path's prefix for every request used by the http client.
+     * <p>
+     * For example, if this is set to "/my/path", then any client request will become <code>"/my/path/" + endpoint</code>.
+     * <p>
+     * In essence, every request's {@code endpoint} is prefixed by this {@code pathPrefix}. The path prefix is useful for when
+     * Elasticsearch is behind a proxy that provides a base path; it is not intended for other purposes and it should not be supplied in
+     * other scenarios.
+     *
+     * @throws NullPointerException if {@code pathPrefix} is {@code null}.
+     * @throws IllegalArgumentException if {@code pathPrefix} is empty, only '/', or ends with more than one '/'.
+     */
+    public RestClientBuilder setPathPrefix(String pathPrefix) {
+        Objects.requireNonNull(pathPrefix, "pathPrefix must not be null");
+        String cleanPathPrefix = pathPrefix;
+
+        if (cleanPathPrefix.startsWith("/") == false) {
+            cleanPathPrefix = "/" + cleanPathPrefix;
+        }
+
+        // best effort to ensure that it looks like "/base/path" rather than "/base/path/"
+        if (cleanPathPrefix.endsWith("/")) {
+            cleanPathPrefix = cleanPathPrefix.substring(0, cleanPathPrefix.length() - 1);
+
+            if (cleanPathPrefix.endsWith("/")) {
+                throw new IllegalArgumentException("pathPrefix is malformed. too many trailing slashes: [" + pathPrefix + "]");
+            }
+        }
+
+        if (cleanPathPrefix.isEmpty() || "/".equals(cleanPathPrefix)) {
+            throw new IllegalArgumentException("pathPrefix must not be empty or '/': [" + pathPrefix + "]");
+        }
+
+        this.pathPrefix = cleanPathPrefix;
+        return this;
+    }
+
     /**
      * Creates a new {@link RestClient} based on the provided configuration.
      */
@@ -126,7 +178,7 @@ public final class RestClientBuilder {
             failureListener = new RestClient.FailureListener();
         }
         CloseableHttpAsyncClient httpClient = createHttpClient();
-        RestClient restClient = new RestClient(httpClient, maxRetryTimeout, defaultHeaders, hosts, failureListener);
+        RestClient restClient = new RestClient(httpClient, maxRetryTimeout, defaultHeaders, hosts, pathPrefix, failureListener);
         httpClient.start();
         return restClient;
     }
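A minimal usage sketch of the new builder option, assuming a node proxied under an "/es" base path (host, port, and prefix are illustrative values only):

    import org.apache.http.HttpHost;
    import org.elasticsearch.client.Response;
    import org.elasticsearch.client.RestClient;

    public class PathPrefixUsage {
        public static void main(String[] args) throws Exception {
            // every endpoint sent through this client becomes "/es" + endpoint
            try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200))
                    .setPathPrefix("/es")
                    .build()) {
                Response response = client.performRequest("GET", "/_cluster/health");
                System.out.println(response.getStatusLine()); // request actually went to /es/_cluster/health
            }
        }
    }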
@@ -19,7 +19,6 @@

 package org.elasticsearch.client;

-import com.carrotsearch.randomizedtesting.generators.RandomInts;
 import org.apache.http.Header;
 import org.apache.http.HttpHost;
 import org.apache.http.client.config.RequestConfig;
@@ -28,8 +27,10 @@ import org.apache.http.message.BasicHeader;

 import java.io.IOException;

+import static org.hamcrest.Matchers.containsString;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThat;
 import static org.junit.Assert.fail;

 public class RestClientBuilderTests extends RestClientTestCase {
@@ -38,8 +39,8 @@ public class RestClientBuilderTests extends RestClientTestCase {
         try {
             RestClient.builder((HttpHost[])null);
             fail("should have failed");
-        } catch(IllegalArgumentException e) {
-            assertEquals("no hosts provided", e.getMessage());
+        } catch(NullPointerException e) {
+            assertEquals("hosts must not be null", e.getMessage());
         }

         try {
@@ -62,7 +63,7 @@ public class RestClientBuilderTests extends RestClientTestCase {

         try {
             RestClient.builder(new HttpHost("localhost", 9200))
-                    .setMaxRetryTimeoutMillis(RandomInts.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0));
+                    .setMaxRetryTimeoutMillis(randomIntBetween(Integer.MIN_VALUE, 0));
             fail("should have failed");
         } catch(IllegalArgumentException e) {
             assertEquals("maxRetryTimeoutMillis must be greater than 0", e.getMessage());
@@ -103,13 +104,13 @@ public class RestClientBuilderTests extends RestClientTestCase {
             assertEquals("requestConfigCallback must not be null", e.getMessage());
         }

-        int numNodes = RandomInts.randomIntBetween(getRandom(), 1, 5);
+        int numNodes = randomIntBetween(1, 5);
         HttpHost[] hosts = new HttpHost[numNodes];
         for (int i = 0; i < numNodes; i++) {
             hosts[i] = new HttpHost("localhost", 9200 + i);
         }
         RestClientBuilder builder = RestClient.builder(hosts);
-        if (getRandom().nextBoolean()) {
+        if (randomBoolean()) {
             builder.setHttpClientConfigCallback(new RestClientBuilder.HttpClientConfigCallback() {
                 @Override
                 public HttpAsyncClientBuilder customizeHttpClient(HttpAsyncClientBuilder httpClientBuilder) {
@@ -117,7 +118,7 @@ public class RestClientBuilderTests extends RestClientTestCase {
                 }
             });
         }
-        if (getRandom().nextBoolean()) {
+        if (randomBoolean()) {
             builder.setRequestConfigCallback(new RestClientBuilder.RequestConfigCallback() {
                 @Override
                 public RequestConfig.Builder customizeRequestConfig(RequestConfig.Builder requestConfigBuilder) {
@@ -125,19 +126,55 @@ public class RestClientBuilderTests extends RestClientTestCase {
                 }
             });
         }
-        if (getRandom().nextBoolean()) {
-            int numHeaders = RandomInts.randomIntBetween(getRandom(), 1, 5);
+        if (randomBoolean()) {
+            int numHeaders = randomIntBetween(1, 5);
             Header[] headers = new Header[numHeaders];
             for (int i = 0; i < numHeaders; i++) {
                 headers[i] = new BasicHeader("header" + i, "value");
             }
             builder.setDefaultHeaders(headers);
         }
-        if (getRandom().nextBoolean()) {
-            builder.setMaxRetryTimeoutMillis(RandomInts.randomIntBetween(getRandom(), 1, Integer.MAX_VALUE));
+        if (randomBoolean()) {
+            builder.setMaxRetryTimeoutMillis(randomIntBetween(1, Integer.MAX_VALUE));
         }
+        if (randomBoolean()) {
+            String pathPrefix = (randomBoolean() ? "/" : "") + randomAsciiOfLengthBetween(2, 5);
+            while (pathPrefix.length() < 20 && randomBoolean()) {
+                pathPrefix += "/" + randomAsciiOfLengthBetween(3, 6);
+            }
+            builder.setPathPrefix(pathPrefix + (randomBoolean() ? "/" : ""));
+        }
         try (RestClient restClient = builder.build()) {
             assertNotNull(restClient);
         }
     }

+    public void testSetPathPrefixNull() {
+        try {
+            RestClient.builder(new HttpHost("localhost", 9200)).setPathPrefix(null);
+            fail("pathPrefix set to null should fail!");
+        } catch (final NullPointerException e) {
+            assertEquals("pathPrefix must not be null", e.getMessage());
+        }
+    }
+
+    public void testSetPathPrefixEmpty() {
+        assertSetPathPrefixThrows("/");
+        assertSetPathPrefixThrows("");
+    }
+
+    public void testSetPathPrefixMalformed() {
+        assertSetPathPrefixThrows("//");
+        assertSetPathPrefixThrows("base/path//");
+    }
+
+    private static void assertSetPathPrefixThrows(final String pathPrefix) {
+        try {
+            RestClient.builder(new HttpHost("localhost", 9200)).setPathPrefix(pathPrefix);
+            fail("path prefix [" + pathPrefix + "] should have failed");
+        } catch (final IllegalArgumentException e) {
+            assertThat(e.getMessage(), containsString(pathPrefix));
+        }
+    }
+
 }
@@ -19,18 +19,15 @@

 package org.elasticsearch.client;

-import com.carrotsearch.randomizedtesting.generators.RandomInts;
-import com.carrotsearch.randomizedtesting.generators.RandomStrings;
 import com.sun.net.httpserver.Headers;
+import com.sun.net.httpserver.HttpContext;
 import com.sun.net.httpserver.HttpExchange;
 import com.sun.net.httpserver.HttpHandler;
 import com.sun.net.httpserver.HttpServer;
 import org.apache.http.Consts;
 import org.apache.http.Header;
 import org.apache.http.HttpEntity;
 import org.apache.http.HttpHost;
 import org.apache.http.entity.StringEntity;
 import org.apache.http.message.BasicHeader;
 import org.apache.http.util.EntityUtils;
 import org.codehaus.mojo.animal_sniffer.IgnoreJRERequirement;
 import org.junit.AfterClass;
@@ -60,6 +57,7 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;

 /**
  * Integration test to check interaction between {@link RestClient} and {@link org.apache.http.client.HttpClient}.
@@ -81,13 +79,8 @@ public class RestClientIntegTests extends RestClientTestCase {
         for (int statusCode : getAllStatusCodes()) {
             createStatusCodeContext(httpServer, statusCode);
         }
-        int numHeaders = RandomInts.randomIntBetween(getRandom(), 0, 3);
-        defaultHeaders = new Header[numHeaders];
-        for (int i = 0; i < numHeaders; i++) {
-            String headerName = "Header-default" + (getRandom().nextBoolean() ? i : "");
-            String headerValue = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 3, 10);
-            defaultHeaders[i] = new BasicHeader(headerName, headerValue);
-        }
+        int numHeaders = randomIntBetween(0, 5);
+        defaultHeaders = generateHeaders("Header-default", "Header-array", numHeaders);
         restClient = RestClient.builder(new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()))
                 .setDefaultHeaders(defaultHeaders).build();
     }
@@ -146,44 +139,43 @@
      */
     public void testHeaders() throws IOException {
         for (String method : getHttpMethods()) {
-            Set<String> standardHeaders = new HashSet<>(
-                    Arrays.asList("Connection", "Host", "User-agent", "Date"));
+            final Set<String> standardHeaders = new HashSet<>(Arrays.asList("Connection", "Host", "User-agent", "Date"));
             if (method.equals("HEAD") == false) {
                 standardHeaders.add("Content-length");
             }
-            int numHeaders = RandomInts.randomIntBetween(getRandom(), 1, 5);
-            Map<String, String> expectedHeaders = new HashMap<>();
-            for (Header defaultHeader : defaultHeaders) {
-                expectedHeaders.put(defaultHeader.getName(), defaultHeader.getValue());
-            }
-            Header[] headers = new Header[numHeaders];
-            for (int i = 0; i < numHeaders; i++) {
-                String headerName = "Header" + (getRandom().nextBoolean() ? i : "");
-                String headerValue = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 3, 10);
-                headers[i] = new BasicHeader(headerName, headerValue);
-                expectedHeaders.put(headerName, headerValue);
-            }
-
-            int statusCode = randomStatusCode(getRandom());
+            final int numHeaders = randomIntBetween(1, 5);
+            final Header[] headers = generateHeaders("Header", "Header-array", numHeaders);
+            final Map<String, List<String>> expectedHeaders = new HashMap<>();
+
+            addHeaders(expectedHeaders, defaultHeaders, headers);
+
+            final int statusCode = randomStatusCode(getRandom());
             Response esResponse;
             try {
-                esResponse = restClient.performRequest(method, "/" + statusCode, Collections.<String, String>emptyMap(),
-                        (HttpEntity)null, headers);
+                esResponse = restClient.performRequest(method, "/" + statusCode, Collections.<String, String>emptyMap(), headers);
             } catch(ResponseException e) {
                 esResponse = e.getResponse();
             }
             assertThat(esResponse.getStatusLine().getStatusCode(), equalTo(statusCode));
-            for (Header responseHeader : esResponse.getHeaders()) {
-                if (responseHeader.getName().startsWith("Header")) {
-                    String headerValue = expectedHeaders.remove(responseHeader.getName());
-                    assertNotNull("found response header [" + responseHeader.getName() + "] that wasn't originally sent", headerValue);
+            for (final Header responseHeader : esResponse.getHeaders()) {
+                final String name = responseHeader.getName();
+                final String value = responseHeader.getValue();
+                if (name.startsWith("Header")) {
+                    final List<String> values = expectedHeaders.get(name);
+                    assertNotNull("found response header [" + name + "] that wasn't originally sent: " + value, values);
+                    assertTrue("found incorrect response header [" + name + "]: " + value, values.remove(value));
+
+                    // we've collected them all
+                    if (values.isEmpty()) {
+                        expectedHeaders.remove(name);
+                    }
                 } else {
-                    assertTrue("unknown header was returned " + responseHeader.getName(),
-                            standardHeaders.remove(responseHeader.getName()));
+                    assertTrue("unknown header was returned " + name, standardHeaders.remove(name));
                 }
             }
-            assertEquals("some headers that were sent weren't returned: " + expectedHeaders, 0, expectedHeaders.size());
-            assertEquals("some expected standard headers weren't returned: " + standardHeaders, 0, standardHeaders.size());
+            assertTrue("some headers that were sent weren't returned: " + expectedHeaders, expectedHeaders.isEmpty());
+            assertTrue("some expected standard headers weren't returned: " + standardHeaders, standardHeaders.isEmpty());
         }
     }

@@ -205,6 +197,38 @@
         bodyTest("GET");
     }

+    /**
+     * Ensure that pathPrefix works as expected.
+     */
+    public void testPathPrefix() throws IOException {
+        // guarantee no other test setup collides with this one and lets it sneak through
+        final String uniqueContextSuffix = "/testPathPrefix";
+        final String pathPrefix = "base/" + randomAsciiOfLengthBetween(1, 5) + "/";
+        final int statusCode = randomStatusCode(getRandom());
+
+        final HttpContext context =
+                httpServer.createContext("/" + pathPrefix + statusCode + uniqueContextSuffix, new ResponseHandler(statusCode));
+
+        try (final RestClient client =
+                RestClient.builder(new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()))
+                        .setPathPrefix((randomBoolean() ? "/" : "") + pathPrefix).build()) {
+
+            for (final String method : getHttpMethods()) {
+                Response esResponse;
+                try {
+                    esResponse = client.performRequest(method, "/" + statusCode + uniqueContextSuffix);
+                } catch(ResponseException e) {
+                    esResponse = e.getResponse();
+                }
+
+                assertThat(esResponse.getRequestLine().getUri(), equalTo("/" + pathPrefix + statusCode + uniqueContextSuffix));
+                assertThat(esResponse.getStatusLine().getStatusCode(), equalTo(statusCode));
+            }
+        } finally {
+            httpServer.removeContext(context);
+        }
+    }
+
     private void bodyTest(String method) throws IOException {
         String requestBody = "{ \"field\": \"value\" }";
         StringEntity entity = new StringEntity(requestBody);
@@ -101,7 +101,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
             httpHosts[i] = new HttpHost("localhost", 9200 + i);
         }
         failureListener = new HostsTrackingFailureListener();
-        restClient = new RestClient(httpClient, 10000, new Header[0], httpHosts, failureListener);
+        restClient = new RestClient(httpClient, 10000, new Header[0], httpHosts, null, failureListener);
     }

     public void testRoundRobinOkStatusCodes() throws IOException {
@@ -19,8 +19,6 @@

 package org.elasticsearch.client;

-import com.carrotsearch.randomizedtesting.generators.RandomInts;
-import com.carrotsearch.randomizedtesting.generators.RandomStrings;
 import org.apache.http.Header;
 import org.apache.http.HttpEntity;
 import org.apache.http.HttpEntityEnclosingRequest;
@@ -41,7 +39,6 @@ import org.apache.http.concurrent.FutureCallback;
 import org.apache.http.conn.ConnectTimeoutException;
 import org.apache.http.entity.StringEntity;
 import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
-import org.apache.http.message.BasicHeader;
 import org.apache.http.message.BasicHttpResponse;
 import org.apache.http.message.BasicStatusLine;
 import org.apache.http.nio.protocol.HttpAsyncRequestProducer;
@@ -58,7 +55,10 @@ import java.net.URI;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.Future;

 import static org.elasticsearch.client.RestClientTestUtil.getAllErrorStatusCodes;
@@ -132,16 +132,11 @@ public class RestClientSingleHostTests extends RestClientTestCase {
         });


-        int numHeaders = RandomInts.randomIntBetween(getRandom(), 0, 3);
-        defaultHeaders = new Header[numHeaders];
-        for (int i = 0; i < numHeaders; i++) {
-            String headerName = "Header-default" + (getRandom().nextBoolean() ? i : "");
-            String headerValue = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 3, 10);
-            defaultHeaders[i] = new BasicHeader(headerName, headerValue);
-        }
+        int numHeaders = randomIntBetween(0, 3);
+        defaultHeaders = generateHeaders("Header-default", "Header-array", numHeaders);
         httpHost = new HttpHost("localhost", 9200);
         failureListener = new HostsTrackingFailureListener();
-        restClient = new RestClient(httpClient, 10000, defaultHeaders, new HttpHost[]{httpHost}, failureListener);
+        restClient = new RestClient(httpClient, 10000, defaultHeaders, new HttpHost[]{httpHost}, null, failureListener);
     }

     /**
@@ -333,20 +328,13 @@ public class RestClientSingleHostTests extends RestClientTestCase {
      */
     public void testHeaders() throws IOException {
         for (String method : getHttpMethods()) {
-            Map<String, String> expectedHeaders = new HashMap<>();
-            for (Header defaultHeader : defaultHeaders) {
-                expectedHeaders.put(defaultHeader.getName(), defaultHeader.getValue());
-            }
-            int numHeaders = RandomInts.randomIntBetween(getRandom(), 1, 5);
-            Header[] headers = new Header[numHeaders];
-            for (int i = 0; i < numHeaders; i++) {
-                String headerName = "Header" + (getRandom().nextBoolean() ? i : "");
-                String headerValue = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 3, 10);
-                headers[i] = new BasicHeader(headerName, headerValue);
-                expectedHeaders.put(headerName, headerValue);
-            }
+            final int numHeaders = randomIntBetween(1, 5);
+            final Header[] headers = generateHeaders("Header", null, numHeaders);
+            final Map<String, List<String>> expectedHeaders = new HashMap<>();

-            int statusCode = randomStatusCode(getRandom());
+            addHeaders(expectedHeaders, defaultHeaders, headers);
+
+            final int statusCode = randomStatusCode(getRandom());
             Response esResponse;
             try {
                 esResponse = restClient.performRequest(method, "/" + statusCode, headers);
@@ -355,10 +343,18 @@ public class RestClientSingleHostTests extends RestClientTestCase {
             }
             assertThat(esResponse.getStatusLine().getStatusCode(), equalTo(statusCode));
             for (Header responseHeader : esResponse.getHeaders()) {
-                String headerValue = expectedHeaders.remove(responseHeader.getName());
-                assertNotNull("found response header [" + responseHeader.getName() + "] that wasn't originally sent", headerValue);
+                final String name = responseHeader.getName();
+                final String value = responseHeader.getValue();
+                final List<String> values = expectedHeaders.get(name);
+                assertNotNull("found response header [" + name + "] that wasn't originally sent: " + value, values);
+                assertTrue("found incorrect response header [" + name + "]: " + value, values.remove(value));
+
+                // we've collected them all
+                if (values.isEmpty()) {
+                    expectedHeaders.remove(name);
+                }
             }
-            assertEquals("some headers that were sent weren't returned " + expectedHeaders, 0, expectedHeaders.size());
+            assertTrue("some headers that were sent weren't returned " + expectedHeaders, expectedHeaders.isEmpty());
         }
     }

@@ -368,11 +364,11 @@ public class RestClientSingleHostTests extends RestClientTestCase {
         Map<String, String> params = Collections.emptyMap();
         boolean hasParams = randomBoolean();
         if (hasParams) {
-            int numParams = RandomInts.randomIntBetween(getRandom(), 1, 3);
+            int numParams = randomIntBetween(1, 3);
             params = new HashMap<>(numParams);
             for (int i = 0; i < numParams; i++) {
                 String paramKey = "param-" + i;
-                String paramValue = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 3, 10);
+                String paramValue = randomAsciiOfLengthBetween(3, 10);
                 params.put(paramKey, paramValue);
                 uriBuilder.addParameter(paramKey, paramValue);
             }
@@ -412,24 +408,24 @@ public class RestClientSingleHostTests extends RestClientTestCase {
         HttpEntity entity = null;
         boolean hasBody = request instanceof HttpEntityEnclosingRequest && getRandom().nextBoolean();
         if (hasBody) {
-            entity = new StringEntity(RandomStrings.randomAsciiOfLengthBetween(getRandom(), 10, 100));
+            entity = new StringEntity(randomAsciiOfLengthBetween(10, 100));
             ((HttpEntityEnclosingRequest) request).setEntity(entity);
         }

         Header[] headers = new Header[0];
-        for (Header defaultHeader : defaultHeaders) {
-            //default headers are expected but not sent for each request
-            request.setHeader(defaultHeader);
-        }
-        if (getRandom().nextBoolean()) {
-            int numHeaders = RandomInts.randomIntBetween(getRandom(), 1, 5);
-            headers = new Header[numHeaders];
-            for (int i = 0; i < numHeaders; i++) {
-                String headerName = "Header" + (getRandom().nextBoolean() ? i : "");
-                String headerValue = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 3, 10);
-                BasicHeader basicHeader = new BasicHeader(headerName, headerValue);
-                headers[i] = basicHeader;
-                request.setHeader(basicHeader);
+        final int numHeaders = randomIntBetween(1, 5);
+        final Set<String> uniqueNames = new HashSet<>(numHeaders);
+        if (randomBoolean()) {
+            headers = generateHeaders("Header", "Header-array", numHeaders);
+            for (Header header : headers) {
+                request.addHeader(header);
+                uniqueNames.add(header.getName());
+            }
+        }
+        for (Header defaultHeader : defaultHeaders) {
+            // request level headers override default headers
+            if (uniqueNames.contains(defaultHeader.getName()) == false) {
+                request.addHeader(defaultHeader);
             }
         }
@@ -459,4 +455,5 @@ public class RestClientSingleHostTests extends RestClientTestCase {
             throw new UnsupportedOperationException();
         }
     }
+
 }
@@ -30,6 +30,7 @@ install.enabled = false
 uploadArchives.enabled = false

 dependencies {
+  compile "org.apache.httpcomponents:httpcore:${versions.httpcore}"
   compile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
   compile "junit:junit:${versions.junit}"
   compile "org.hamcrest:hamcrest-all:${versions.hamcrest}"
@@ -31,6 +31,15 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
 import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies;
 import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;

+import org.apache.http.Header;
+import org.apache.http.message.BasicHeader;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
 @TestMethodProviders({
         JUnit3MethodProvider.class
 })
@@ -43,4 +52,71 @@ import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
 @TimeoutSuite(millis = 2 * 60 * 60 * 1000)
 public abstract class RestClientTestCase extends RandomizedTest {
+
+    /**
+     * Create the specified number of {@link Header}s.
+     * <p>
+     * Generated header names will be the {@code baseName} plus its index or, rarely, the {@code arrayName} if it's supplied.
+     *
+     * @param baseName The base name to use for all headers.
+     * @param arrayName The optional ({@code null}able) array name to use randomly.
+     * @param headers The number of headers to create.
+     * @return Never {@code null}.
+     */
+    protected static Header[] generateHeaders(final String baseName, final String arrayName, final int headers) {
+        final Header[] generated = new Header[headers];
+        for (int i = 0; i < headers; i++) {
+            String headerName = baseName + i;
+            if (arrayName != null && rarely()) {
+                headerName = arrayName;
+            }
+
+            generated[i] = new BasicHeader(headerName, randomAsciiOfLengthBetween(3, 10));
+        }
+        return generated;
+    }
+
+    /**
+     * Create a new {@link List} within the {@code map} if none exists for {@code name} or append to the existing list.
+     *
+     * @param map The map to manipulate.
+     * @param name The name to create/append the list for.
+     * @param value The value to add.
+     */
+    private static void createOrAppendList(final Map<String, List<String>> map, final String name, final String value) {
+        List<String> values = map.get(name);
+
+        if (values == null) {
+            values = new ArrayList<>();
+            map.put(name, values);
+        }
+
+        values.add(value);
+    }
+
+    /**
+     * Add the {@code headers} to the {@code map} so that related tests can more easily assert that they exist.
+     * <p>
+     * If both the {@code defaultHeaders} and {@code headers} contain the same {@link Header}, based on its
+     * {@linkplain Header#getName() name}, then this will only use the {@code Header}(s) from {@code headers}.
+     *
+     * @param map The map to build with name/value(s) pairs.
+     * @param defaultHeaders The headers to add to the map representing default headers.
+     * @param headers The headers to add to the map representing request-level headers.
+     * @see #createOrAppendList(Map, String, String)
+     */
+    protected static void addHeaders(final Map<String, List<String>> map, final Header[] defaultHeaders, final Header[] headers) {
+        final Set<String> uniqueHeaders = new HashSet<>();
+        for (final Header header : headers) {
+            final String name = header.getName();
+            createOrAppendList(map, name, header.getValue());
+            uniqueHeaders.add(name);
+        }
+        for (final Header defaultHeader : defaultHeaders) {
+            final String name = defaultHeader.getName();
+            if (uniqueHeaders.contains(name) == false) {
+                createOrAppendList(map, name, defaultHeader.getValue());
+            }
+        }
+    }
+
 }
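A sketch of how the new helpers compose (runnable only inside a RestClientTestCase subclass, since the helpers are protected; the fixed BasicHeader inputs here are illustrative, the real tests generate them randomly):

    Header[] defaultHeaders = { new BasicHeader("Header-default0", "d"), new BasicHeader("Shared", "old") };
    Header[] requestHeaders = { new BasicHeader("Shared", "new"), new BasicHeader("Shared", "new2") };
    Map<String, List<String>> expected = new HashMap<>();
    addHeaders(expected, defaultHeaders, requestHeaders);
    // expected == { "Shared" -> ["new", "new2"], "Header-default0" -> ["d"] }
    // the default "Shared" header is dropped because request-level headers win by name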
@@ -20,6 +20,7 @@
 package org.elasticsearch.transport.client;

 import com.carrotsearch.randomizedtesting.RandomizedTest;
+import org.apache.lucene.util.Constants;
 import org.elasticsearch.client.transport.TransportClient;
 import org.elasticsearch.common.network.NetworkModule;
 import org.elasticsearch.common.settings.Settings;
@@ -40,6 +41,8 @@ public class PreBuiltTransportClientTests extends RandomizedTest {

     @Test
     public void testPluginInstalled() {
+        // TODO: remove when Netty 4.1.5 is upgraded to Netty 4.1.6 including https://github.com/netty/netty/pull/5778
+        assumeFalse(Constants.JRE_IS_MINIMUM_JAVA9);
         try (TransportClient client = new PreBuiltTransportClient(Settings.EMPTY)) {
             Settings settings = client.settings();
             assertEquals(Netty4Plugin.NETTY_TRANSPORT_NAME, NetworkModule.HTTP_DEFAULT_TYPE_SETTING.get(settings));
@@ -49,9 +52,7 @@ public class PreBuiltTransportClientTests extends RandomizedTest {

     @Test
     public void testInstallPluginTwice() {
-
-        for (Class<? extends Plugin> plugin : Arrays.asList(ReindexPlugin.class, PercolatorPlugin.class,
-                MustachePlugin.class)) {
+        for (Class<? extends Plugin> plugin : Arrays.asList(ReindexPlugin.class, PercolatorPlugin.class, MustachePlugin.class)) {
             try {
                 new PreBuiltTransportClient(Settings.EMPTY, plugin);
                 fail("exception expected");
@@ -85,8 +85,10 @@ dependencies {
   compile "com.vividsolutions:jts:${versions.jts}", optional

   // logging
-  compile "log4j:log4j:${versions.log4j}", optional
-  compile "log4j:apache-log4j-extras:${versions.log4j}", optional
+  compile "org.apache.logging.log4j:log4j-api:${versions.log4j}", optional
+  compile "org.apache.logging.log4j:log4j-core:${versions.log4j}", optional
+  // to bridge dependencies that are still on Log4j 1 to Log4j 2
+  compile "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}", optional

   compile "net.java.dev.jna:jna:${versions.jna}"

@@ -154,32 +156,94 @@ thirdPartyAudit.excludes = [
   // classes are missing!

   // from com.fasterxml.jackson.dataformat.yaml.YAMLMapper (jackson-dataformat-yaml)
   'com.fasterxml.jackson.databind.ObjectMapper',

-  // from org.apache.log4j.receivers.net.JMSReceiver (log4j-extras)
-  'javax.jms.Message',
-  'javax.jms.MessageListener',
-  'javax.jms.ObjectMessage',
-  'javax.jms.TopicConnection',
-  'javax.jms.TopicConnectionFactory',
-  'javax.jms.TopicPublisher',
-  'javax.jms.TopicSession',
-  'javax.jms.TopicSubscriber',
+  // from log4j
+  'com.fasterxml.jackson.annotation.JsonInclude$Include',
+  'com.fasterxml.jackson.databind.DeserializationContext',
+  'com.fasterxml.jackson.databind.JsonMappingException',
+  'com.fasterxml.jackson.databind.JsonNode',
+  'com.fasterxml.jackson.databind.Module$SetupContext',
+  'com.fasterxml.jackson.databind.ObjectReader',
+  'com.fasterxml.jackson.databind.ObjectWriter',
+  'com.fasterxml.jackson.databind.SerializerProvider',
+  'com.fasterxml.jackson.databind.deser.std.StdDeserializer',
+  'com.fasterxml.jackson.databind.deser.std.StdScalarDeserializer',
+  'com.fasterxml.jackson.databind.module.SimpleModule',
+  'com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter',
+  'com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider',
+  'com.fasterxml.jackson.databind.ser.std.StdScalarSerializer',
+  'com.fasterxml.jackson.databind.ser.std.StdSerializer',
+  'com.fasterxml.jackson.dataformat.xml.JacksonXmlModule',
+  'com.fasterxml.jackson.dataformat.xml.XmlMapper',
+  'com.fasterxml.jackson.dataformat.xml.util.DefaultXmlPrettyPrinter',
+  'com.lmax.disruptor.BlockingWaitStrategy',
+  'com.lmax.disruptor.BusySpinWaitStrategy',
+  'com.lmax.disruptor.EventFactory',
+  'com.lmax.disruptor.EventTranslator',
+  'com.lmax.disruptor.EventTranslatorTwoArg',
+  'com.lmax.disruptor.EventTranslatorVararg',
+  'com.lmax.disruptor.ExceptionHandler',
+  'com.lmax.disruptor.LifecycleAware',
+  'com.lmax.disruptor.RingBuffer',
+  'com.lmax.disruptor.Sequence',
+  'com.lmax.disruptor.SequenceReportingEventHandler',
+  'com.lmax.disruptor.SleepingWaitStrategy',
+  'com.lmax.disruptor.TimeoutBlockingWaitStrategy',
+  'com.lmax.disruptor.WaitStrategy',
+  'com.lmax.disruptor.YieldingWaitStrategy',
+  'com.lmax.disruptor.dsl.Disruptor',
+  'com.lmax.disruptor.dsl.ProducerType',
+  'javax.jms.Connection',
+  'javax.jms.ConnectionFactory',
+  'javax.jms.Destination',
+  'javax.jms.Message',
+  'javax.jms.MessageConsumer',
+  'javax.jms.MessageListener',
+  'javax.jms.MessageProducer',
+  'javax.jms.ObjectMessage',
+  'javax.jms.Session',
+  'javax.mail.Authenticator',
+  'javax.mail.Message$RecipientType',
+  'javax.mail.PasswordAuthentication',
+  'javax.mail.Session',
+  'javax.mail.Transport',
+  'javax.mail.internet.InternetAddress',
+  'javax.mail.internet.InternetHeaders',
+  'javax.mail.internet.MimeBodyPart',
+  'javax.mail.internet.MimeMessage',
+  'javax.mail.internet.MimeMultipart',
+  'javax.mail.internet.MimeUtility',
+  'javax.mail.util.ByteArrayDataSource',
+  'javax.persistence.AttributeConverter',
+  'javax.persistence.EntityManager',
+  'javax.persistence.EntityManagerFactory',
+  'javax.persistence.EntityTransaction',
+  'javax.persistence.Persistence',
+  'javax.persistence.PersistenceException',
+  'org.apache.commons.compress.compressors.CompressorStreamFactory',
+  'org.apache.commons.compress.utils.IOUtils',
+  'org.apache.commons.csv.CSVFormat',
+  'org.apache.commons.csv.QuoteMode',
+  'org.apache.kafka.clients.producer.KafkaProducer',
+  'org.apache.kafka.clients.producer.Producer',
+  'org.apache.kafka.clients.producer.ProducerRecord',
+  'org.codehaus.stax2.XMLStreamWriter2',
+  'org.osgi.framework.AdaptPermission',
+  'org.osgi.framework.AdminPermission',
+  'org.osgi.framework.Bundle',
+  'org.osgi.framework.BundleActivator',
+  'org.osgi.framework.BundleContext',
+  'org.osgi.framework.BundleEvent',
+  'org.osgi.framework.BundleReference',
+  'org.osgi.framework.FrameworkUtil',
+  'org.osgi.framework.SynchronousBundleListener',
+  'org.osgi.framework.wiring.BundleWire',
+  'org.osgi.framework.wiring.BundleWiring',
+  'org.zeromq.ZMQ$Context',
+  'org.zeromq.ZMQ$Socket',
+  'org.zeromq.ZMQ',

-  // from org.apache.log4j.net.SMTPAppender (log4j)
-  'javax.mail.Authenticator',
-  'javax.mail.Message$RecipientType',
-  'javax.mail.Message',
-  'javax.mail.Multipart',
-  'javax.mail.PasswordAuthentication',
-  'javax.mail.Session',
-  'javax.mail.Transport',
-  'javax.mail.internet.InternetAddress',
-  'javax.mail.internet.InternetHeaders',
-  'javax.mail.internet.MimeBodyPart',
-  'javax.mail.internet.MimeMessage',
-  'javax.mail.internet.MimeMultipart',
-  'javax.mail.internet.MimeUtility',
   // from org.locationtech.spatial4j.io.GeoJSONReader (spatial4j)
   'org.noggit.JSONParser',
 ]
@@ -1,37 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.log4j;
-
-import org.apache.log4j.helpers.ThreadLocalMap;
-
-/**
- * Log4j 1.2 MDC breaks because it parses java.version incorrectly (does not handle new java9 versioning).
- *
- * This hack fixes up the pkg private members as if it had detected the java version correctly.
- */
-public class Java9Hack {
-
-    public static void fixLog4j() {
-        if (MDC.mdc.tlm == null) {
-            MDC.mdc.java1 = false;
-            MDC.mdc.tlm = new ThreadLocalMap();
-        }
-    }
-}
@@ -19,6 +19,7 @@

 package org.elasticsearch;

+import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.elasticsearch.action.support.replication.ReplicationOperation;
 import org.elasticsearch.cluster.action.shard.ShardStateAction;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -19,12 +19,12 @@

 package org.elasticsearch;

+import org.apache.logging.log4j.Logger;
 import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.IndexFormatTooNewException;
 import org.apache.lucene.index.IndexFormatTooOldException;
 import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.rest.RestStatus;
@@ -39,7 +39,7 @@ import java.util.Set;

 public final class ExceptionsHelper {

-    private static final ESLogger logger = Loggers.getLogger(ExceptionsHelper.class);
+    private static final Logger logger = Loggers.getLogger(ExceptionsHelper.class);

     public static RuntimeException convertToRuntime(Exception e) {
         if (e instanceof RuntimeException) {
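The same one-line migration repeats throughout this merge: the declared logger type moves from the deprecated ESLogger to Log4j 2's Logger, while lookups keep going through the existing factories (Loggers, ESLoggerFactory). A minimal sketch of the resulting idiom (class name illustrative):

    import org.apache.logging.log4j.Logger;
    import org.elasticsearch.common.logging.Loggers;

    public class AnyComponent {
        private static final Logger logger = Loggers.getLogger(AnyComponent.class);
    }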
@@ -73,6 +73,8 @@ public class Version {
     public static final Version V_2_3_4 = new Version(V_2_3_4_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
     public static final int V_2_3_5_ID = 2030599;
     public static final Version V_2_3_5 = new Version(V_2_3_5_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
+    public static final int V_2_4_0_ID = 2040099;
+    public static final Version V_2_4_0 = new Version(V_2_4_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_2);
     public static final int V_5_0_0_alpha1_ID = 5000001;
     public static final Version V_5_0_0_alpha1 = new Version(V_5_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
     public static final int V_5_0_0_alpha2_ID = 5000002;
@@ -110,6 +112,8 @@ public class Version {
                 return V_5_0_0_alpha2;
             case V_5_0_0_alpha1_ID:
                 return V_5_0_0_alpha1;
+            case V_2_4_0_ID:
+                return V_2_4_0;
             case V_2_3_5_ID:
                 return V_2_3_5;
             case V_2_3_4_ID:
@@ -20,7 +20,6 @@
 package org.elasticsearch.action;

 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
@@ -335,7 +334,7 @@ public class ActionModule extends AbstractModule {
         this.actionPlugins = actionPlugins;
         actions = setupActions(actionPlugins);
         actionFilters = setupActionFilters(actionPlugins, ingestEnabled);
-        autoCreateIndex = transportClient ? null : new AutoCreateIndex(settings, resolver);
+        autoCreateIndex = transportClient ? null : new AutoCreateIndex(settings, clusterSettings, resolver);
         destructiveOperations = new DestructiveOperations(settings, clusterSettings);
         Set<String> headers = actionPlugins.stream().flatMap(p -> p.getRestHeaders().stream()).collect(Collectors.toSet());
         restController = new RestController(settings, headers);
@@ -21,6 +21,7 @@ package org.elasticsearch.action.admin.cluster.health;

import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.MasterNodeReadRequest;
import org.elasticsearch.cluster.health.ClusterHealthStatus;

@@ -41,8 +42,8 @@ public class ClusterHealthRequest extends MasterNodeReadRequest<ClusterHealthReq
private String[] indices;
private TimeValue timeout = new TimeValue(30, TimeUnit.SECONDS);
private ClusterHealthStatus waitForStatus;
private int waitForRelocatingShards = -1;
private int waitForActiveShards = -1;
private boolean waitForNoRelocatingShards = false;
private ActiveShardCount waitForActiveShards = ActiveShardCount.NONE;
private String waitForNodes = "";
private Priority waitForEvents = null;

@@ -102,24 +103,52 @@ public class ClusterHealthRequest extends MasterNodeReadRequest<ClusterHealthReq
return waitForStatus(ClusterHealthStatus.YELLOW);
}

public int waitForRelocatingShards() {
return waitForRelocatingShards;
public boolean waitForNoRelocatingShards() {
return waitForNoRelocatingShards;
}

public ClusterHealthRequest waitForRelocatingShards(int waitForRelocatingShards) {
this.waitForRelocatingShards = waitForRelocatingShards;
/**
* Sets whether the request should wait for there to be no relocating shards before
* retrieving the cluster health status. Defaults to {@code false}, meaning the
* operation does not wait on there being no more relocating shards. Set to <code>true</code>
* to wait until the number of relocating shards in the cluster is 0.
*/
public ClusterHealthRequest waitForNoRelocatingShards(boolean waitForNoRelocatingShards) {
this.waitForNoRelocatingShards = waitForNoRelocatingShards;
return this;
}

public int waitForActiveShards() {
public ActiveShardCount waitForActiveShards() {
return waitForActiveShards;
}

public ClusterHealthRequest waitForActiveShards(int waitForActiveShards) {
this.waitForActiveShards = waitForActiveShards;
/**
* Sets the number of shard copies that must be active across all indices before getting the
* health status. Defaults to {@link ActiveShardCount#NONE}, meaning we don't wait on any active shards.
* Set this value to {@link ActiveShardCount#ALL} to wait for all shards (primary and
* all replicas) to be active across all indices in the cluster. Otherwise, use
* {@link ActiveShardCount#from(int)} to set this value to any non-negative integer, up to the
* total number of shard copies to wait for.
*/
public ClusterHealthRequest waitForActiveShards(ActiveShardCount waitForActiveShards) {
if (waitForActiveShards.equals(ActiveShardCount.DEFAULT)) {
// the default for cluster health request is 0, not 1
this.waitForActiveShards = ActiveShardCount.NONE;
} else {
this.waitForActiveShards = waitForActiveShards;
}
return this;
}

/**
* A shortcut for {@link #waitForActiveShards(ActiveShardCount)} where the numerical
* shard count is passed in, instead of having to first call {@link ActiveShardCount#from(int)}
* to get the ActiveShardCount.
*/
public ClusterHealthRequest waitForActiveShards(final int waitForActiveShards) {
return waitForActiveShards(ActiveShardCount.from(waitForActiveShards));
}

public String waitForNodes() {
return waitForNodes;
}

@@ -162,8 +191,8 @@ public class ClusterHealthRequest extends MasterNodeReadRequest<ClusterHealthReq
if (in.readBoolean()) {
waitForStatus = ClusterHealthStatus.fromValue(in.readByte());
}
waitForRelocatingShards = in.readInt();
waitForActiveShards = in.readInt();
waitForNoRelocatingShards = in.readBoolean();
waitForActiveShards = ActiveShardCount.readFrom(in);
waitForNodes = in.readString();
if (in.readBoolean()) {
waitForEvents = Priority.readFrom(in);

@@ -188,8 +217,8 @@ public class ClusterHealthRequest extends MasterNodeReadRequest<ClusterHealthReq
out.writeBoolean(true);
out.writeByte(waitForStatus.value());
}
out.writeInt(waitForRelocatingShards);
out.writeInt(waitForActiveShards);
out.writeBoolean(waitForNoRelocatingShards);
waitForActiveShards.writeTo(out);
out.writeString(waitForNodes);
if (waitForEvents == null) {
out.writeBoolean(false);
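Net effect of this file: callers now express the relocation wait as a boolean and the active-shard wait as an ActiveShardCount. A minimal usage sketch, assuming an already-connected `client`; the setter names come from the builder changes below:

// Illustrative only: wait until nothing is relocating and every shard copy is active.
ClusterHealthResponse health = client.admin().cluster().prepareHealth()
        .setWaitForNoRelocatingShards(true)            // was setWaitForRelocatingShards(0)
        .setWaitForActiveShards(ActiveShardCount.ALL)  // was a bare integer count
        .get();
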
@@ -19,6 +19,7 @@

package org.elasticsearch.action.admin.cluster.health;

import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.health.ClusterHealthStatus;

@@ -64,11 +65,40 @@ public class ClusterHealthRequestBuilder extends MasterNodeReadOperationRequestB
return this;
}

public ClusterHealthRequestBuilder setWaitForRelocatingShards(int waitForRelocatingShards) {
request.waitForRelocatingShards(waitForRelocatingShards);
/**
* Sets whether the request should wait for there to be no relocating shards before
* retrieving the cluster health status. Defaults to <code>false</code>, meaning the
* operation does not wait on there being no more relocating shards. Set to <code>true</code>
* to wait until the number of relocating shards in the cluster is 0.
*/
public ClusterHealthRequestBuilder setWaitForNoRelocatingShards(boolean waitForRelocatingShards) {
request.waitForNoRelocatingShards(waitForRelocatingShards);
return this;
}

/**
* Sets the number of shard copies that must be active before getting the health status.
* Defaults to {@link ActiveShardCount#NONE}, meaning we don't wait on any active shards.
* Set this value to {@link ActiveShardCount#ALL} to wait for all shards (primary and
* all replicas) to be active across all indices in the cluster. Otherwise, use
* {@link ActiveShardCount#from(int)} to set this value to any non-negative integer, up to the
* total number of shard copies that would exist across all indices in the cluster.
*/
public ClusterHealthRequestBuilder setWaitForActiveShards(ActiveShardCount waitForActiveShards) {
if (waitForActiveShards.equals(ActiveShardCount.DEFAULT)) {
// the default for cluster health is 0, not 1
request.waitForActiveShards(ActiveShardCount.NONE);
} else {
request.waitForActiveShards(waitForActiveShards);
}
return this;
}

/**
* A shortcut for {@link #setWaitForActiveShards(ActiveShardCount)} where the numerical
* shard count is passed in, instead of having to first call {@link ActiveShardCount#from(int)}
* to get the ActiveShardCount.
*/
public ClusterHealthRequestBuilder setWaitForActiveShards(int waitForActiveShards) {
request.waitForActiveShards(waitForActiveShards);
return this;
@@ -19,8 +19,11 @@

package org.elasticsearch.action.admin.cluster.health;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
import org.elasticsearch.cluster.ClusterState;

@@ -105,7 +108,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
@Override
public void onFailure(String source, Exception e) {
logger.error("unexpected failure during [{}]", e, source);
logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
listener.onFailure(e);
}

@@ -125,10 +128,10 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
if (request.waitForStatus() == null) {
waitFor--;
}
if (request.waitForRelocatingShards() == -1) {
if (request.waitForNoRelocatingShards() == false) {
waitFor--;
}
if (request.waitForActiveShards() == -1) {
if (request.waitForActiveShards().equals(ActiveShardCount.NONE)) {
waitFor--;
}
if (request.waitForNodes().isEmpty()) {

@@ -203,11 +206,22 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
if (request.waitForStatus() != null && response.getStatus().value() <= request.waitForStatus().value()) {
waitForCounter++;
}
if (request.waitForRelocatingShards() != -1 && response.getRelocatingShards() <= request.waitForRelocatingShards()) {
if (request.waitForNoRelocatingShards() && response.getRelocatingShards() == 0) {
waitForCounter++;
}
if (request.waitForActiveShards() != -1 && response.getActiveShards() >= request.waitForActiveShards()) {
waitForCounter++;
if (request.waitForActiveShards().equals(ActiveShardCount.NONE) == false) {
ActiveShardCount waitForActiveShards = request.waitForActiveShards();
assert waitForActiveShards.equals(ActiveShardCount.DEFAULT) == false :
"waitForActiveShards must not be DEFAULT on the request object, instead it should be NONE";
if (waitForActiveShards.equals(ActiveShardCount.ALL)
&& response.getUnassignedShards() == 0
&& response.getInitializingShards() == 0) {
// if we are waiting for all shards to be active, then the num of unassigned and num of initializing shards must be 0
waitForCounter++;
} else if (waitForActiveShards.enoughShardsActive(response.getActiveShards())) {
// there are enough active shards to meet the requirements of the request
waitForCounter++;
}
}
if (request.indices() != null && request.indices().length > 0) {
try {
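The logging rewrite in this file is the same pattern that repeats throughout the rest of this merge: the old ESLogger overload took the exception in the middle of the argument list, while Log4j 2 wants a lazily-built message plus a trailing throwable. A self-contained sketch of the pattern, assuming nothing beyond the Log4j 2 API itself:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

class LazyLoggingSketch {
    private static final Logger logger = LogManager.getLogger(LazyLoggingSketch.class);

    void onFailure(String source, Exception e) {
        // The lambda defers message formatting until the level is known to be
        // enabled; the (Supplier<?>) cast selects the error(Supplier, Throwable)
        // overload so `e` lands in the throwable slot, not in the varargs.
        logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
    }
}
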
@@ -19,6 +19,9 @@

package org.elasticsearch.action.admin.cluster.reroute;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;

@@ -33,7 +36,6 @@ import org.elasticsearch.cluster.routing.allocation.RoutingExplanations;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

@@ -77,13 +79,13 @@ public class TransportClusterRerouteAction extends TransportMasterNodeAction<Clu
private final ClusterRerouteRequest request;
private final ActionListener<ClusterRerouteResponse> listener;
private final ESLogger logger;
private final Logger logger;
private final AllocationService allocationService;
private volatile ClusterState clusterStateToSend;
private volatile RoutingExplanations explanations;

ClusterRerouteResponseAckedClusterStateUpdateTask(ESLogger logger, AllocationService allocationService, ClusterRerouteRequest request,
ActionListener<ClusterRerouteResponse> listener) {
ClusterRerouteResponseAckedClusterStateUpdateTask(Logger logger, AllocationService allocationService, ClusterRerouteRequest request,
ActionListener<ClusterRerouteResponse> listener) {
super(Priority.IMMEDIATE, request, listener);
this.request = request;
this.listener = listener;

@@ -103,7 +105,7 @@ public class TransportClusterRerouteAction extends TransportMasterNodeAction<Clu
@Override
public void onFailure(String source, Exception e) {
logger.debug("failed to perform [{}]", e, source);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to perform [{}]", source), e);
super.onFailure(source, e);
}

@@ -19,6 +19,8 @@

package org.elasticsearch.action.admin.cluster.settings;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;

@@ -148,7 +150,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
@Override
public void onFailure(String source, Exception e) {
//if the reroute fails we only log
logger.debug("failed to perform [{}]", e, source);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to perform [{}]", source), e);
listener.onFailure(new ElasticsearchException("reroute after update settings failed", e));
}

@@ -166,7 +168,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
@Override
public void onFailure(String source, Exception e) {
logger.debug("failed to perform [{}]", e, source);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to perform [{}]", source), e);
super.onFailure(source, e);
}

@@ -21,29 +21,22 @@ package org.elasticsearch.action.admin.indices.alias;
import org.elasticsearch.cluster.ack.ClusterStateUpdateRequest;
import org.elasticsearch.cluster.metadata.AliasAction;

import java.util.List;

/**
* Cluster state update request that allows to add or remove aliases
*/
public class IndicesAliasesClusterStateUpdateRequest extends ClusterStateUpdateRequest<IndicesAliasesClusterStateUpdateRequest> {
private final List<AliasAction> actions;

AliasAction[] actions;

public IndicesAliasesClusterStateUpdateRequest() {

public IndicesAliasesClusterStateUpdateRequest(List<AliasAction> actions) {
this.actions = actions;
}

/**
* Returns the alias actions to be performed
*/
public AliasAction[] actions() {
public List<AliasAction> actions() {
return actions;
}

/**
* Sets the alias actions to be executed
*/
public IndicesAliasesClusterStateUpdateRequest actions(AliasAction[] actions) {
this.actions = actions;
return this;
}
}

@@ -20,6 +20,8 @@

package org.elasticsearch.action.admin.indices.alias;

import com.carrotsearch.hppc.cursors.ObjectCursor;

import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.AliasesRequest;
import org.elasticsearch.action.CompositeIndicesRequest;

@@ -27,30 +29,41 @@ import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.cluster.metadata.AliasAction;
import org.elasticsearch.cluster.metadata.AliasAction.Type;
import org.elasticsearch.cluster.metadata.AliasMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.QueryBuilder;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.function.Supplier;

import static org.elasticsearch.action.ValidateActions.addValidationError;
import static org.elasticsearch.cluster.metadata.AliasAction.readAliasAction;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
import static org.elasticsearch.common.xcontent.ObjectParser.fromList;

/**
* A request to add/remove aliases for one or more indices.
*/
public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesRequest> implements CompositeIndicesRequest {

private List<AliasActions> allAliasActions = new ArrayList<>();

//indices options that require every specified index to exist, expand wildcards only to open indices and

@@ -61,94 +74,317 @@ public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesReq
}

/*
* Aliases can be added by passing multiple indices to the Request and
* deleted by passing multiple indices and aliases. They are expanded into
* distinct AliasAction instances when the request is processed. This class
* holds the AliasAction and in addition the arrays of alias names and
* indices that are later used to create the final AliasAction instances.
/**
* Request to take one or more actions on one or more indexes and alias combinations.
*/
public static class AliasActions implements AliasesRequest {
private String[] indices = Strings.EMPTY_ARRAY;
public static class AliasActions implements AliasesRequest, Writeable {
public enum Type {
ADD((byte) 0),
REMOVE((byte) 1),
REMOVE_INDEX((byte) 2);

private final byte value;

Type(byte value) {
this.value = value;
}

public byte value() {
return value;
}

public static Type fromValue(byte value) {
switch (value) {
case 0: return ADD;
case 1: return REMOVE;
case 2: return REMOVE_INDEX;
default: throw new IllegalArgumentException("No type for action [" + value + "]");
}
}
}

/**
* Build a new {@link AliasAction} to add aliases.
*/
public static AliasActions add() {
return new AliasActions(AliasActions.Type.ADD);
}
/**
* Build a new {@link AliasAction} to remove aliases.
*/
public static AliasActions remove() {
return new AliasActions(AliasActions.Type.REMOVE);
}
/**
* Build a new {@link AliasAction} to remove an index.
*/
public static AliasActions removeIndex() {
return new AliasActions(AliasActions.Type.REMOVE_INDEX);
}
private static ObjectParser<AliasActions, ParseFieldMatcherSupplier> parser(String name, Supplier<AliasActions> supplier) {
ObjectParser<AliasActions, ParseFieldMatcherSupplier> parser = new ObjectParser<>(name, supplier);
parser.declareString((action, index) -> {
if (action.indices() != null) {
throw new IllegalArgumentException("Only one of [index] and [indices] is supported");
}
action.index(index);
}, new ParseField("index"));
parser.declareStringArray(fromList(String.class, (action, indices) -> {
if (action.indices() != null) {
throw new IllegalArgumentException("Only one of [index] and [indices] is supported");
}
action.indices(indices);
}), new ParseField("indices"));
parser.declareString((action, alias) -> {
if (action.aliases() != null && action.aliases().length != 0) {
throw new IllegalArgumentException("Only one of [alias] and [aliases] is supported");
}
action.alias(alias);
}, new ParseField("alias"));
parser.declareStringArray(fromList(String.class, (action, aliases) -> {
if (action.aliases() != null && action.aliases().length != 0) {
throw new IllegalArgumentException("Only one of [alias] and [aliases] is supported");
}
action.aliases(aliases);
}), new ParseField("aliases"));
return parser;
}

private static final ObjectParser<AliasActions, ParseFieldMatcherSupplier> ADD_PARSER = parser("add", AliasActions::add);
static {
ADD_PARSER.declareObject(AliasActions::filter, (parser, m) -> {
try {
return parser.mapOrdered();
} catch (IOException e) {
throw new ParsingException(parser.getTokenLocation(), "Problems parsing [filter]", e);
}
}, new ParseField("filter"));
// Since we need to support numbers AND strings here we have to use ValueType.INT.
ADD_PARSER.declareField(AliasActions::routing, p -> p.text(), new ParseField("routing"), ValueType.INT);
ADD_PARSER.declareField(AliasActions::indexRouting, p -> p.text(), new ParseField("index_routing"), ValueType.INT);
ADD_PARSER.declareField(AliasActions::searchRouting, p -> p.text(), new ParseField("search_routing"), ValueType.INT);
}
private static final ObjectParser<AliasActions, ParseFieldMatcherSupplier> REMOVE_PARSER = parser("remove", AliasActions::remove);
private static final ObjectParser<AliasActions, ParseFieldMatcherSupplier> REMOVE_INDEX_PARSER = parser("remove_index",
AliasActions::removeIndex);

/**
* Parser for any one {@link AliasAction}.
*/
public static final ConstructingObjectParser<AliasActions, ParseFieldMatcherSupplier> PARSER = new ConstructingObjectParser<>(
"alias_action", a -> {
// Take the first action and complain if there is more than one action
AliasActions action = null;
for (Object o : a) {
if (o != null) {
if (action == null) {
action = (AliasActions) o;
} else {
throw new IllegalArgumentException("Too many operations declared on one operation entry");
}
}
}
return action;
});
static {
PARSER.declareObject(optionalConstructorArg(), ADD_PARSER, new ParseField("add"));
PARSER.declareObject(optionalConstructorArg(), REMOVE_PARSER, new ParseField("remove"));
PARSER.declareObject(optionalConstructorArg(), REMOVE_INDEX_PARSER, new ParseField("remove_index"));
}

private final AliasActions.Type type;
private String[] indices;
private String[] aliases = Strings.EMPTY_ARRAY;
private AliasAction aliasAction;
private String filter;
private String routing;
private String indexRouting;
private String searchRouting;

public AliasActions(AliasAction.Type type, String[] indices, String[] aliases) {
aliasAction = new AliasAction(type);
indices(indices);
aliases(aliases);
AliasActions(AliasActions.Type type) {
this.type = type;
}

public AliasActions(AliasAction.Type type, String index, String alias) {
aliasAction = new AliasAction(type);
indices(index);
aliases(alias);
/**
* Read from a stream.
*/
public AliasActions(StreamInput in) throws IOException {
type = AliasActions.Type.fromValue(in.readByte());
indices = in.readStringArray();
aliases = in.readStringArray();
filter = in.readOptionalString();
routing = in.readOptionalString();
searchRouting = in.readOptionalString();
indexRouting = in.readOptionalString();
}

AliasActions(AliasAction.Type type, String[] index, String alias) {
aliasAction = new AliasAction(type);
indices(index);
aliases(alias);
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeByte(type.value());
out.writeStringArray(indices);
out.writeStringArray(aliases);
out.writeOptionalString(filter);
out.writeOptionalString(routing);
out.writeOptionalString(searchRouting);
out.writeOptionalString(indexRouting);
}

public AliasActions(AliasAction action) {
this.aliasAction = action;
indices(action.index());
aliases(action.alias());
/**
* Validate that the action is sane. Called when the action is added to the request because actions can be invalid while being
* built.
*/
void validate() {
if (indices == null) {
throw new IllegalArgumentException("One of [index] or [indices] is required");
}
if (type != AliasActions.Type.REMOVE_INDEX && (aliases == null || aliases.length == 0)) {
throw new IllegalArgumentException("One of [alias] or [aliases] is required");
}
}

public AliasActions(Type type, String index, String[] aliases) {
aliasAction = new AliasAction(type);
indices(index);
aliases(aliases);
}

public AliasActions() {
}

public AliasActions filter(Map<String, Object> filter) {
aliasAction.filter(filter);
return this;
}

public AliasActions filter(QueryBuilder filter) {
aliasAction.filter(filter);
return this;
}

public Type actionType() {
return aliasAction.actionType();
}

public void routing(String routing) {
aliasAction.routing(routing);
}

public void searchRouting(String searchRouting) {
aliasAction.searchRouting(searchRouting);
}

public void indexRouting(String indexRouting) {
aliasAction.indexRouting(indexRouting);
}

public AliasActions filter(String filter) {
aliasAction.filter(filter);
return this;
/**
* Type of the action to perform.
*/
public AliasActions.Type actionType() {
return type;
}

@Override
public AliasActions indices(String... indices) {
if (indices == null || indices.length == 0) {
throw new IllegalArgumentException("[indices] can't be empty");
}
for (String index : indices) {
if (false == Strings.hasLength(index)) {
throw new IllegalArgumentException("[indices] can't contain empty string");
}
}
this.indices = indices;
return this;
}

/**
* Set the index this action is operating on.
*/
public AliasActions index(String index) {
if (false == Strings.hasLength(index)) {
throw new IllegalArgumentException("[index] can't be empty string");
}
this.indices = new String[] {index};
return this;
}

/**
* Aliases to use with this action.
*/
@Override
public AliasActions aliases(String... aliases) {
if (type == AliasActions.Type.REMOVE_INDEX) {
throw new IllegalArgumentException("[aliases] is unsupported for [" + type + "]");
}
if (aliases == null || aliases.length == 0) {
throw new IllegalArgumentException("[aliases] can't be empty");
}
for (String alias : aliases) {
if (false == Strings.hasLength(alias)) {
throw new IllegalArgumentException("[aliases] can't contain empty string");
}
}
this.aliases = aliases;
return this;
}

/**
* Set the alias this action is operating on.
*/
public AliasActions alias(String alias) {
if (type == AliasActions.Type.REMOVE_INDEX) {
throw new IllegalArgumentException("[alias] is unsupported for [" + type + "]");
}
if (false == Strings.hasLength(alias)) {
throw new IllegalArgumentException("[alias] can't be empty string");
}
this.aliases = new String[] {alias};
return this;
}

/**
* Set the default routing.
*/
public AliasActions routing(String routing) {
if (type != AliasActions.Type.ADD) {
throw new IllegalArgumentException("[routing] is unsupported for [" + type + "]");
}
this.routing = routing;
return this;
}

public String searchRouting() {
return searchRouting == null ? routing : searchRouting;
}

public AliasActions searchRouting(String searchRouting) {
if (type != AliasActions.Type.ADD) {
throw new IllegalArgumentException("[search_routing] is unsupported for [" + type + "]");
}
this.searchRouting = searchRouting;
return this;
}

public String indexRouting() {
return indexRouting == null ? routing : indexRouting;
}

public AliasActions indexRouting(String indexRouting) {
if (type != AliasActions.Type.ADD) {
throw new IllegalArgumentException("[index_routing] is unsupported for [" + type + "]");
}
this.indexRouting = indexRouting;
return this;
}

public String filter() {
return filter;
}

public AliasActions filter(String filter) {
if (type != AliasActions.Type.ADD) {
throw new IllegalArgumentException("[filter] is unsupported for [" + type + "]");
}
this.filter = filter;
return this;
}

public AliasActions filter(Map<String, Object> filter) {
if (filter == null || filter.isEmpty()) {
this.filter = null;
return this;
}
try {
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
builder.map(filter);
this.filter = builder.string();
return this;
} catch (IOException e) {
throw new ElasticsearchGenerationException("Failed to generate [" + filter + "]", e);
}
}

public AliasActions filter(QueryBuilder filter) {
if (filter == null) {
this.filter = null;
return this;
}
try {
XContentBuilder builder = XContentFactory.jsonBuilder();
filter.toXContent(builder, ToXContent.EMPTY_PARAMS);
builder.close();
this.filter = builder.string();
return this;
} catch (IOException e) {
throw new ElasticsearchGenerationException("Failed to build json for alias request", e);
}
}

@Override
public String[] aliases() {
return aliases;

@@ -157,7 +393,7 @@ public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesReq
@Override
public boolean expandAliasesWildcards() {
//remove operations support wildcards among aliases, add operations don't
return aliasAction.actionType() == Type.REMOVE;
return type == Type.REMOVE;
}

@Override

@@ -170,10 +406,6 @@ public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesReq
return INDICES_OPTIONS;
}

public AliasAction aliasAction() {
return aliasAction;
}

public String[] concreteAliases(MetaData metaData, String concreteIndex) {
if (expandAliasesWildcards()) {
//for DELETE we expand the aliases

@@ -191,83 +423,48 @@ public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesReq
return aliases;
}
}
public AliasActions readFrom(StreamInput in) throws IOException {
indices = in.readStringArray();
aliases = in.readStringArray();
aliasAction = readAliasAction(in);
return this;

@Override
public String toString() {
return "AliasActions["
+ "type=" + type
+ ",indices=" + Arrays.toString(indices)
+ ",aliases=" + Arrays.deepToString(aliases)
+ ",filter=" + filter
+ ",routing=" + routing
+ ",indexRouting=" + indexRouting
+ ",searchRouting=" + searchRouting
+ "]";
}

public void writeTo(StreamOutput out) throws IOException {
out.writeStringArray(indices);
out.writeStringArray(aliases);
this.aliasAction.writeTo(out);
// equals, and hashCode implemented for easy testing of round trip
@Override
public boolean equals(Object obj) {
if (obj == null || obj.getClass() != getClass()) {
return false;
}
AliasActions other = (AliasActions) obj;
return Objects.equals(type, other.type)
&& Arrays.equals(indices, other.indices)
&& Arrays.equals(aliases, other.aliases)
&& Objects.equals(filter, other.filter)
&& Objects.equals(routing, other.routing)
&& Objects.equals(indexRouting, other.indexRouting)
&& Objects.equals(searchRouting, other.searchRouting);
}

@Override
public int hashCode() {
return Objects.hash(type, indices, aliases, filter, routing, indexRouting, searchRouting);
}
}

/**
* Adds an alias to the index.
* @param alias The alias
* @param indices The indices
* Add the action to this request and validate it.
*/
public IndicesAliasesRequest addAlias(String alias, String... indices) {
addAliasAction(new AliasActions(AliasAction.Type.ADD, indices, alias));
return this;
}

public void addAliasAction(AliasActions aliasAction) {
public IndicesAliasesRequest addAliasAction(AliasActions aliasAction) {
aliasAction.validate();
allAliasActions.add(aliasAction);
}

public IndicesAliasesRequest addAliasAction(AliasAction action) {
addAliasAction(new AliasActions(action));
return this;
}

/**
* Adds an alias to the index.
* @param alias The alias
* @param filter The filter
* @param indices The indices
*/
public IndicesAliasesRequest addAlias(String alias, Map<String, Object> filter, String... indices) {
addAliasAction(new AliasActions(AliasAction.Type.ADD, indices, alias).filter(filter));
return this;
}

/**
* Adds an alias to the index.
* @param alias The alias
* @param filterBuilder The filter
* @param indices The indices
*/
public IndicesAliasesRequest addAlias(String alias, QueryBuilder filterBuilder, String... indices) {
addAliasAction(new AliasActions(AliasAction.Type.ADD, indices, alias).filter(filterBuilder));
return this;
}

/**
* Removes an alias from the index.
*
* @param indices The indices
* @param aliases The aliases
*/
public IndicesAliasesRequest removeAlias(String[] indices, String... aliases) {
addAliasAction(new AliasActions(AliasAction.Type.REMOVE, indices, aliases));
return this;
}

/**
* Removes an alias from the index.
*
* @param index The index
* @param aliases The aliases
*/
public IndicesAliasesRequest removeAlias(String index, String... aliases) {
addAliasAction(new AliasActions(AliasAction.Type.REMOVE, index, aliases));
return this;
}

@@ -285,50 +482,20 @@ IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesReq
if (allAliasActions.isEmpty()) {
return addValidationError("Must specify at least one alias action", validationException);
}
for (AliasActions aliasAction : allAliasActions) {
if (CollectionUtils.isEmpty(aliasAction.aliases)) {
validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
+ "]: Property [alias/aliases] is either missing or null", validationException);
} else {
for (String alias : aliasAction.aliases) {
if (!Strings.hasText(alias)) {
validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
+ "]: [alias/aliases] may not be empty string", validationException);
}
}
}
if (CollectionUtils.isEmpty(aliasAction.indices)) {
validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
+ "]: Property [index/indices] is either missing or null", validationException);
} else {
for (String index : aliasAction.indices) {
if (!Strings.hasText(index)) {
validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
+ "]: [index/indices] may not be empty string", validationException);
}
}
}
}
return validationException;
}

@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
int size = in.readVInt();
for (int i = 0; i < size; i++) {
allAliasActions.add(readAliasActions(in));
}
allAliasActions = in.readList(AliasActions::new);
readTimeout(in);
}

@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(allAliasActions.size());
for (AliasActions aliasAction : allAliasActions) {
aliasAction.writeTo(out);
}
out.writeList(allAliasActions);
writeTimeout(out);
}

@@ -336,11 +503,6 @@ IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesReq
return INDICES_OPTIONS;
}

private static AliasActions readAliasActions(StreamInput in) throws IOException {
AliasActions actions = new AliasActions();
return actions.readFrom(in);
}

@Override
public List<? extends IndicesRequest> subRequests() {
return allAliasActions;
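Taken together, AliasActions now carries its own type, validation, and wire format. A short usage sketch of the new fluent entry points, with illustrative index and alias names:

// Illustrative only: one request carrying all three action types.
IndicesAliasesRequest request = new IndicesAliasesRequest();
request.addAliasAction(AliasActions.add().index("logs-2016-09").alias("logs"));    // validated on add
request.addAliasAction(AliasActions.remove().index("logs-2016-08").alias("logs"));
request.addAliasAction(AliasActions.removeIndex().index("logs-2016-01"));          // new REMOVE_INDEX type
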
@@ -22,15 +22,15 @@ package org.elasticsearch.action.admin.indices.alias;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions;
import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.metadata.AliasAction;
import org.elasticsearch.index.query.QueryBuilder;

import java.util.Map;

/**
*
* Builder for request to modify many aliases at once.
*/
public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<IndicesAliasesRequest, IndicesAliasesResponse, IndicesAliasesRequestBuilder> {
public class IndicesAliasesRequestBuilder
extends AcknowledgedRequestBuilder<IndicesAliasesRequest, IndicesAliasesResponse, IndicesAliasesRequestBuilder> {

public IndicesAliasesRequestBuilder(ElasticsearchClient client, IndicesAliasesAction action) {
super(client, action, new IndicesAliasesRequest());

@@ -43,7 +43,7 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<Ind
* @param alias The alias
*/
public IndicesAliasesRequestBuilder addAlias(String index, String alias) {
request.addAlias(alias, index);
request.addAliasAction(AliasActions.add().index(index).alias(alias));
return this;
}

@@ -54,7 +54,7 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<Ind
* @param alias The alias
*/
public IndicesAliasesRequestBuilder addAlias(String[] indices, String alias) {
request.addAlias(alias, indices);
request.addAliasAction(AliasActions.add().indices(indices).alias(alias));
return this;
}

@@ -66,8 +66,7 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<Ind
* @param filter The filter
*/
public IndicesAliasesRequestBuilder addAlias(String index, String alias, String filter) {
AliasActions action = new AliasActions(AliasAction.Type.ADD, index, alias).filter(filter);
request.addAliasAction(action);
request.addAliasAction(AliasActions.add().index(index).alias(alias).filter(filter));
return this;
}

@@ -79,8 +78,7 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<Ind
* @param filter The filter
*/
public IndicesAliasesRequestBuilder addAlias(String indices[], String alias, String filter) {
AliasActions action = new AliasActions(AliasAction.Type.ADD, indices, alias).filter(filter);
request.addAliasAction(action);
request.addAliasAction(AliasActions.add().indices(indices).alias(alias).filter(filter));
return this;
}

@@ -92,7 +90,7 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<Ind
* @param filter The filter
*/
public IndicesAliasesRequestBuilder addAlias(String[] indices, String alias, Map<String, Object> filter) {
request.addAlias(alias, filter, indices);
request.addAliasAction(AliasActions.add().indices(indices).alias(alias).filter(filter));
return this;
}

@@ -104,7 +102,7 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<Ind
* @param filter The filter
*/
public IndicesAliasesRequestBuilder addAlias(String index, String alias, Map<String, Object> filter) {
request.addAlias(alias, filter, index);
request.addAliasAction(AliasActions.add().index(index).alias(alias).filter(filter));
return this;
}

@@ -116,7 +114,7 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<Ind
* @param filterBuilder The filter
*/
public IndicesAliasesRequestBuilder addAlias(String indices[], String alias, QueryBuilder filterBuilder) {
request.addAlias(alias, filterBuilder, indices);
request.addAliasAction(AliasActions.add().indices(indices).alias(alias).filter(filterBuilder));
return this;
}

@@ -128,7 +126,7 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<Ind
* @param filterBuilder The filter
*/
public IndicesAliasesRequestBuilder addAlias(String index, String alias, QueryBuilder filterBuilder) {
request.addAlias(alias, filterBuilder, index);
request.addAliasAction(AliasActions.add().index(index).alias(alias).filter(filterBuilder));
return this;
}

@@ -139,7 +137,7 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<Ind
* @param alias The alias
*/
public IndicesAliasesRequestBuilder removeAlias(String index, String alias) {
request.removeAlias(index, alias);
request.addAliasAction(AliasActions.remove().index(index).alias(alias));
return this;
}

@@ -150,7 +148,7 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<Ind
* @param aliases The aliases
*/
public IndicesAliasesRequestBuilder removeAlias(String[] indices, String... aliases) {
request.removeAlias(indices, aliases);
request.addAliasAction(AliasActions.remove().indices(indices).aliases(aliases));
return this;
}

@@ -161,17 +159,12 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<Ind
* @param aliases The aliases
*/
public IndicesAliasesRequestBuilder removeAlias(String index, String[] aliases) {
request.removeAlias(index, aliases);
request.addAliasAction(AliasActions.remove().index(index).aliases(aliases));
return this;
}

/**
* Adds an alias action to the request.
*
* @param aliasAction The alias action
*/
public IndicesAliasesRequestBuilder addAliasAction(AliasAction aliasAction) {
request.addAliasAction(aliasAction);
public IndicesAliasesRequestBuilder removeIndex(String index) {
request.addAliasAction(AliasActions.removeIndex().index(index));
return this;
}
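The builder now routes every convenience method through the same AliasActions factories. A hedged usage sketch, assuming `client` is the usual client wrapper exposing `admin()`; the chained method names are the ones shown above:

// Illustrative only: swap an alias and drop a stale index in one request.
client.admin().indices().prepareAliases()
        .addAlias("logs-2016-09", "logs")
        .removeAlias("logs-2016-08", "logs")
        .removeIndex("logs-2016-01")   // newly exposed; replaces the removed addAliasAction(AliasAction)
        .get();
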
@@ -43,6 +43,8 @@ import java.util.HashSet;
import java.util.List;
import java.util.Set;

import static java.util.Collections.unmodifiableList;

/**
* Add/remove aliases action
*/

@@ -86,31 +88,38 @@ public class TransportIndicesAliasesAction extends TransportMasterNodeAction<Ind
//Expand the indices names
List<AliasActions> actions = request.aliasActions();
List<AliasAction> finalActions = new ArrayList<>();
boolean hasOnlyDeletesButNoneCanBeDone = true;

// Resolve all the AliasActions into AliasAction instances and gather all the aliases
Set<String> aliases = new HashSet<>();
for (AliasActions action : actions) {
//expand indices
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request.indicesOptions(), action.indices());
//collect the aliases
Collections.addAll(aliases, action.aliases());
for (String index : concreteIndices) {
for (String alias : action.concreteAliases(state.metaData(), index)) {
AliasAction finalAction = new AliasAction(action.aliasAction());
finalAction.index(index);
finalAction.alias(alias);
finalActions.add(finalAction);
//if there are only delete requests, none will be added if the types do not map to any existing type
hasOnlyDeletesButNoneCanBeDone = false;
switch (action.actionType()) {
case ADD:
for (String alias : action.concreteAliases(state.metaData(), index)) {
finalActions.add(new AliasAction.Add(index, alias, action.filter(), action.indexRouting(), action.searchRouting()));
}
break;
case REMOVE:
for (String alias : action.concreteAliases(state.metaData(), index)) {
finalActions.add(new AliasAction.Remove(index, alias));
}
break;
case REMOVE_INDEX:
finalActions.add(new AliasAction.RemoveIndex(index));
break;
default:
throw new IllegalArgumentException("Unsupported action [" + action.actionType() + "]");
}
}
}
if (hasOnlyDeletesButNoneCanBeDone && actions.size() != 0) {
if (finalActions.isEmpty() && false == actions.isEmpty()) {
throw new AliasesNotFoundException(aliases.toArray(new String[aliases.size()]));
}
request.aliasActions().clear();
IndicesAliasesClusterStateUpdateRequest updateRequest = new IndicesAliasesClusterStateUpdateRequest()
.ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
.actions(finalActions.toArray(new AliasAction[finalActions.size()]));
IndicesAliasesClusterStateUpdateRequest updateRequest = new IndicesAliasesClusterStateUpdateRequest(unmodifiableList(finalActions))
.ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout());

indexAliasesService.indicesAliases(updateRequest, new ActionListener<ClusterStateUpdateResponse>() {
@Override

@@ -19,6 +19,8 @@

package org.elasticsearch.action.admin.indices.close;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.DestructiveOperations;

@@ -108,7 +110,7 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIn
@Override
public void onFailure(Exception t) {
logger.debug("failed to close indices [{}]", t, (Object)concreteIndices);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to close indices [{}]", (Object) concreteIndices), t);
listener.onFailure(t);
}
});

@@ -19,6 +19,8 @@

package org.elasticsearch.action.admin.indices.delete;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.DestructiveOperations;

@@ -100,7 +102,7 @@ public class TransportDeleteIndexAction extends TransportMasterNodeAction<Delete
@Override
public void onFailure(Exception t) {
logger.debug("failed to delete indices [{}]", t, concreteIndices);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to delete indices [{}]", concreteIndices), t);
listener.onFailure(t);
}
});

@@ -19,6 +19,8 @@

package org.elasticsearch.action.admin.indices.mapping.put;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;

@@ -92,12 +94,12 @@ public class TransportPutMappingAction extends TransportMasterNodeAction<PutMapp
@Override
public void onFailure(Exception t) {
logger.debug("failed to put mappings on indices [{}], type [{}]", t, concreteIndices, request.type());
logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", concreteIndices, request.type()), t);
listener.onFailure(t);
}
});
} catch (IndexNotFoundException ex) {
logger.debug("failed to put mappings on indices [{}], type [{}]", ex, request.indices(), request.type());
logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", request.indices(), request.type()), ex);
throw ex;
}
}

@@ -19,6 +19,8 @@

package org.elasticsearch.action.admin.indices.open;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.DestructiveOperations;

@@ -93,7 +95,7 @@ public class TransportOpenIndexAction extends TransportMasterNodeAction<OpenInde
@Override
public void onFailure(Exception t) {
logger.debug("failed to open indices [{}]", t, (Object)concreteIndices);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to open indices [{}]", (Object) concreteIndices), t);
listener.onFailure(t);
}
});

@@ -47,11 +47,15 @@ import org.elasticsearch.index.shard.DocsStats;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.Arrays;
import java.util.List;
import java.util.Locale;
import java.util.Set;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

import static java.util.Collections.unmodifiableList;

/**
* Main class to swap the index pointed to by an alias, given some conditions
*/

@@ -156,13 +160,12 @@ public class TransportRolloverAction extends TransportMasterNodeAction<RolloverR
static IndicesAliasesClusterStateUpdateRequest prepareRolloverAliasesUpdateRequest(String oldIndex, String newIndex,
RolloverRequest request) {
final IndicesAliasesClusterStateUpdateRequest updateRequest = new IndicesAliasesClusterStateUpdateRequest()
List<AliasAction> actions = unmodifiableList(Arrays.asList(
new AliasAction.Add(newIndex, request.getAlias(), null, null, null),
new AliasAction.Remove(oldIndex, request.getAlias())));
final IndicesAliasesClusterStateUpdateRequest updateRequest = new IndicesAliasesClusterStateUpdateRequest(actions)
.ackTimeout(request.ackTimeout())
.masterNodeTimeout(request.masterNodeTimeout());
AliasAction[] actions = new AliasAction[2];
actions[0] = new AliasAction(AliasAction.Type.ADD, newIndex, request.getAlias());
actions[1] = new AliasAction(AliasAction.Type.REMOVE, oldIndex, request.getAlias());
updateRequest.actions(actions);
return updateRequest;
}
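The rollover rewrite shows the new immutable style end to end: rather than mutating the update request with an actions array, the two-step alias swap is built as an unmodifiable list up front. A sketch of the same swap with illustrative names, following the Add(index, alias, filter, indexRouting, searchRouting) shape used above:

// Illustrative only: point the write alias at the new index and detach the old one.
List<AliasAction> actions = unmodifiableList(Arrays.asList(
        new AliasAction.Add("logs-000002", "logs-write", null, null, null),  // filter/routings unset
        new AliasAction.Remove("logs-000001", "logs-write")));
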
@@ -19,6 +19,8 @@

package org.elasticsearch.action.admin.indices.settings.put;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;

@@ -92,7 +94,7 @@ public class TransportUpdateSettingsAction extends TransportMasterNodeAction<Upd
@Override
public void onFailure(Exception t) {
logger.debug("failed to update settings on indices [{}]", t, (Object)concreteIndices);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to update settings on indices [{}]", (Object) concreteIndices), t);
listener.onFailure(t);
}
});

@@ -18,6 +18,7 @@
*/
package org.elasticsearch.action.admin.indices.shards;

import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException;

@@ -41,7 +42,6 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.collect.ImmutableOpenIntMap;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.CountDown;
import org.elasticsearch.gateway.AsyncShardFetch;

@@ -150,7 +150,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
private class InternalAsyncFetch extends AsyncShardFetch<NodeGatewayStartedShards> {

InternalAsyncFetch(ESLogger logger, String type, ShardId shardId, TransportNodesListGatewayStartedShards action) {
InternalAsyncFetch(Logger logger, String type, ShardId shardId, TransportNodesListGatewayStartedShards action) {
super(logger, type, shardId, action);
}

@@ -18,6 +18,8 @@
*/
package org.elasticsearch.action.admin.indices.template.delete;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;

@@ -73,7 +75,7 @@ public class TransportDeleteIndexTemplateAction extends TransportMasterNodeActio
@Override
public void onFailure(Exception e) {
logger.debug("failed to delete templates [{}]", e, request.name());
logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to delete templates [{}]", request.name()), e);
listener.onFailure(e);
}
});

@@ -18,6 +18,8 @@
*/
package org.elasticsearch.action.admin.indices.template.put;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;

@@ -94,7 +96,7 @@ public class TransportPutIndexTemplateAction extends TransportMasterNodeAction<P
@Override
public void onFailure(Exception e) {
logger.debug("failed to put template [{}]", e, request.name());
logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to put template [{}]", request.name()), e);
listener.onFailure(e);
}
});

@@ -19,6 +19,8 @@

package org.elasticsearch.action.admin.indices.upgrade.post;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;

@@ -79,7 +81,7 @@ public class TransportUpgradeSettingsAction extends TransportMasterNodeAction<Up
@Override
public void onFailure(Exception t) {
logger.debug("failed to upgrade minimum compatibility version settings on indices [{}]", t, request.versions().keySet());
logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to upgrade minimum compatibility version settings on indices [{}]", request.versions().keySet()), t);
listener.onFailure(t);
}
});

@@ -18,9 +18,11 @@
 */
package org.elasticsearch.action.bulk;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;

@@ -31,7 +33,7 @@ import java.util.concurrent.TimeUnit;
 * Abstracts the low-level details of bulk request handling
 */
abstract class BulkRequestHandler {
protected final ESLogger logger;
protected final Logger logger;
protected final Client client;

protected BulkRequestHandler(Client client) {

@@ -76,12 +78,12 @@ abstract class BulkRequestHandler {
listener.afterBulk(executionId, bulkRequest, bulkResponse);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
logger.info("Bulk request {} has been cancelled.", e, executionId);
logger.info((Supplier<?>) () -> new ParameterizedMessage("Bulk request {} has been cancelled.", executionId), e);
if (!afterCalled) {
listener.afterBulk(executionId, bulkRequest, e);
}
} catch (Exception e) {
logger.warn("Failed to execute bulk request {}.", e, executionId);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("Failed to execute bulk request {}.", executionId), e);
if (!afterCalled) {
listener.afterBulk(executionId, bulkRequest, e);
}

@@ -142,10 +144,10 @@ abstract class BulkRequestHandler {
bulkRequestSetupSuccessful = true;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
logger.info("Bulk request {} has been cancelled.", e, executionId);
logger.info((Supplier<?>) () -> new ParameterizedMessage("Bulk request {} has been cancelled.", executionId), e);
listener.afterBulk(executionId, bulkRequest, e);
} catch (Exception e) {
logger.warn("Failed to execute bulk request {}.", e, executionId);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("Failed to execute bulk request {}.", executionId), e);
listener.afterBulk(executionId, bulkRequest, e);
} finally {
if (!bulkRequestSetupSuccessful && acquired) { // if we fail on client.bulk() release the semaphore

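Both InterruptedException handlers above call Thread.currentThread().interrupt() before logging; swallowing the exception without restoring the flag would hide the interruption from code further up the stack. A condensed sketch of that idiom under assumed names (awaitQuietly is illustrative, not an Elasticsearch method):

```java
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class InterruptHandlingSketch {
    // Waits for the latch but preserves the caller's interrupt status,
    // mirroring the catch blocks in BulkRequestHandler above.
    static boolean awaitQuietly(CountDownLatch latch, long timeout, TimeUnit unit) {
        try {
            return latch.await(timeout, unit);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // re-set the flag for callers
            return false;
        }
    }
}
```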
@@ -20,6 +20,7 @@
package org.elasticsearch.action.bulk;

import org.elasticsearch.action.support.replication.ReplicatedWriteRequest;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.shard.ShardId;

@@ -101,4 +102,15 @@ public class BulkShardRequest extends ReplicatedWriteRequest<BulkShardRequest> {
}
return b.toString();
}

@Override
public void onRetry() {
for (BulkItemRequest item : items) {
if (item.request() instanceof ReplicationRequest) {
// all replication requests need to be notified here as well to ie. make sure that internal optimizations are
// disabled see IndexRequest#canHaveDuplicates()
((ReplicationRequest) item.request()).onRetry();
}
}
}
}

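The new BulkShardRequest#onRetry is a plain fan-out: a retried shard-level bulk notifies every item-level request so each one can record that it may now reach a shard copy more than once. The shape, reduced to plain interfaces (these types are stand-ins for illustration, not the Elasticsearch classes):

```java
import java.util.List;

interface RetryNotifiable {
    void onRetry();
}

// A composite request forwards the retry notification to each part,
// the way BulkShardRequest notifies its item requests above.
class CompositeRetrySketch implements RetryNotifiable {
    private final List<RetryNotifiable> items;

    CompositeRetrySketch(List<RetryNotifiable> items) {
        this.items = items;
    }

    @Override
    public void onRetry() {
        for (RetryNotifiable item : items) {
            item.onRetry();
        }
    }
}
```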
@@ -18,12 +18,12 @@
 */
package org.elasticsearch.action.bulk;

import org.apache.logging.log4j.Logger;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.FutureUtils;

@@ -89,7 +89,7 @@ public class Retry {
}

static class AbstractRetryHandler implements ActionListener<BulkResponse> {
private final ESLogger logger;
private final Logger logger;
private final Client client;
private final ActionListener<BulkResponse> listener;
private final Iterator<TimeValue> backoff;

@@ -19,6 +19,8 @@

package org.elasticsearch.action.bulk;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionRequest;

@@ -30,8 +32,8 @@ import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.index.TransportIndexAction;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.support.replication.TransportWriteAction;
import org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo;
import org.elasticsearch.action.support.replication.TransportWriteAction;
import org.elasticsearch.action.update.UpdateHelper;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateResponse;

@@ -183,9 +185,9 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ

private <ReplicationRequestT extends ReplicationRequest<ReplicationRequestT>> void logFailure(Throwable t, String operation, ShardId shardId, ReplicationRequest<ReplicationRequestT> request) {
if (ExceptionsHelper.status(t) == RestStatus.CONFLICT) {
logger.trace("{} failed to execute bulk item ({}) {}", t, shardId, operation, request);
logger.trace((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", shardId, operation, request), t);
} else {
logger.debug("{} failed to execute bulk item ({}) {}", t, shardId, operation, request);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", shardId, operation, request), t);
}
}

@@ -19,6 +19,8 @@

package org.elasticsearch.action.get;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.TransportActions;

@@ -92,7 +94,7 @@ public class TransportShardMultiGetAction extends TransportSingleShardAction<Mul
if (TransportActions.isShardNotAvailableException(e)) {
throw (ElasticsearchException) e;
} else {
logger.debug("{} failed to execute multi_get for [{}]/[{}]", e, shardId, item.type(), item.id());
logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute multi_get for [{}]/[{}]", shardId, item.type(), item.id()), e);
response.add(request.locations.get(i), new MultiGetResponse.Failure(request.index(), item.type(), item.id(), e));
}
}

@@ -72,7 +72,7 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
/**
 * Operation type controls if the type of the index operation.
 */
public static enum OpType {
public enum OpType {
/**
 * Index the source. If there an existing document with the id, it will
 * be replaced.

@@ -152,6 +152,17 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement

private String pipeline;

/**
 * Value for {@link #getAutoGeneratedTimestamp()} if the document has an external
 * provided ID.
 */
public static final int UNSET_AUTO_GENERATED_TIMESTAMP = -1;

private long autoGeneratedTimestamp = UNSET_AUTO_GENERATED_TIMESTAMP;

private boolean isRetry = false;

public IndexRequest() {
}

@@ -202,6 +213,10 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
}
}

if (opType() != OpType.INDEX && id == null) {
addValidationError("an id is required for a " + opType() + " operation", validationException);
}

if (!versionType.validateVersionForWrites(version)) {
validationException = addValidationError("illegal version value [" + version + "] for version type [" + versionType.name() + "]", validationException);
}

@@ -216,6 +231,11 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
validationException = addValidationError("id is too long, must be no longer than 512 bytes but was: " +
id.getBytes(StandardCharsets.UTF_8).length, validationException);
}

if (id == null && (versionType == VersionType.INTERNAL && version == Versions.MATCH_ANY) == false) {
validationException = addValidationError("an id must be provided if version type or value are set", validationException);
}

return validationException;
}

@@ -589,10 +609,10 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
}

// generate id if not already provided and id generation is allowed
if (allowIdGeneration) {
if (id == null) {
id(UUIDs.base64UUID());
}
if (allowIdGeneration && id == null) {
assert autoGeneratedTimestamp == -1;
autoGeneratedTimestamp = Math.max(0, System.currentTimeMillis()); // extra paranoia
id(UUIDs.base64UUID());
}

// generate timestamp if not provided, we always have one post this stage...

@@ -639,6 +659,8 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
version = in.readLong();
versionType = VersionType.fromValue(in.readByte());
pipeline = in.readOptionalString();
isRetry = in.readBoolean();
autoGeneratedTimestamp = in.readLong();
}

@Override

@@ -655,6 +677,8 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
out.writeLong(version);
out.writeByte(versionType.getValue());
out.writeOptionalString(pipeline);
out.writeBoolean(isRetry);
out.writeLong(autoGeneratedTimestamp);
}

@Override

@@ -667,4 +691,25 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
}
return "index {[" + index + "][" + type + "][" + id + "], source[" + sSource + "]}";
}

/**
 * Returns <code>true</code> if this request has been sent to a shard copy more than once.
 */
public boolean isRetry() {
return isRetry;
}

@Override
public void onRetry() {
isRetry = true;
}

/**
 * Returns the timestamp the auto generated ID was created or {@value #UNSET_AUTO_GENERATED_TIMESTAMP} if the
 * document has no auto generated timestamp. This method will return a positive value iff the id was auto generated.
 */
public long getAutoGeneratedTimestamp() {
return autoGeneratedTimestamp;
}
}

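Taken together, the autoGeneratedTimestamp and isRetry fields let the engine distinguish three cases: an externally supplied ID, a freshly auto-generated ID (safe for an append-only fast path, since the ID cannot already exist), and a retried auto-ID request (a duplicate may already have been indexed). A hedged sketch of that decision; the helper below is illustrative and not the actual engine code:

```java
public class AutoIdFastPathSketch {
    static final long UNSET_AUTO_GENERATED_TIMESTAMP = -1;

    // Illustrative only: approximates the distinction the engine can draw
    // from IndexRequest#getAutoGeneratedTimestamp() and IndexRequest#isRetry().
    static boolean appendOnlySafe(long autoGeneratedTimestamp, boolean isRetry) {
        if (autoGeneratedTimestamp == UNSET_AUTO_GENERATED_TIMESTAMP) {
            return false; // caller chose the ID; an existing doc may need replacing
        }
        // A freshly auto-generated ID is unique by construction; only a retried
        // request could have already written a document under the same ID.
        return isRetry == false;
    }
}
```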
@@ -205,15 +205,6 @@ public class IndexRequestBuilder extends ReplicationRequestBuilder<IndexRequest,
return this;
}

/**
 * Sets a string representation of the {@link #setOpType(org.elasticsearch.action.index.IndexRequest.OpType)}. Can
 * be either "index" or "create".
 */
public IndexRequestBuilder setOpType(String opType) {
request.opType(IndexRequest.OpType.fromString(opType));
return this;
}

/**
 * Set to <tt>true</tt> to force this index to use {@link org.elasticsearch.action.index.IndexRequest.OpType#CREATE}.
 */

@@ -158,7 +158,7 @@ public class TransportIndexAction extends TransportWriteAction<IndexRequest, Ind
SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA, shardId.getIndexName(), request.type(), request.id(), request.source())
.routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl());

final Engine.Index operation = indexShard.prepareIndexOnReplica(sourceToParse, request.version(), request.versionType());
final Engine.Index operation = indexShard.prepareIndexOnReplica(sourceToParse, request.version(), request.versionType(), request.getAutoGeneratedTimestamp(), request.isRetry());
Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
if (update != null) {
throw new RetryOnReplicaException(shardId, "Mappings are not available on the replica yet, triggered update: " + update);

@@ -171,7 +171,7 @@ public class TransportIndexAction extends TransportWriteAction<IndexRequest, Ind
public static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard indexShard) {
SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.PRIMARY, request.index(), request.type(), request.id(), request.source())
.routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl());
return indexShard.prepareIndexOnPrimary(sourceToParse, request.version(), request.versionType());
return indexShard.prepareIndexOnPrimary(sourceToParse, request.version(), request.versionType(), request.getAutoGeneratedTimestamp(), request.isRetry());
}

public static WriteResult<IndexResponse> executeIndexRequestOnPrimary(IndexRequest request, IndexShard indexShard,

@@ -19,6 +19,8 @@

package org.elasticsearch.action.ingest;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;

@@ -90,7 +92,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
void processIndexRequest(Task task, String action, ActionListener listener, ActionFilterChain chain, IndexRequest indexRequest) {

executionService.executeIndexRequest(indexRequest, t -> {
logger.error("failed to execute pipeline [{}]", t, indexRequest.getPipeline());
logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to execute pipeline [{}]", indexRequest.getPipeline()), t);
listener.onFailure(t);
}, success -> {
// TransportIndexAction uses IndexRequest and same action name on the node that receives the request and the node that

@@ -105,7 +107,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
long ingestStartTimeInNanos = System.nanoTime();
BulkRequestModifier bulkRequestModifier = new BulkRequestModifier(original);
executionService.executeBulkRequest(() -> bulkRequestModifier, (indexRequest, exception) -> {
logger.debug("failed to execute pipeline [{}] for document [{}/{}/{}]", exception, indexRequest.getPipeline(), indexRequest.index(), indexRequest.type(), indexRequest.id());
logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to execute pipeline [{}] for document [{}/{}/{}]", indexRequest.getPipeline(), indexRequest.index(), indexRequest.type(), indexRequest.id()), exception);
bulkRequestModifier.markCurrentItemAsFailed(exception);
}, (exception) -> {
if (exception != null) {

@@ -20,8 +20,10 @@
package org.elasticsearch.action.search;

import com.carrotsearch.hppc.IntArrayList;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.NoShardAvailableActionException;
import org.elasticsearch.action.support.TransportActions;

@@ -35,7 +37,6 @@ import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.SearchShardTarget;

@@ -46,7 +47,6 @@ import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.query.QuerySearchResultProvider;
import org.elasticsearch.search.suggest.Suggest;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.List;

@@ -58,7 +58,7 @@ import static org.elasticsearch.action.search.TransportSearchHelper.internalSear

abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult> extends AbstractAsyncAction {

protected final ESLogger logger;
protected final Logger logger;
protected final SearchTransportService searchTransportService;
private final IndexNameExpressionResolver indexNameExpressionResolver;
protected final SearchPhaseController searchPhaseController;

@@ -77,7 +77,7 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
private final Object shardFailuresMutex = new Object();
protected volatile ScoreDoc[] sortedShardDocs;

protected AbstractSearchAsyncAction(ESLogger logger, SearchTransportService searchTransportService, ClusterService clusterService,
protected AbstractSearchAsyncAction(Logger logger, SearchTransportService searchTransportService, ClusterService clusterService,
IndexNameExpressionResolver indexNameExpressionResolver,
SearchPhaseController searchPhaseController, ThreadPool threadPool, SearchRequest request,
ActionListener<SearchResponse> listener) {

@@ -191,7 +191,12 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
innerMoveToSecondPhase();
} catch (Exception e) {
if (logger.isDebugEnabled()) {
logger.debug("{}: Failed to execute [{}] while moving to second phase", e, shardIt.shardId(), request);
logger.debug(
(Supplier<?>) () -> new ParameterizedMessage(
"{}: Failed to execute [{}] while moving to second phase",
shardIt.shardId(),
request),
e);
}
raiseEarlyFailure(new ReduceSearchPhaseException(firstPhaseName(), "", e, buildShardFailures()));
}

@@ -211,15 +216,21 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
if (totalOps.incrementAndGet() == expectedTotalOps) {
if (logger.isDebugEnabled()) {
if (e != null && !TransportActions.isShardNotAvailableException(e)) {
logger.debug("{}: Failed to execute [{}]", e, shard != null ? shard.shortSummary() : shardIt.shardId(), request);
logger.debug(
(Supplier<?>) () -> new ParameterizedMessage(
"{}: Failed to execute [{}]",
shard != null ? shard.shortSummary() :
shardIt.shardId(),
request),
e);
} else if (logger.isTraceEnabled()) {
logger.trace("{}: Failed to execute [{}]", e, shard, request);
logger.trace((Supplier<?>) () -> new ParameterizedMessage("{}: Failed to execute [{}]", shard, request), e);
}
}
final ShardSearchFailure[] shardSearchFailures = buildShardFailures();
if (successfulOps.get() == 0) {
if (logger.isDebugEnabled()) {
logger.debug("All shards failed for phase: [{}]", e, firstPhaseName());
logger.debug((Supplier<?>) () -> new ParameterizedMessage("All shards failed for phase: [{}]", firstPhaseName()), e);
}

// no successful ops, raise an exception

@@ -236,10 +247,13 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
final ShardRouting nextShard = shardIt.nextOrNull();
final boolean lastShard = nextShard == null;
// trace log this exception
if (logger.isTraceEnabled()) {
logger.trace("{}: Failed to execute [{}] lastShard [{}]", e, shard != null ? shard.shortSummary() : shardIt.shardId(),
request, lastShard);
}
logger.trace(
(Supplier<?>) () -> new ParameterizedMessage(
"{}: Failed to execute [{}] lastShard [{}]",
shard != null ? shard.shortSummary() : shardIt.shardId(),
request,
lastShard),
e);
if (!lastShard) {
try {
performFirstPhase(shardIndex, shardIt, nextShard);

@@ -251,8 +265,14 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
// no more shards active, add a failure
if (logger.isDebugEnabled() && !logger.isTraceEnabled()) { // do not double log this exception
if (e != null && !TransportActions.isShardNotAvailableException(e)) {
logger.debug("{}: Failed to execute [{}] lastShard [{}]", e,
shard != null ? shard.shortSummary() : shardIt.shardId(), request, lastShard);
logger.debug(
(Supplier<?>) () -> new ParameterizedMessage(
"{}: Failed to execute [{}] lastShard [{}]",
shard != null ? shard.shortSummary() :
shardIt.shardId(),
request,
lastShard),
e);
}
}
}

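One side effect of the supplier form is that the surrounding isDebugEnabled()/isTraceEnabled() guards become almost redundant: Log4j 2 evaluates the lambda only when the level is enabled, so the guard now saves just the lambda allocation. A compact illustration (expensiveSummary stands in for calls like shard.shortSummary()):

```java
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

public class LazyMessageSketch {
    private static final Logger logger = LogManager.getLogger(LazyMessageSketch.class);

    static String expensiveSummary() {
        return "shard summary"; // imagine an expensive computation here
    }

    public static void main(String[] args) {
        // No explicit level check needed: the supplier runs only at DEBUG or finer.
        logger.debug((Supplier<?>) () -> new ParameterizedMessage(
            "{}: failed to execute", expensiveSummary()));
    }
}
```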
@@ -19,12 +19,14 @@

package org.elasticsearch.action.search;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;

@@ -43,7 +45,7 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<DfsSea

private final AtomicArray<QueryFetchSearchResult> queryFetchResults;

SearchDfsQueryAndFetchAsyncAction(ESLogger logger, SearchTransportService searchTransportService,
SearchDfsQueryAndFetchAsyncAction(Logger logger, SearchTransportService searchTransportService,
ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
SearchPhaseController searchPhaseController, ThreadPool threadPool,
SearchRequest request, ActionListener<SearchResponse> listener) {

@@ -105,7 +107,7 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<DfsSea
void onSecondPhaseFailure(Exception e, QuerySearchRequest querySearchRequest, int shardIndex, DfsSearchResult dfsResult,
AtomicInteger counter) {
if (logger.isDebugEnabled()) {
logger.debug("[{}] Failed to execute query phase", e, querySearchRequest.id());
logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute query phase", querySearchRequest.id()), e);
}
this.addShardFailure(shardIndex, dfsResult.shardTarget(), e);
successfulOps.decrementAndGet();

@@ -20,13 +20,15 @@
package org.elasticsearch.action.search;

import com.carrotsearch.hppc.IntArrayList;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.action.SearchTransportService;

@@ -50,7 +52,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSe
final AtomicArray<FetchSearchResult> fetchResults;
final AtomicArray<IntArrayList> docIdsToLoad;

SearchDfsQueryThenFetchAsyncAction(ESLogger logger, SearchTransportService searchTransportService,
SearchDfsQueryThenFetchAsyncAction(Logger logger, SearchTransportService searchTransportService,
ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
SearchPhaseController searchPhaseController, ThreadPool threadPool,
SearchRequest request, ActionListener<SearchResponse> listener) {

@@ -113,7 +115,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSe
void onQueryFailure(Exception e, QuerySearchRequest querySearchRequest, int shardIndex, DfsSearchResult dfsResult,
AtomicInteger counter) {
if (logger.isDebugEnabled()) {
logger.debug("[{}] Failed to execute query phase", e, querySearchRequest.id());
logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute query phase", querySearchRequest.id()), e);
}
this.addShardFailure(shardIndex, dfsResult.shardTarget(), e);
successfulOps.decrementAndGet();

@@ -182,7 +184,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSe
void onFetchFailure(Exception e, ShardFetchSearchRequest fetchSearchRequest, int shardIndex,
SearchShardTarget shardTarget, AtomicInteger counter) {
if (logger.isDebugEnabled()) {
logger.debug("[{}] Failed to execute fetch phase", e, fetchSearchRequest.id());
logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute fetch phase", fetchSearchRequest.id()), e);
}
this.addShardFailure(shardIndex, shardTarget, e);
successfulOps.decrementAndGet();

@@ -19,12 +19,12 @@

package org.elasticsearch.action.search;

import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.QueryFetchSearchResult;

@@ -36,7 +36,7 @@ import java.io.IOException;

class SearchQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<QueryFetchSearchResult> {

SearchQueryAndFetchAsyncAction(ESLogger logger, SearchTransportService searchTransportService,
SearchQueryAndFetchAsyncAction(Logger logger, SearchTransportService searchTransportService,
ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
SearchPhaseController searchPhaseController, ThreadPool threadPool,
SearchRequest request, ActionListener<SearchResponse> listener) {

@@ -20,13 +20,15 @@
package org.elasticsearch.action.search;

import com.carrotsearch.hppc.IntArrayList;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.action.SearchTransportService;

@@ -46,7 +48,7 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<QuerySea
final AtomicArray<FetchSearchResult> fetchResults;
final AtomicArray<IntArrayList> docIdsToLoad;

SearchQueryThenFetchAsyncAction(ESLogger logger, SearchTransportService searchService,
SearchQueryThenFetchAsyncAction(Logger logger, SearchTransportService searchService,
ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
SearchPhaseController searchPhaseController, ThreadPool threadPool,
SearchRequest request, ActionListener<SearchResponse> listener) {

@@ -115,7 +117,7 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<QuerySea
void onFetchFailure(Exception e, ShardFetchSearchRequest fetchSearchRequest, int shardIndex, SearchShardTarget shardTarget,
AtomicInteger counter) {
if (logger.isDebugEnabled()) {
logger.debug("[{}] Failed to execute fetch phase", e, fetchSearchRequest.id());
logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute fetch phase", fetchSearchRequest.id()), e);
}
this.addShardFailure(shardIndex, shardTarget, e);
successfulOps.decrementAndGet();

@@ -19,12 +19,14 @@

package org.elasticsearch.action.search;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;

@@ -40,7 +42,7 @@ import static org.elasticsearch.action.search.TransportSearchHelper.internalScro

class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {

private final ESLogger logger;
private final Logger logger;
private final SearchPhaseController searchPhaseController;
private final SearchTransportService searchTransportService;
private final SearchScrollRequest request;

@@ -52,7 +54,7 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {
private final AtomicInteger successfulOps;
private final AtomicInteger counter;

SearchScrollQueryAndFetchAsyncAction(ESLogger logger, ClusterService clusterService,
SearchScrollQueryAndFetchAsyncAction(Logger logger, ClusterService clusterService,
SearchTransportService searchTransportService, SearchPhaseController searchPhaseController,
SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
this.logger = logger;

@@ -146,7 +148,7 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {

private void onPhaseFailure(Exception e, long searchId, int shardIndex) {
if (logger.isDebugEnabled()) {
logger.debug("[{}] Failed to execute query phase", e, searchId);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute query phase", searchId), e);
}
addShardFailure(shardIndex, new ShardSearchFailure(e));
successfulOps.decrementAndGet();

@@ -20,12 +20,14 @@
package org.elasticsearch.action.search;

import com.carrotsearch.hppc.IntArrayList;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;

@@ -43,7 +45,7 @@ import static org.elasticsearch.action.search.TransportSearchHelper.internalScro

class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {

private final ESLogger logger;
private final Logger logger;
private final SearchTransportService searchTransportService;
private final SearchPhaseController searchPhaseController;
private final SearchScrollRequest request;

@@ -56,7 +58,7 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
private volatile ScoreDoc[] sortedShardDocs;
private final AtomicInteger successfulOps;

SearchScrollQueryThenFetchAsyncAction(ESLogger logger, ClusterService clusterService,
SearchScrollQueryThenFetchAsyncAction(Logger logger, ClusterService clusterService,
SearchTransportService searchTransportService, SearchPhaseController searchPhaseController,
SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
this.logger = logger;

@@ -146,7 +148,7 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {

void onQueryPhaseFailure(final int shardIndex, final AtomicInteger counter, final long searchId, Exception failure) {
if (logger.isDebugEnabled()) {
logger.debug("[{}] Failed to execute query phase", failure, searchId);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute query phase", searchId), failure);
}
addShardFailure(shardIndex, new ShardSearchFailure(failure));
successfulOps.decrementAndGet();

@@ -19,6 +19,8 @@

package org.elasticsearch.action.search;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;

@@ -144,7 +146,7 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScro
}

void onFailedFreedContext(Throwable e, DiscoveryNode node) {
logger.warn("Clear SC failed on node[{}]", e, node);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("Clear SC failed on node[{}]", node), e);
if (expectedOps.countDown()) {
listener.onResponse(new ClearScrollResponse(false, numberOfFreedSearchContexts.get()));
} else {

@@ -19,9 +19,9 @@

package org.elasticsearch.action.support;

import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ListenableActionFuture;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.threadpool.ThreadPool;

@@ -33,7 +33,7 @@ import java.util.List;
 */
public abstract class AbstractListenableActionFuture<T, L> extends AdapterActionFuture<T, L> implements ListenableActionFuture<T> {

private static final ESLogger logger = Loggers.getLogger(AbstractListenableActionFuture.class);
private static final Logger logger = Loggers.getLogger(AbstractListenableActionFuture.class);

final ThreadPool threadPool;
volatile Object listeners;

@@ -120,9 +120,25 @@ public final class ActiveShardCount implements Writeable {
}
}

/**
 * Returns true iff the given number of active shards is enough to meet
 * the required shard count represented by this instance. This method
 * should only be invoked with {@link ActiveShardCount} objects created
 * from {@link #from(int)}, or {@link #NONE} or {@link #ONE}.
 */
public boolean enoughShardsActive(final int activeShardCount) {
if (this.value < 0) {
throw new IllegalStateException("not enough information to resolve to shard count");
}
if (activeShardCount < 0) {
throw new IllegalArgumentException("activeShardCount cannot be negative");
}
return this.value <= activeShardCount;
}

/**
 * Returns true iff the given cluster state's routing table contains enough active
 * shards to meet the required shard count represented by this instance.
 * shards for the given index to meet the required shard count represented by this instance.
 */
public boolean enoughShardsActive(final ClusterState clusterState, final String indexName) {
if (this == ActiveShardCount.NONE) {

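The numeric overload reduces to value <= activeShardCount once the unresolved sentinels are ruled out. A small stand-in class with the same contract, useful for experimenting with the semantics outside a cluster (this is not the Elasticsearch class itself):

```java
public class ActiveShardCountSketch {
    private final int value; // a negative value would mean ALL/DEFAULT, which this check rejects

    private ActiveShardCountSketch(int value) {
        this.value = value;
    }

    static ActiveShardCountSketch from(int value) {
        if (value < 0) {
            throw new IllegalArgumentException("shard count cannot be a negative value");
        }
        return new ActiveShardCountSketch(value);
    }

    // Mirrors the documented contract above: resolvable counts only, non-negative input.
    boolean enoughShardsActive(int activeShardCount) {
        if (value < 0) {
            throw new IllegalStateException("not enough information to resolve to shard count");
        }
        if (activeShardCount < 0) {
            throw new IllegalArgumentException("activeShardCount cannot be negative");
        }
        return value <= activeShardCount;
    }

    public static void main(String[] args) {
        ActiveShardCountSketch waitForTwo = from(2);
        System.out.println(waitForTwo.enoughShardsActive(1)); // false
        System.out.println(waitForTwo.enoughShardsActive(3)); // true
    }
}
```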
@@ -24,8 +24,8 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;

@@ -41,16 +41,17 @@ import java.util.List;
public final class AutoCreateIndex {

public static final Setting<AutoCreate> AUTO_CREATE_INDEX_SETTING =
new Setting<>("action.auto_create_index", "true", AutoCreate::new, Property.NodeScope);
new Setting<>("action.auto_create_index", "true", AutoCreate::new, Property.NodeScope, Setting.Property.Dynamic);

private final boolean dynamicMappingDisabled;
private final IndexNameExpressionResolver resolver;
private final AutoCreate autoCreate;
private volatile AutoCreate autoCreate;

public AutoCreateIndex(Settings settings, IndexNameExpressionResolver resolver) {
public AutoCreateIndex(Settings settings, ClusterSettings clusterSettings, IndexNameExpressionResolver resolver) {
this.resolver = resolver;
dynamicMappingDisabled = !MapperService.INDEX_MAPPER_DYNAMIC_SETTING.get(settings);
this.autoCreate = AUTO_CREATE_INDEX_SETTING.get(settings);
clusterSettings.addSettingsUpdateConsumer(AUTO_CREATE_INDEX_SETTING, this::setAutoCreate);
}

/**

@@ -64,6 +65,8 @@ public final class AutoCreateIndex {
 * Should the index be auto created?
 */
public boolean shouldAutoCreate(String index, ClusterState state) {
// One volatile read, so that all checks are done against the same instance:
final AutoCreate autoCreate = this.autoCreate;
if (autoCreate.autoCreateIndex == false) {
return false;
}

@@ -87,7 +90,15 @@ public final class AutoCreateIndex {
return false;
}

private static class AutoCreate {
AutoCreate getAutoCreate() {
return autoCreate;
}

void setAutoCreate(AutoCreate autoCreate) {
this.autoCreate = autoCreate;
}

static class AutoCreate {
private final boolean autoCreateIndex;
private final List<Tuple<String, Boolean>> expressions;

@@ -128,5 +139,13 @@ public final class AutoCreateIndex {
this.expressions = expressions;
this.autoCreateIndex = autoCreateIndex;
}

boolean isAutoCreateIndex() {
return autoCreateIndex;
}

List<Tuple<String, Boolean>> getExpressions() {
return expressions;
}
}
}

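The AutoCreateIndex change follows the usual recipe for making a node setting updatable at runtime: declare it Setting.Property.Dynamic, hold the parsed value in a volatile field, and register a consumer via ClusterSettings#addSettingsUpdateConsumer (visible in the constructor above). The "one volatile read" comment is the key detail; a generic sketch of that publication pattern under assumed names (these types are illustrative, not Elasticsearch classes):

```java
import java.util.function.Consumer;

// Generic shape of a dynamically updatable setting: parse once into an
// immutable value object, publish it through a volatile field, and expose
// a consumer that the settings infrastructure calls on each update.
class DynamicSettingHolder<T> {
    private volatile T current;

    DynamicSettingHolder(T initial) {
        this.current = initial;
    }

    Consumer<T> updater() {
        return value -> this.current = value; // one volatile write swaps the whole value
    }

    T get() {
        return current; // one volatile read; all subsequent checks use the same snapshot
    }
}
```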
@@ -18,6 +18,7 @@
 */
package org.elasticsearch.action.support;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;

@@ -75,8 +76,13 @@ public abstract class HandledTransportAction<Request extends ActionRequest<Reque
try {
channel.sendResponse(e);
} catch (Exception e1) {
logger.warn("Failed to send error response for action [{}] and request [{}]", e1,
actionName, request);
logger.warn(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage(
"Failed to send error response for action [{}] and request [{}]",
actionName,
request),
e1);
}
}
});

@@ -19,10 +19,12 @@

package org.elasticsearch.action.support;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.threadpool.ThreadPool;

@@ -39,12 +41,12 @@ public final class ThreadedActionListener<Response> implements ActionListener<Re
 */
public static class Wrapper {

private final ESLogger logger;
private final Logger logger;
private final ThreadPool threadPool;

private final boolean threadedListener;

public Wrapper(ESLogger logger, Settings settings, ThreadPool threadPool) {
public Wrapper(Logger logger, Settings settings, ThreadPool threadPool) {
this.logger = logger;
this.threadPool = threadPool;
// Should the action listener be threaded or not by default. Action listeners are automatically threaded for

@@ -68,13 +70,13 @@ public final class ThreadedActionListener<Response> implements ActionListener<Re
}
}

private final ESLogger logger;
private final Logger logger;
private final ThreadPool threadPool;
private final String executor;
private final ActionListener<Response> listener;
private final boolean forceExecution;

public ThreadedActionListener(ESLogger logger, ThreadPool threadPool, String executor, ActionListener<Response> listener,
public ThreadedActionListener(Logger logger, ThreadPool threadPool, String executor, ActionListener<Response> listener,
boolean forceExecution) {
this.logger = logger;
this.threadPool = threadPool;

@@ -118,7 +120,8 @@ public final class ThreadedActionListener<Response> implements ActionListener<Re

@Override
public void onFailure(Exception e) {
logger.warn("failed to execute failure callback on [{}], failure [{}]", e, listener, e);
logger.warn(
(Supplier<?>) () -> new ParameterizedMessage("failed to execute failure callback on [{}]", listener), e);
}
});
}

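ThreadedActionListener exists to bounce listener callbacks off the calling (often transport) thread onto a thread pool. The same decoupling can be sketched with plain java.util.concurrent types; the Listener interface below is a stand-in for Elasticsearch's ActionListener, not the real one:

```java
import java.util.concurrent.Executor;

interface Listener<T> {
    void onResponse(T response);
    void onFailure(Exception e);
}

// Re-dispatches callbacks onto an executor so slow listeners
// cannot stall the thread that completed the action.
class ThreadedListenerSketch<T> implements Listener<T> {
    private final Executor executor;
    private final Listener<T> delegate;

    ThreadedListenerSketch(Executor executor, Listener<T> delegate) {
        this.executor = executor;
        this.delegate = delegate;
    }

    @Override
    public void onResponse(T response) {
        executor.execute(() -> delegate.onResponse(response));
    }

    @Override
    public void onFailure(Exception e) {
        executor.execute(() -> delegate.onFailure(e));
    }
}
```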
@@ -19,6 +19,7 @@

package org.elasticsearch.action.support;

import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;

@@ -27,7 +28,6 @@ import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskListener;

@@ -165,9 +165,9 @@ public abstract class TransportAction<Request extends ActionRequest<Request>, Re

private final TransportAction<Request, Response> action;
private final AtomicInteger index = new AtomicInteger();
private final ESLogger logger;
private final Logger logger;

private RequestFilterChain(TransportAction<Request, Response> action, ESLogger logger) {
private RequestFilterChain(TransportAction<Request, Response> action, Logger logger) {
this.action = action;
this.logger = logger;
}

@@ -201,9 +201,9 @@ public abstract class TransportAction<Request extends ActionRequest<Request>, Re

private final ActionFilter[] filters;
private final AtomicInteger index;
private final ESLogger logger;
private final Logger logger;

private ResponseFilterChain(ActionFilter[] filters, ESLogger logger) {
private ResponseFilterChain(ActionFilter[] filters, Logger logger) {
this.filters = filters;
this.index = new AtomicInteger(filters.length);
this.logger = logger;

@@ -19,6 +19,7 @@

package org.elasticsearch.action.support.broadcast;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.NoShardAvailableActionException;
import org.elasticsearch.action.support.ActionFilters;

@@ -37,10 +38,10 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;

import java.util.concurrent.atomic.AtomicInteger;

@@ -224,7 +225,13 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest<
if (e != null) {
if (logger.isTraceEnabled()) {
if (!TransportActions.isShardNotAvailableException(e)) {
logger.trace("{}: failed to execute [{}]", e, shard != null ? shard.shortSummary() : shardIt.shardId(), request);
logger.trace(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage(
"{}: failed to execute [{}]",
shard != null ? shard.shortSummary() : shardIt.shardId(),
request),
e);
}
}
}

@@ -233,7 +240,13 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest<
if (logger.isDebugEnabled()) {
if (e != null) {
if (!TransportActions.isShardNotAvailableException(e)) {
logger.debug("{}: failed to execute [{}]", e, shard != null ? shard.shortSummary() : shardIt.shardId(), request);
logger.debug(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage(
"{}: failed to execute [{}]",
shard != null ? shard.shortSummary() : shardIt.shardId(),
request),
e);
}
}
}

@@ -19,6 +19,7 @@

package org.elasticsearch.action.support.broadcast.node;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.IndicesRequest;

@@ -46,13 +47,13 @@ import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.NodeShouldNotConnectException;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;

@@ -363,7 +364,9 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
protected void onNodeFailure(DiscoveryNode node, int nodeIndex, Throwable t) {
String nodeId = node.getId();
if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) {
logger.debug("failed to execute [{}] on node [{}]", t, actionName, nodeId);
logger.debug(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage("failed to execute [{}] on node [{}]", actionName, nodeId), t);
}

// this is defensive to protect against the possibility of double invocation

@@ -441,11 +444,23 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
shardResults[shardIndex] = failure;
if (TransportActions.isShardNotAvailableException(e)) {
if (logger.isTraceEnabled()) {
logger.trace("[{}] failed to execute operation for shard [{}]", e, actionName, shardRouting.shortSummary());
logger.trace(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage(
"[{}] failed to execute operation for shard [{}]",
actionName,
shardRouting.shortSummary()),
e);
}
} else {
if (logger.isDebugEnabled()) {
logger.debug("[{}] failed to execute operation for shard [{}]", e, actionName, shardRouting.shortSummary());
logger.debug(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage(
"[{}] failed to execute operation for shard [{}]",
actionName,
shardRouting.shortSummary()),
e);
}
}
}

@@ -19,6 +19,7 @@

package org.elasticsearch.action.support.master;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionListenerResponseHandler;
import org.elasticsearch.action.ActionResponse;

@@ -155,7 +156,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
public void onFailure(Exception t) {
if (t instanceof Discovery.FailedToCommitClusterStateException
|| (t instanceof NotMasterException)) {
logger.debug("master could not publish cluster state or stepped down before publishing action [{}], scheduling a retry", t, actionName);
logger.debug((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("master could not publish cluster state or stepped down before publishing action [{}], scheduling a retry", actionName), t);
retry(t, MasterNodeChangePredicate.INSTANCE);
} else {
listener.onFailure(t);

@@ -209,7 +210,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques

@Override
public void onTimeout(TimeValue timeout) {
logger.debug("timed out while retrying [{}] after failure (timeout [{}])", failure, actionName, timeout);
logger.debug((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("timed out while retrying [{}] after failure (timeout [{}])", actionName, timeout), failure);
listener.onFailure(new MasterNotDiscoveredException(failure));
}
}, changePredicate

@@ -19,6 +19,7 @@

package org.elasticsearch.action.support.nodes;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.NoSuchNodeException;

@@ -31,13 +32,13 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.NodeShouldNotConnectException;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;

import java.util.ArrayList;

@@ -238,7 +239,9 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest

private void onFailure(int idx, String nodeId, Throwable t) {
if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) {
logger.debug("failed to execute on node [{}]", t, nodeId);
logger.debug(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage("failed to execute on node [{}]", nodeId), t);
}
if (accumulateExceptions()) {
responses.set(idx, new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t));

@@ -18,6 +18,8 @@
 */
package org.elasticsearch.action.support.replication;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;

@@ -31,7 +33,6 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.index.shard.ShardId;

@@ -56,7 +57,7 @@ public class ReplicationOperation<
ReplicaRequest extends ReplicationRequest<ReplicaRequest>,
PrimaryResultT extends ReplicationOperation.PrimaryResult<ReplicaRequest>
> {
private final ESLogger logger;
private final Logger logger;
private final Request request;
private final Supplier<ClusterState> clusterStateSupplier;
private final String opType;

@@ -86,7 +87,7 @@ public class ReplicationOperation<
public ReplicationOperation(Request request, Primary<Request, ReplicaRequest, PrimaryResultT> primary,
ActionListener<PrimaryResultT> listener,
boolean executeOnReplicas, Replicas<ReplicaRequest> replicas,
Supplier<ClusterState> clusterStateSupplier, ESLogger logger, String opType) {
Supplier<ClusterState> clusterStateSupplier, Logger logger, String opType) {
this.executeOnReplicas = executeOnReplicas;
this.replicasProxy = replicas;
this.primary = primary;

@@ -189,8 +190,14 @@ public class ReplicationOperation<

@Override
public void onFailure(Exception replicaException) {
logger.trace("[{}] failure while performing [{}] on replica {}, request [{}]", replicaException, shard.shardId(), opType,
shard, replicaRequest);
logger.trace(
(org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
"[{}] failure while performing [{}] on replica {}, request [{}]",
shard.shardId(),
opType,
shard,
replicaRequest),
replicaException);
if (ignoreReplicaException(replicaException)) {
decPendingAndFinishIfNeeded();
} else {

@@ -198,7 +205,9 @@ public class ReplicationOperation<
shardReplicaFailures.add(new ReplicationResponse.ShardInfo.Failure(
shard.shardId(), shard.currentNodeId(), replicaException, restStatus, false));
String message = String.format(Locale.ROOT, "failed to perform %s on replica %s", opType, shard);
logger.warn("[{}] {}", replicaException, shard.shardId(), message);
logger.warn(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage("[{}] {}", shard.shardId(), message), replicaException);
replicasProxy.failShard(shard, replicaRequest.primaryTerm(), message, replicaException,
ReplicationOperation.this::decPendingAndFinishIfNeeded,
ReplicationOperation.this::onPrimaryDemoted,

@@ -248,4 +248,12 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
public String getDescription() {
return toString();
}

/**
 * This method is called before this replication request is retried
 * the first time.
 */
public void onRetry() {
// nothing by default
}
}

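Since TransportReplicationAction invokes this new hook just before scheduling a retry (see the request.onRetry() call later in this diff), a subclass can use it to record state that should only change on the first retry. A hypothetical request type, purely to illustrate the hook under the validate() contract of this branch; the name and field are not from this commit:

import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.replication.ReplicationRequest;

public class PingShardRequest extends ReplicationRequest<PingShardRequest> {
    private volatile boolean retried = false;

    @Override
    public void onRetry() {
        retried = true; // runs just before the first retry is scheduled
    }

    @Override
    public ActionRequestValidationException validate() {
        return null; // nothing to validate in this sketch
    }

    public boolean wasRetried() {
        return retried;
    }
}
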
@@ -19,6 +19,7 @@

package org.elasticsearch.action.support.replication;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionListenerResponseHandler;

@@ -56,7 +57,6 @@ import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.node.NodeClosedException;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportChannelResponseHandler;

@@ -65,6 +65,7 @@ import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportResponse.Empty;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;

@@ -215,7 +216,9 @@ public abstract class TransportReplicationAction<
channel.sendResponse(e);
} catch (Exception inner) {
inner.addSuppressed(e);
logger.warn("Failed to send response for {}", inner, actionName);
logger.warn(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage("Failed to send response for {}", actionName), inner);
}
}
});

@@ -444,7 +447,13 @@ public abstract class TransportReplicationAction<
@Override
public void onFailure(Exception e) {
if (e instanceof RetryOnReplicaException) {
logger.trace("Retrying operation on replica, action [{}], request [{}]", e, transportReplicaAction, request);
logger.trace(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage(
"Retrying operation on replica, action [{}], request [{}]",
transportReplicaAction,
request),
e);
final ThreadContext.StoredContext context = threadPool.getThreadContext().newStoredContext();
observer.waitForNextChange(new ClusterStateObserver.Listener() {
@Override

@@ -479,7 +488,12 @@ public abstract class TransportReplicationAction<
channel.sendResponse(e);
} catch (IOException responseException) {
responseException.addSuppressed(e);
logger.warn("failed to send error message back to client for action [{}]", responseException, transportReplicaAction);
logger.warn(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage(
"failed to send error message back to client for action [{}]",
transportReplicaAction),
responseException);
}
}

@@ -682,8 +696,12 @@ public abstract class TransportReplicationAction<
final Throwable cause = exp.unwrapCause();
if (cause instanceof ConnectTransportException || cause instanceof NodeClosedException ||
(isPrimaryAction && retryPrimaryException(cause))) {
logger.trace("received an error from node [{}] for request [{}], scheduling a retry", exp, node.getId(),
request);
logger.trace(
(org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
"received an error from node [{}] for request [{}], scheduling a retry",
node.getId(),
request),
exp);
retry(exp);
} else {
finishAsFailed(exp);

@@ -704,6 +722,7 @@ public abstract class TransportReplicationAction<
return;
}
setPhase(task, "waiting_for_retry");
request.onRetry();
final ThreadContext.StoredContext context = threadPool.getThreadContext().newStoredContext();
observer.waitForNextChange(new ClusterStateObserver.Listener() {
@Override

@@ -729,7 +748,9 @@ public abstract class TransportReplicationAction<
void finishAsFailed(Exception failure) {
if (finished.compareAndSet(false, true)) {
setPhase(task, "failed");
logger.trace("operation failed. action [{}], request [{}]", failure, actionName, request);
logger.trace(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage("operation failed. action [{}], request [{}]", actionName, request), failure);
listener.onFailure(failure);
} else {
assert false : "finishAsFailed called but operation is already finished";

@@ -737,7 +758,13 @@ public abstract class TransportReplicationAction<
}

void finishWithUnexpectedFailure(Exception failure) {
logger.warn("unexpected error during the primary phase for action [{}], request [{}]", failure, actionName, request);
logger.warn(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage(
"unexpected error during the primary phase for action [{}], request [{}]",
actionName,
request),
failure);
if (finished.compareAndSet(false, true)) {
setPhase(task, "failed");
listener.onFailure(failure);

@@ -19,6 +19,7 @@

package org.elasticsearch.action.support.replication;

import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.WriteRequest;

@@ -27,7 +28,6 @@ import org.elasticsearch.cluster.action.shard.ShardStateAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.shard.IndexShard;

@@ -241,13 +241,13 @@ public abstract class TransportWriteAction<
private final RespondingWriteResult respond;
private final IndexShard indexShard;
private final WriteRequest<?> request;
private final ESLogger logger;
private final Logger logger;

AsyncAfterWriteAction(final IndexShard indexShard,
final WriteRequest<?> request,
@Nullable final Translog.Location location,
final RespondingWriteResult respond,
final ESLogger logger) {
final Logger logger) {
this.indexShard = indexShard;
this.request = request;
boolean waitUntilRefresh = false;

@@ -19,6 +19,7 @@

package org.elasticsearch.action.support.single.shard;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.NoShardAvailableActionException;

@@ -39,10 +40,10 @@ import org.elasticsearch.common.logging.LoggerMessageFormat;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;

import java.util.function.Supplier;

@@ -187,7 +188,9 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ

private void onFailure(ShardRouting shardRouting, Exception e) {
if (logger.isTraceEnabled() && e != null) {
logger.trace("{}: failed to execute [{}]", e, shardRouting, internalRequest.request());
logger.trace(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage("{}: failed to execute [{}]", shardRouting, internalRequest.request()), e);
}
perform(e);
}

@@ -205,7 +208,9 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ
failure = new NoShardAvailableActionException(null, LoggerMessageFormat.format("No shard available for [{}]", internalRequest.request()), failure);
} else {
if (logger.isDebugEnabled()) {
logger.debug("{}: failed to execute [{}]", failure, null, internalRequest.request());
logger.debug(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage("{}: failed to execute [{}]", null, internalRequest.request()), failure);
}
}
listener.onFailure(failure);

@@ -19,6 +19,7 @@

package org.elasticsearch.action.support.tasks;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException;

@@ -38,7 +39,6 @@ import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.NodeShouldNotConnectException;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;

@@ -46,6 +46,7 @@ import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;

@@ -275,7 +276,9 @@ public abstract class TransportTasksAction<

private void onFailure(int idx, String nodeId, Throwable t) {
if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) {
logger.debug("failed to execute on node [{}]", t, nodeId);
logger.debug(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage("failed to execute on node [{}]", nodeId), t);
}
if (accumulateExceptions()) {
responses.set(idx, new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t));

@@ -19,6 +19,8 @@

package org.elasticsearch.action.termvectors;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.TransportActions;

@@ -87,7 +89,7 @@ public class TransportShardMultiTermsVectorAction extends TransportSingleShardAc
if (TransportActions.isShardNotAvailableException(t)) {
throw (ElasticsearchException) t;
} else {
logger.debug("{} failed to execute multi term vectors for [{}]/[{}]", t, shardId, termVectorsRequest.type(), termVectorsRequest.id());
logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute multi term vectors for [{}]/[{}]", shardId, termVectorsRequest.type(), termVectorsRequest.id()), t);
response.add(request.locations.get(i),
new MultiTermVectorsResponse.Failure(request.index(), termVectorsRequest.type(), termVectorsRequest.id(), t));
}

@@ -19,6 +19,7 @@

package org.elasticsearch.bootstrap;

import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.Constants;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.StringHelper;

@@ -28,7 +29,6 @@ import org.elasticsearch.cli.Terminal;
import org.elasticsearch.common.PidFile;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.inject.CreationException;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.LogConfigurator;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;

@@ -81,7 +81,7 @@ final class Bootstrap {

/** initialize native resources */
public static void initializeNatives(Path tmpFile, boolean mlockAll, boolean seccomp, boolean ctrlHandler) {
final ESLogger logger = Loggers.getLogger(Bootstrap.class);
final Logger logger = Loggers.getLogger(Bootstrap.class);

// check if the user is running as root, and bail
if (Natives.definitelyRunningAsRoot()) {

@@ -227,7 +227,7 @@ final class Bootstrap {
INSTANCE = new Bootstrap();

Environment environment = initialEnvironment(foreground, pidFile, esSettings);
LogConfigurator.configure(environment.settings(), true);
LogConfigurator.configure(environment, true);
checkForCustomConfFile();

if (environment.pidFile() != null) {

@@ -264,7 +264,7 @@ final class Bootstrap {
if (foreground) {
Loggers.disableConsoleLogging();
}
ESLogger logger = Loggers.getLogger(Bootstrap.class);
Logger logger = Loggers.getLogger(Bootstrap.class);
if (INSTANCE.node != null) {
logger = Loggers.getLogger(Bootstrap.class, Node.NODE_NAME_SETTING.get(INSTANCE.node.settings()));
}

@@ -310,7 +310,7 @@ final class Bootstrap {

private static void checkUnsetAndMaybeExit(String confFileSetting, String settingName) {
if (confFileSetting != null && confFileSetting.isEmpty() == false) {
ESLogger logger = Loggers.getLogger(Bootstrap.class);
Logger logger = Loggers.getLogger(Bootstrap.class);
logger.info("{} is no longer supported. elasticsearch.yml must be placed in the config directory and cannot be renamed.", settingName);
exit(1);
}

@@ -19,15 +19,16 @@

package org.elasticsearch.bootstrap;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.Constants;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.monitor.process.ProcessProbe;
import org.elasticsearch.node.Node;

@@ -100,7 +101,7 @@ final class BootstrapCheck {
final boolean enforceLimits,
final boolean ignoreSystemChecks,
final List<Check> checks,
final ESLogger logger) {
final Logger logger) {
final List<String> errors = new ArrayList<>();
final List<String> ignoredErrors = new ArrayList<>();

@@ -136,7 +137,7 @@ final class BootstrapCheck {

}

static void log(final ESLogger logger, final String error) {
static void log(final Logger logger, final String error) {
logger.warn(error);
}

@@ -417,7 +418,7 @@ final class BootstrapCheck {
}

// visible for testing
long getMaxMapCount(ESLogger logger) {
long getMaxMapCount(Logger logger) {
final Path path = getProcSysVmMaxMapCountPath();
try (final BufferedReader bufferedReader = getBufferedReader(path)) {
final String rawProcSysVmMaxMapCount = readProcSysVmMaxMapCount(bufferedReader);

@@ -425,11 +426,15 @@ final class BootstrapCheck {
try {
return parseProcSysVmMaxMapCount(rawProcSysVmMaxMapCount);
} catch (final NumberFormatException e) {
logger.warn("unable to parse vm.max_map_count [{}]", e, rawProcSysVmMaxMapCount);
logger.warn(
(Supplier<?>) () -> new ParameterizedMessage(
"unable to parse vm.max_map_count [{}]",
rawProcSysVmMaxMapCount),
e);
}
}
} catch (final IOException e) {
logger.warn("I/O exception while trying to read [{}]", e, path);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("I/O exception while trying to read [{}]", path), e);
}
return -1;
}

@@ -116,4 +116,5 @@ class Elasticsearch extends SettingCommand {
static void close(String[] args) throws IOException {
Bootstrap.stop();
}

}

@@ -19,9 +19,10 @@

package org.elasticsearch.bootstrap;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.lucene.index.MergePolicy;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;

import java.io.IOError;

@@ -76,14 +77,17 @@ class ElasticsearchUncaughtExceptionHandler implements Thread.UncaughtExceptionH

// visible for testing
void onFatalUncaught(final String threadName, final Throwable t) {
final ESLogger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get());
logger.error("fatal error in thread [{}], exiting", t, threadName);
final Logger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get());
logger.error(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage("fatal error in thread [{}], exiting", threadName), t);
}

// visible for testing
void onNonFatalUncaught(final String threadName, final Throwable t) {
final ESLogger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get());
logger.warn("uncaught exception in thread [{}]", t, threadName);
final Logger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get());
logger.warn((org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage("uncaught exception in thread [{}]", threadName), t);
}

// visible for testing

@@ -22,8 +22,8 @@ package org.elasticsearch.bootstrap;
import com.sun.jna.Native;
import com.sun.jna.NativeLong;
import com.sun.jna.Structure;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.Constants;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;

import java.util.Arrays;

@@ -34,7 +34,7 @@ import java.util.List;
 */
final class JNACLibrary {

private static final ESLogger logger = Loggers.getLogger(JNACLibrary.class);
private static final Logger logger = Loggers.getLogger(JNACLibrary.class);

public static final int MCL_CURRENT = 1;
public static final int ENOMEM = 12;

@@ -25,8 +25,8 @@ import com.sun.jna.NativeLong;
import com.sun.jna.Pointer;
import com.sun.jna.Structure;
import com.sun.jna.win32.StdCallLibrary;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.Constants;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;

import java.util.ArrayList;

@@ -40,7 +40,7 @@ import java.util.List;
 */
final class JNAKernel32Library {

private static final ESLogger logger = Loggers.getLogger(JNAKernel32Library.class);
private static final Logger logger = Loggers.getLogger(JNAKernel32Library.class);

// Callbacks must be kept around in order to be able to be called later,
// when the Windows ConsoleCtrlHandler sends an event.

@@ -21,8 +21,8 @@ package org.elasticsearch.bootstrap;

import com.sun.jna.Native;
import com.sun.jna.Pointer;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.Constants;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.monitor.jvm.JvmInfo;

@@ -39,7 +39,7 @@ class JNANatives {
/** no instantiation */
private JNANatives() {}

private static final ESLogger logger = Loggers.getLogger(JNANatives.class);
private static final Logger logger = Loggers.getLogger(JNANatives.class);

// Set to true, in case native mlockall call was successful
static boolean LOCAL_MLOCKALL = false;

@@ -19,10 +19,10 @@

package org.elasticsearch.bootstrap;

import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;

import java.io.IOException;

@@ -76,7 +76,7 @@ public class JarHell {
 */
public static void checkJarHell() throws Exception {
ClassLoader loader = JarHell.class.getClassLoader();
ESLogger logger = Loggers.getLogger(JarHell.class);
Logger logger = Loggers.getLogger(JarHell.class);
if (logger.isDebugEnabled()) {
logger.debug("java.class.path: {}", System.getProperty("java.class.path"));
logger.debug("sun.boot.class.path: {}", System.getProperty("sun.boot.class.path"));

@@ -150,7 +150,7 @@ public class JarHell {
 */
@SuppressForbidden(reason = "needs JarFile for speed, just reading entries")
public static void checkJarHell(URL urls[]) throws Exception {
ESLogger logger = Loggers.getLogger(JarHell.class);
Logger logger = Loggers.getLogger(JarHell.class);
// we don't try to be sneaky and use deprecated/internal/not portable stuff
// like sun.boot.class.path, and with jigsaw we don't yet have a way to get
// a "list" at all. So just exclude any elements underneath the java home

@@ -19,7 +19,7 @@

package org.elasticsearch.bootstrap;

import org.elasticsearch.common.logging.ESLogger;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.Loggers;

import java.nio.file.Path;

@@ -32,7 +32,7 @@ final class Natives {
/** no instantiation */
private Natives() {}

private static final ESLogger logger = Loggers.getLogger(Natives.class);
private static final Logger logger = Loggers.getLogger(Natives.class);

// marker to determine if the JNA class files are available to the JVM
static final boolean JNA_AVAILABLE;

@@ -26,9 +26,9 @@ import com.sun.jna.NativeLong;
import com.sun.jna.Pointer;
import com.sun.jna.Structure;
import com.sun.jna.ptr.PointerByReference;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.Constants;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;

import java.io.IOException;

@@ -92,7 +92,7 @@ import java.util.Map;
 */
// not an example of how to write code!!!
final class Seccomp {
private static final ESLogger logger = Loggers.getLogger(Seccomp.class);
private static final Logger logger = Loggers.getLogger(Seccomp.class);

// Linux implementation, based on seccomp(2) or prctl(2) with bpf filtering

@@ -20,7 +20,8 @@
package org.elasticsearch.client.transport;

import com.carrotsearch.hppc.cursors.ObjectCursor;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;

@@ -32,9 +33,8 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.client.Requests;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;

@@ -43,11 +43,11 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.FutureTransportResponseHandler;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;

import java.io.Closeable;

@@ -340,7 +340,7 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
transportService.connectToNode(node);
} catch (Exception e) {
it.remove();
logger.debug("failed to connect to discovered node [{}]", e, node);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to connect to discovered node [{}]", node), e);
}
}
}

@@ -377,7 +379,9 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
logger.trace("connecting to listed node (light) [{}]", listedNode);
transportService.connectToNodeLight(listedNode);
} catch (Exception e) {
logger.debug("failed to connect to node [{}], removed from nodes list", e, listedNode);
logger.debug(
(Supplier<?>)
() -> new ParameterizedMessage("failed to connect to node [{}], removed from nodes list", listedNode), e);
newFilteredNodes.add(listedNode);
continue;
}

@@ -409,7 +411,8 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
newNodes.add(listedNode);
}
} catch (Exception e) {
logger.info("failed to get node info for {}, disconnecting...", e, listedNode);
logger.info(
(Supplier<?>) () -> new ParameterizedMessage("failed to get node info for {}, disconnecting...", listedNode), e);
transportService.disconnectFromNode(listedNode);
}
}

@@ -453,7 +456,9 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
transportService.connectToNodeLight(listedNode);
}
} catch (Exception e) {
logger.debug("failed to connect to node [{}], ignoring...", e, listedNode);
logger.debug(
(Supplier<?>)
() -> new ParameterizedMessage("failed to connect to node [{}], ignoring...", listedNode), e);
latch.countDown();
return;
}

@@ -482,13 +487,17 @@ public class TransportClientNodesService extends AbstractComponent implements Cl

@Override
public void handleException(TransportException e) {
logger.info("failed to get local cluster state for {}, disconnecting...", e, listedNode);
logger.info(
(Supplier<?>) () -> new ParameterizedMessage(
"failed to get local cluster state for {}, disconnecting...", listedNode), e);
transportService.disconnectFromNode(listedNode);
latch.countDown();
}
});
} catch (Exception e) {
logger.info("failed to get local cluster state info for {}, disconnecting...", e, listedNode);
logger.info(
(Supplier<?>) () -> new ParameterizedMessage(
"failed to get local cluster state info for {}, disconnecting...", listedNode), e);
transportService.disconnectFromNode(listedNode);
latch.countDown();
}

@@ -19,6 +19,7 @@

package org.elasticsearch.cluster;

import org.apache.logging.log4j.Logger;
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
import org.elasticsearch.cluster.action.index.NodeMappingRefreshAction;
import org.elasticsearch.cluster.action.shard.ShardStateAction;

@@ -44,17 +45,16 @@ import org.elasticsearch.cluster.routing.allocation.decider.ConcurrentRebalanceA
import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.NodeVersionAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.RebalanceOnlyWhenActiveAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;

|
|||
|
||||
package org.elasticsearch.cluster;
|
||||
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.concurrent.ThreadContext;
|
||||
|
||||
|
@ -35,7 +35,7 @@ import java.util.concurrent.atomic.AtomicReference;
|
|||
*/
|
||||
public class ClusterStateObserver {
|
||||
|
||||
protected final ESLogger logger;
|
||||
protected final Logger logger;
|
||||
|
||||
public final ChangePredicate MATCH_ALL_CHANGES_PREDICATE = new EventPredicate() {
|
||||
|
||||
|
@ -58,7 +58,7 @@ public class ClusterStateObserver {
|
|||
volatile boolean timedOut;
|
||||
|
||||
|
||||
public ClusterStateObserver(ClusterService clusterService, ESLogger logger, ThreadContext contextHolder) {
|
||||
public ClusterStateObserver(ClusterService clusterService, Logger logger, ThreadContext contextHolder) {
|
||||
this(clusterService, new TimeValue(60000), logger, contextHolder);
|
||||
}
|
||||
|
||||
|
@ -67,7 +67,7 @@ public class ClusterStateObserver {
|
|||
* will fail any existing or new #waitForNextChange calls. Set to null
|
||||
* to wait indefinitely
|
||||
*/
|
||||
public ClusterStateObserver(ClusterService clusterService, @Nullable TimeValue timeout, ESLogger logger, ThreadContext contextHolder) {
|
||||
public ClusterStateObserver(ClusterService clusterService, @Nullable TimeValue timeout, Logger logger, ThreadContext contextHolder) {
|
||||
this.clusterService = clusterService;
|
||||
this.lastObservedState = new AtomicReference<>(new ObservedState(clusterService.state()));
|
||||
this.timeOutValue = timeout;
|
||||
|
|
|
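Both constructors now take the Log4j Logger directly, and per the javadoc above a null timeout means waiting indefinitely. A hypothetical wiring sketch, assuming a ClusterService and ThreadContext supplied by the enclosing component (the class name is illustrative, not from this commit):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.cluster.ClusterStateObserver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.util.concurrent.ThreadContext;

class ObserverWiring {
    private static final Logger logger = LogManager.getLogger(ObserverWiring.class);

    // null timeout = wait indefinitely, per the constructor javadoc
    static ClusterStateObserver indefiniteObserver(ClusterService clusterService, ThreadContext threadContext) {
        return new ClusterStateObserver(clusterService, null, logger, threadContext);
    }
}
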
@@ -19,6 +19,7 @@

package org.elasticsearch.cluster;

import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.LatchedActionListener;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;

@@ -39,7 +40,6 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;

@@ -379,7 +379,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
return clusterInfo;
}

static void buildShardLevelInfo(ESLogger logger, ShardStats[] stats, ImmutableOpenMap.Builder<String, Long> newShardSizes,
static void buildShardLevelInfo(Logger logger, ShardStats[] stats, ImmutableOpenMap.Builder<String, Long> newShardSizes,
ImmutableOpenMap.Builder<ShardRouting, String> newShardRoutingToDataPath, ClusterState state) {
MetaData meta = state.getMetaData();
for (ShardStats s : stats) {

@@ -402,7 +402,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
}
}

static void fillDiskUsagePerNode(ESLogger logger, List<NodeStats> nodeStatsArray,
static void fillDiskUsagePerNode(Logger logger, List<NodeStats> nodeStatsArray,
ImmutableOpenMap.Builder<String, DiskUsage> newLeastAvaiableUsages,
ImmutableOpenMap.Builder<String, DiskUsage> newMostAvaiableUsages) {
for (NodeStats nodeStats : nodeStatsArray) {

@@ -18,6 +18,8 @@
 */
package org.elasticsearch.cluster;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;

@@ -91,7 +93,7 @@ public class NodeConnectionsService extends AbstractLifecycleComponent {
try {
transportService.disconnectFromNode(node);
} catch (Exception e) {
logger.warn("failed to disconnect to node [{}]", e, node);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to disconnect to node [{}]", node), e);
}
}
}

@@ -113,7 +115,11 @@ public class NodeConnectionsService extends AbstractLifecycleComponent {
nodeFailureCount = nodeFailureCount + 1;
// log every 6th failure
if ((nodeFailureCount % 6) == 1) {
logger.warn("failed to connect to node {} (tried [{}] times)", e, node, nodeFailureCount);
final int finalNodeFailureCount = nodeFailureCount;
logger.warn(
(Supplier<?>)
() -> new ParameterizedMessage(
"failed to connect to node {} (tried [{}] times)", node, finalNodeFailureCount), e);
}
nodes.put(node, nodeFailureCount);
}

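The extra local in the second hunk is not cosmetic: nodeFailureCount is reassigned in the surrounding code, and a Java lambda can only capture effectively final variables, so the diff snapshots it into finalNodeFailureCount before building the message Supplier. A standalone sketch of the same constraint, with illustrative names not taken from this commit:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

public class LambdaCaptureSketch {
    private static final Logger logger = LogManager.getLogger(LambdaCaptureSketch.class);

    void connectWithRetries(Runnable connect) {
        int failures = 0; // mutated in the loop, so not effectively final
        for (int attempt = 0; attempt < 6; attempt++) {
            try {
                connect.run();
                return;
            } catch (RuntimeException e) {
                failures++;
                final int failuresSoFar = failures; // snapshot for the lambda, as the diff does
                logger.warn((Supplier<?>) () -> new ParameterizedMessage(
                        "connect failed (tried [{}] times)", failuresSoFar), e);
            }
        }
    }
}
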
@@ -19,6 +19,9 @@

package org.elasticsearch.cluster.action.shard;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.cluster.ClusterChangedEvent;

@@ -43,7 +46,6 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.Discovery;

@@ -108,7 +110,7 @@ public class ShardStateAction extends AbstractComponent {
if (isMasterChannelException(exp)) {
waitForNewMasterAndRetry(actionName, observer, shardEntry, listener);
} else {
logger.warn("{} unexpected failure while sending request [{}] to [{}] for shard entry [{}]", exp, shardEntry.shardId, actionName, masterNode, shardEntry);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} unexpected failure while sending request [{}] to [{}] for shard entry [{}]", shardEntry.shardId, actionName, masterNode, shardEntry), exp);
listener.onFailure(exp instanceof RemoteTransportException ? (Exception) (exp.getCause() instanceof Exception ? exp.getCause() : new ElasticsearchException(exp.getCause())) : exp);
}
}

@@ -169,7 +171,7 @@ public class ShardStateAction extends AbstractComponent {

@Override
public void onClusterServiceClose() {
logger.warn("{} node closed while execution action [{}] for shard entry [{}]", shardEntry.failure, shardEntry.shardId, actionName, shardEntry);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} node closed while execution action [{}] for shard entry [{}]", shardEntry.shardId, actionName, shardEntry), shardEntry.failure);
listener.onFailure(new NodeClosedException(clusterService.localNode()));
}

@@ -184,9 +186,9 @@ public class ShardStateAction extends AbstractComponent {
private static class ShardFailedTransportHandler implements TransportRequestHandler<ShardEntry> {
private final ClusterService clusterService;
private final ShardFailedClusterStateTaskExecutor shardFailedClusterStateTaskExecutor;
private final ESLogger logger;
private final Logger logger;

public ShardFailedTransportHandler(ClusterService clusterService, ShardFailedClusterStateTaskExecutor shardFailedClusterStateTaskExecutor, ESLogger logger) {
public ShardFailedTransportHandler(ClusterService clusterService, ShardFailedClusterStateTaskExecutor shardFailedClusterStateTaskExecutor, Logger logger) {
this.clusterService = clusterService;
this.shardFailedClusterStateTaskExecutor = shardFailedClusterStateTaskExecutor;
this.logger = logger;

@@ -194,7 +196,7 @@ public class ShardStateAction extends AbstractComponent {

@Override
public void messageReceived(ShardEntry request, TransportChannel channel) throws Exception {
logger.warn("{} received shard failed for {}", request.failure, request.shardId, request);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} received shard failed for {}", request.shardId, request), request.failure);
clusterService.submitStateUpdateTask(
"shard-failed",
request,

@@ -203,12 +205,12 @@ public class ShardStateAction extends AbstractComponent {
new ClusterStateTaskListener() {
@Override
public void onFailure(String source, Exception e) {
logger.error("{} unexpected failure while failing shard [{}]", e, request.shardId, request);
logger.error((Supplier<?>) () -> new ParameterizedMessage("{} unexpected failure while failing shard [{}]", request.shardId, request), e);
try {
channel.sendResponse(e);
} catch (Exception channelException) {
channelException.addSuppressed(e);
logger.warn("{} failed to send failure [{}] while failing shard [{}]", channelException, request.shardId, e, request);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} failed to send failure [{}] while failing shard [{}]", request.shardId, e, request), channelException);
}
}

@@ -218,7 +220,7 @@ public class ShardStateAction extends AbstractComponent {
try {
channel.sendResponse(new NotMasterException(source));
} catch (Exception channelException) {
logger.warn("{} failed to send no longer master while failing shard [{}]", channelException, request.shardId, request);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} failed to send no longer master while failing shard [{}]", request.shardId, request), channelException);
}
}

@@ -227,7 +229,7 @@ public class ShardStateAction extends AbstractComponent {
try {
channel.sendResponse(TransportResponse.Empty.INSTANCE);
} catch (Exception channelException) {
logger.warn("{} failed to send response while failing shard [{}]", channelException, request.shardId, request);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} failed to send response while failing shard [{}]", request.shardId, request), channelException);
}
}
}

@@ -238,9 +240,9 @@ public class ShardStateAction extends AbstractComponent {
public static class ShardFailedClusterStateTaskExecutor implements ClusterStateTaskExecutor<ShardEntry> {
private final AllocationService allocationService;
private final RoutingService routingService;
private final ESLogger logger;
private final Logger logger;

public ShardFailedClusterStateTaskExecutor(AllocationService allocationService, RoutingService routingService, ESLogger logger) {
public ShardFailedClusterStateTaskExecutor(AllocationService allocationService, RoutingService routingService, Logger logger) {
this.allocationService = allocationService;
this.routingService = routingService;
this.logger = logger;

@@ -315,7 +317,7 @@ public class ShardStateAction extends AbstractComponent {
}
batchResultBuilder.successes(tasksToBeApplied);
} catch (Exception e) {
logger.warn("failed to apply failed shards {}", e, shardRoutingsToBeApplied);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to apply failed shards {}", shardRoutingsToBeApplied), e);
// failures are communicated back to the requester
// cluster state will not be updated in this case
batchResultBuilder.failures(tasksToBeApplied, e);

@@ -352,9 +354,9 @@ public class ShardStateAction extends AbstractComponent {
private static class ShardStartedTransportHandler implements TransportRequestHandler<ShardEntry> {
private final ClusterService clusterService;
private final ShardStartedClusterStateTaskExecutor shardStartedClusterStateTaskExecutor;
private final ESLogger logger;
private final Logger logger;

public ShardStartedTransportHandler(ClusterService clusterService, ShardStartedClusterStateTaskExecutor shardStartedClusterStateTaskExecutor, ESLogger logger) {
public ShardStartedTransportHandler(ClusterService clusterService, ShardStartedClusterStateTaskExecutor shardStartedClusterStateTaskExecutor, Logger logger) {
this.clusterService = clusterService;
this.shardStartedClusterStateTaskExecutor = shardStartedClusterStateTaskExecutor;
this.logger = logger;

@@ -375,9 +377,9 @@ public class ShardStateAction extends AbstractComponent {

public static class ShardStartedClusterStateTaskExecutor implements ClusterStateTaskExecutor<ShardEntry>, ClusterStateTaskListener {
private final AllocationService allocationService;
private final ESLogger logger;
private final Logger logger;

public ShardStartedClusterStateTaskExecutor(AllocationService allocationService, ESLogger logger) {
public ShardStartedClusterStateTaskExecutor(AllocationService allocationService, Logger logger) {
this.allocationService = allocationService;
this.logger = logger;
}

@@ -431,7 +433,7 @@ public class ShardStateAction extends AbstractComponent {
}
builder.successes(tasksToBeApplied);
} catch (Exception e) {
logger.warn("failed to apply started shards {}", e, shardRoutingsToBeApplied);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to apply started shards {}", shardRoutingsToBeApplied), e);
builder.failures(tasksToBeApplied, e);
}

@@ -440,7 +442,7 @@ public class ShardStateAction extends AbstractComponent {

@Override
public void onFailure(String source, Exception e) {
logger.error("unexpected failure during [{}]", e, source);
logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
}
}

@ -19,213 +19,167 @@
|
|||
|
||||
package org.elasticsearch.cluster.metadata;
|
||||
|
||||
import org.elasticsearch.ElasticsearchGenerationException;
|
||||
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Streamable;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.index.query.QueryBuilder;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Map;
|
||||
import org.elasticsearch.common.Strings;
|
||||
|
||||
/**
|
||||
*
|
||||
* Individual operation to perform on the cluster state as part of an {@link IndicesAliasesRequest}.
|
||||
*/
|
||||
public class AliasAction implements Streamable {
|
||||
public abstract class AliasAction {
|
||||
private final String index;
|
||||
|
||||
public static enum Type {
|
||||
ADD((byte) 0),
|
||||
REMOVE((byte) 1);
|
||||
|
||||
private final byte value;
|
||||
|
||||
Type(byte value) {
|
||||
this.value = value;
|
||||
private AliasAction(String index) {
|
||||
if (false == Strings.hasText(index)) {
|
||||
throw new IllegalArgumentException("[index] is required");
|
||||
}
|
||||
|
||||
public byte value() {
|
||||
return value;
|
||||
}
|
||||
|
||||
public static Type fromValue(byte value) {
|
||||
if (value == 0) {
|
||||
return ADD;
|
||||
} else if (value == 1) {
|
||||
return REMOVE;
|
||||
} else {
|
||||
throw new IllegalArgumentException("No type for action [" + value + "]");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private Type actionType;
|
||||
|
||||
private String index;
|
||||
|
||||
private String alias;
|
||||
|
||||
@Nullable
|
||||
private String filter;
|
||||
|
||||
@Nullable
|
||||
private String indexRouting;
|
||||
|
||||
@Nullable
|
||||
private String searchRouting;
|
||||
|
||||
private AliasAction() {
|
||||
|
||||
}
|
||||
|
||||
public AliasAction(AliasAction other) {
|
||||
this.actionType = other.actionType;
|
||||
this.index = other.index;
|
||||
this.alias = other.alias;
|
||||
this.filter = other.filter;
|
||||
this.indexRouting = other.indexRouting;
|
||||
this.searchRouting = other.searchRouting;
|
||||
}
|
||||
|
||||
public AliasAction(Type actionType) {
|
||||
this.actionType = actionType;
|
||||
}
|
||||
|
||||
public AliasAction(Type actionType, String index, String alias) {
|
||||
this.actionType = actionType;
|
||||
this.index = index;
|
||||
this.alias = alias;
|
||||
}
|
||||
|
||||
public AliasAction(Type actionType, String index, String alias, String filter) {
|
||||
this.actionType = actionType;
|
||||
this.index = index;
|
||||
this.alias = alias;
|
||||
this.filter = filter;
|
||||
}
|
||||
|
||||
    public Type actionType() {
        return actionType;
    }

    public AliasAction index(String index) {
        this.index = index;
        return this;
    }

    public String index() {
    /**
     * Get the index on which the operation should act.
     */
    public String getIndex() {
        return index;
    }

    public AliasAction alias(String alias) {
        this.alias = alias;
        return this;

    /**
     * Should this action remove the index? Actions that return true from this will never execute
     * {@link #apply(NewAliasValidator, MetaData.Builder, IndexMetaData)}.
     */
    abstract boolean removeIndex();

    /**
     * Apply the action.
     *
     * @param aliasValidator call to validate a new alias before adding it to the builder
     * @param metadata       metadata builder for the changes made by all actions as part of this request
     * @param index          metadata for the index being changed
     * @return did this action make any changes?
     */
    abstract boolean apply(NewAliasValidator aliasValidator, MetaData.Builder metadata, IndexMetaData index);

    /**
     * Validate a new alias.
     */
    @FunctionalInterface
    public interface NewAliasValidator {
        void validate(String alias, @Nullable String indexRouting, @Nullable String filter);
    }

    public String alias() {
        return alias;
    }
    /**
     * Operation to add an alias to an index.
     */
    public static class Add extends AliasAction {
        private final String alias;

    public String filter() {
        return filter;
    }
        @Nullable
        private final String filter;

    public AliasAction filter(String filter) {
        this.filter = filter;
        return this;
    }
        @Nullable
        private final String indexRouting;

    public AliasAction filter(Map<String, Object> filter) {
        if (filter == null || filter.isEmpty()) {
            this.filter = null;
            return this;
        @Nullable
        private final String searchRouting;

        /**
         * Build the operation.
         */
        public Add(String index, String alias, @Nullable String filter, @Nullable String indexRouting, @Nullable String searchRouting) {
            super(index);
            if (false == Strings.hasText(alias)) {
                throw new IllegalArgumentException("[alias] is required");
            }
            this.alias = alias;
            this.filter = filter;
            this.indexRouting = indexRouting;
            this.searchRouting = searchRouting;
        }
        try {
            XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
            builder.map(filter);
            this.filter = builder.string();
            return this;
        } catch (IOException e) {
            throw new ElasticsearchGenerationException("Failed to generate [" + filter + "]", e);

        /**
         * Alias to add to the index.
         */
        public String getAlias() {
            return alias;
        }

        @Override
        boolean removeIndex() {
            return false;
        }

        @Override
        boolean apply(NewAliasValidator aliasValidator, MetaData.Builder metadata, IndexMetaData index) {
            aliasValidator.validate(alias, indexRouting, filter);
            AliasMetaData newAliasMd = AliasMetaData.newAliasMetaDataBuilder(alias).filter(filter).indexRouting(indexRouting)
                    .searchRouting(searchRouting).build();
            // Check if this alias already exists
            AliasMetaData currentAliasMd = index.getAliases().get(alias);
            if (currentAliasMd != null && currentAliasMd.equals(newAliasMd)) {
                // It already exists, ignore it
                return false;
            }
            metadata.put(IndexMetaData.builder(index).putAlias(newAliasMd));
            return true;
        }
    }

    public AliasAction filter(QueryBuilder queryBuilder) {
        if (queryBuilder == null) {
            this.filter = null;
            return this;
    /**
     * Operation to remove an alias from an index.
     */
    public static class Remove extends AliasAction {
        private final String alias;

        /**
         * Build the operation.
         */
        public Remove(String index, String alias) {
            super(index);
            if (false == Strings.hasText(alias)) {
                throw new IllegalArgumentException("[alias] is required");
            }
            this.alias = alias;
        }
        try {
            XContentBuilder builder = XContentFactory.jsonBuilder();
            queryBuilder.toXContent(builder, ToXContent.EMPTY_PARAMS);
            builder.close();
            this.filter = builder.string();
            return this;
        } catch (IOException e) {
            throw new ElasticsearchGenerationException("Failed to build json for alias request", e);

        /**
         * Alias to remove from the index.
         */
        public String getAlias() {
            return alias;
        }

        @Override
        boolean removeIndex() {
            return false;
        }

        @Override
        boolean apply(NewAliasValidator aliasValidator, MetaData.Builder metadata, IndexMetaData index) {
            if (false == index.getAliases().containsKey(alias)) {
                return false;
            }
            metadata.put(IndexMetaData.builder(index).removeAlias(alias));
            return true;
        }
    }

    public AliasAction routing(String routing) {
        this.indexRouting = routing;
        this.searchRouting = routing;
        return this;
    }
    /**
     * Operation to remove an index. This is an "alias action" because it allows us to remove an index at the same time as we add an
     * alias to replace it.
     */
    public static class RemoveIndex extends AliasAction {
        public RemoveIndex(String index) {
            super(index);
        }

    public String indexRouting() {
        return indexRouting;
    }
        @Override
        boolean removeIndex() {
            return true;
        }

    public AliasAction indexRouting(String indexRouting) {
        this.indexRouting = indexRouting;
        return this;
        @Override
        boolean apply(NewAliasValidator aliasValidator, MetaData.Builder metadata, IndexMetaData index) {
            throw new UnsupportedOperationException();
        }
    }

    public String searchRouting() {
        return searchRouting;
    }

    public AliasAction searchRouting(String searchRouting) {
        this.searchRouting = searchRouting;
        return this;
    }

    public static AliasAction readAliasAction(StreamInput in) throws IOException {
        AliasAction aliasAction = new AliasAction();
        aliasAction.readFrom(in);
        return aliasAction;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        actionType = Type.fromValue(in.readByte());
        index = in.readOptionalString();
        alias = in.readOptionalString();
        filter = in.readOptionalString();
        indexRouting = in.readOptionalString();
        searchRouting = in.readOptionalString();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeByte(actionType.value());
        out.writeOptionalString(index);
        out.writeOptionalString(alias);
        out.writeOptionalString(filter);
        out.writeOptionalString(indexRouting);
        out.writeOptionalString(searchRouting);
    }

    public static AliasAction newAddAliasAction(String index, String alias) {
        return new AliasAction(Type.ADD, index, alias);
    }

    public static AliasAction newRemoveAliasAction(String index, String alias) {
        return new AliasAction(Type.REMOVE, index, alias);
    }

}
}
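
For orientation, a minimal sketch of how the reworked actions above might be driven. The Add/Remove/RemoveIndex constructors and getIndex() come from this diff; the list scaffolding and alias names are hypothetical, and because removeIndex() is package-private the snippet assumes it lives alongside the class in org.elasticsearch.cluster.metadata.

import java.util.Arrays;
import java.util.List;

// Build one action of each kind introduced above (index and alias names are made up).
List<AliasAction> actions = Arrays.asList(
        new AliasAction.Add("logs-2016-09", "logs", null, null, null), // no filter, no routing
        new AliasAction.Remove("logs-2016-08", "logs"),
        new AliasAction.RemoveIndex("logs-2016-07"));
for (AliasAction action : actions) {
    // Only RemoveIndex answers true here; such actions are handled up front
    // and never reach apply(NewAliasValidator, MetaData.Builder, IndexMetaData).
    System.out.println(action.getIndex() + " removeIndex=" + action.removeIndex());
}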

@@ -20,6 +20,7 @@
package org.elasticsearch.cluster.metadata;

import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;

@@ -33,6 +34,7 @@ import org.elasticsearch.indices.InvalidAliasNameException;

import java.io.IOException;
import java.util.Optional;
import java.util.function.Function;

/**
 * Validator for an alias, to be used before adding an alias to the index metadata

@@ -45,22 +47,13 @@ public class AliasValidator extends AbstractComponent {
        super(settings);
    }

    /**
     * Allows to validate an {@link org.elasticsearch.cluster.metadata.AliasAction} and make sure
     * it's valid before it gets added to the index metadata. Doesn't validate the alias filter.
     * @throws IllegalArgumentException if the alias is not valid
     */
    public void validateAliasAction(AliasAction aliasAction, MetaData metaData) {
        validateAlias(aliasAction.alias(), aliasAction.index(), aliasAction.indexRouting(), metaData);
    }

    /**
     * Allows to validate an {@link org.elasticsearch.action.admin.indices.alias.Alias} and make sure
     * it's valid before it gets added to the index metadata. Doesn't validate the alias filter.
     * @throws IllegalArgumentException if the alias is not valid
     */
    public void validateAlias(Alias alias, String index, MetaData metaData) {
        validateAlias(alias.name(), index, alias.indexRouting(), metaData);
        validateAlias(alias.name(), index, alias.indexRouting(), name -> metaData.index(name));
    }

    /**
@@ -69,7 +62,7 @@ public class AliasValidator extends AbstractComponent {
     * @throws IllegalArgumentException if the alias is not valid
     */
    public void validateAliasMetaData(AliasMetaData aliasMetaData, String index, MetaData metaData) {
        validateAlias(aliasMetaData.alias(), index, aliasMetaData.indexRouting(), metaData);
        validateAlias(aliasMetaData.alias(), index, aliasMetaData.indexRouting(), name -> metaData.index(name));
    }

    /**
@@ -90,16 +83,19 @@ public class AliasValidator extends AbstractComponent {
        }
    }

    private void validateAlias(String alias, String index, String indexRouting, MetaData metaData) {
    /**
     * Validate a proposed alias.
     */
    public void validateAlias(String alias, String index, @Nullable String indexRouting, Function<String, IndexMetaData> indexLookup) {
        validateAliasStandalone(alias, indexRouting);

        if (!Strings.hasText(index)) {
            throw new IllegalArgumentException("index name is required");
        }

        assert metaData != null;
        if (metaData.hasIndex(alias)) {
            throw new InvalidAliasNameException(metaData.index(alias).getIndex(), alias, "an index exists with the same name as the alias");
        IndexMetaData indexNamedSameAsAlias = indexLookup.apply(alias);
        if (indexNamedSameAsAlias != null) {
            throw new InvalidAliasNameException(indexNamedSameAsAlias.getIndex(), alias, "an index exists with the same name as the alias");
        }
    }
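
The refactor above decouples validation from a full MetaData instance: the caller now supplies how an index name resolves. A hedged sketch of a call against the new signature, assuming an aliasValidator and a metaData object are in scope (names below are illustrative):

// The lambda plays the role the MetaData parameter used to play.
Function<String, IndexMetaData> indexLookup = name -> metaData.index(name);
aliasValidator.validateAlias("logs", "logs-2016-09", null, indexLookup);
// Throws InvalidAliasNameException if an index named "logs" already exists.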

@@ -37,11 +37,11 @@ import org.elasticsearch.index.Index;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.List;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.function.BiFunction;

@@ -219,7 +219,7 @@ public final class IndexGraveyard implements MetaData.Custom {
    /**
     * Add a set of deleted indexes to the list of tombstones in the cluster state.
     */
    public Builder addTombstones(final Index[] indices) {
    public Builder addTombstones(final Collection<Index> indices) {
        for (Index index : indices) {
            addTombstone(index);
        }
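
With the Collection-based signature above, callers can hand over whatever collection they already hold instead of materializing an array first. A hedged one-liner, reusing only calls that appear in this diff; metaDataBuilder, index1 and index2 are assumed to be in scope, and java.util.Arrays is assumed imported:

IndexGraveyard.Builder graveyardBuilder = IndexGraveyard.builder(metaDataBuilder.indexGraveyard());
graveyardBuilder.addTombstones(Arrays.asList(index1, index2)); // no new Index[]{...} needed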

@@ -22,6 +22,7 @@ package org.elasticsearch.cluster.metadata;
import com.carrotsearch.hppc.ObjectHashSet;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.cluster.Diff;
import org.elasticsearch.cluster.Diffable;

@@ -38,7 +39,6 @@ import org.elasticsearch.common.collect.HppcMaps;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;

@@ -758,7 +758,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr

    /** As of 2.0 we require units for time and byte-sized settings. This method adds default units to any cluster settings that don't
     *  specify a unit. */
    public static MetaData addDefaultUnitsIfNeeded(ESLogger logger, MetaData metaData) {
    public static MetaData addDefaultUnitsIfNeeded(Logger logger, MetaData metaData) {
        Settings.Builder newPersistentSettings = null;
        for(Map.Entry<String,String> ent : metaData.persistentSettings().getAsMap().entrySet()) {
            String settingName = ent.getKey();

@@ -21,6 +21,8 @@ package org.elasticsearch.cluster.metadata;

import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;

@@ -446,9 +448,9 @@ public class MetaDataCreateIndexService extends AbstractComponent {
            @Override
            public void onFailure(String source, Exception e) {
                if (e instanceof IndexAlreadyExistsException) {
                    logger.trace("[{}] failed to create", e, request.index());
                    logger.trace((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to create", request.index()), e);
                } else {
                    logger.debug("[{}] failed to create", e, request.index());
                    logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to create", request.index()), e);
                }
                super.onFailure(source, e);
            }
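
The replaced trace/debug pair above is this commit's logging migration in miniature: the old ESLogger overloads took the exception before the format arguments, while log4j2 takes a lazily built message plus the Throwable last. A self-contained sketch of the pattern with a hypothetical logger name; logger.debug(Supplier<?>, Throwable) is a real log4j2 overload:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

Logger logger = LogManager.getLogger("example"); // hypothetical logger name
Exception e = new RuntimeException("boom");      // stand-in failure
// The Supplier defers building the ParameterizedMessage until the debug
// level is actually enabled; the exception moves to the final argument.
logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to create", "my-index"), e);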

@@ -37,11 +37,13 @@ import org.elasticsearch.index.Index;
import org.elasticsearch.snapshots.SnapshotsService;

import java.util.Arrays;
import java.util.Collection;
import java.util.Set;
import java.util.stream.Collectors;

import static java.util.stream.Collectors.toSet;

/**
 *
 * Deletes indices.
 */
public class MetaDataDeleteIndexService extends AbstractComponent {

@@ -56,7 +58,8 @@ public class MetaDataDeleteIndexService extends AbstractComponent {
        this.allocationService = allocationService;
    }

    public void deleteIndices(final DeleteIndexClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
    public void deleteIndices(final DeleteIndexClusterStateUpdateRequest request,
            final ActionListener<ClusterStateUpdateResponse> listener) {
        if (request.indices() == null || request.indices().length == 0) {
            throw new IllegalArgumentException("Index name is required");
        }

@@ -71,37 +74,43 @@ public class MetaDataDeleteIndexService extends AbstractComponent {

            @Override
            public ClusterState execute(final ClusterState currentState) {
                final MetaData meta = currentState.metaData();
                final Index[] indices = request.indices();
                final Set<IndexMetaData> metaDatas = Arrays.asList(indices).stream().map(i -> meta.getIndexSafe(i)).collect(Collectors.toSet());
                // Check if index deletion conflicts with any running snapshots
                SnapshotsService.checkIndexDeletion(currentState, metaDatas);
                RoutingTable.Builder routingTableBuilder = RoutingTable.builder(currentState.routingTable());
                MetaData.Builder metaDataBuilder = MetaData.builder(meta);
                ClusterBlocks.Builder clusterBlocksBuilder = ClusterBlocks.builder().blocks(currentState.blocks());

                final IndexGraveyard.Builder graveyardBuilder = IndexGraveyard.builder(metaDataBuilder.indexGraveyard());
                final int previousGraveyardSize = graveyardBuilder.tombstones().size();
                for (final Index index : indices) {
                    String indexName = index.getName();
                    logger.debug("[{}] deleting index", index);
                    routingTableBuilder.remove(indexName);
                    clusterBlocksBuilder.removeIndexBlocks(indexName);
                    metaDataBuilder.remove(indexName);
                }
                // add tombstones to the cluster state for each deleted index
                final IndexGraveyard currentGraveyard = graveyardBuilder.addTombstones(indices).build(settings);
                metaDataBuilder.indexGraveyard(currentGraveyard); // the new graveyard set on the metadata
                logger.trace("{} tombstones purged from the cluster state. Previous tombstone size: {}. Current tombstone size: {}.",
                    graveyardBuilder.getNumPurged(), previousGraveyardSize, currentGraveyard.getTombstones().size());

                MetaData newMetaData = metaDataBuilder.build();
                ClusterBlocks blocks = clusterBlocksBuilder.build();
                RoutingAllocation.Result routingResult = allocationService.reroute(
                        ClusterState.builder(currentState).routingTable(routingTableBuilder.build()).metaData(newMetaData).build(),
                        "deleted indices [" + indices + "]");
                return ClusterState.builder(currentState).routingResult(routingResult).metaData(newMetaData).blocks(blocks).build();
                return deleteIndices(currentState, Arrays.asList(request.indices()));
            }
        });
    }

    /**
     * Delete some indices from the cluster state.
     */
    public ClusterState deleteIndices(ClusterState currentState, Collection<Index> indices) {
        final MetaData meta = currentState.metaData();
        final Set<IndexMetaData> metaDatas = indices.stream().map(i -> meta.getIndexSafe(i)).collect(toSet());
        // Check if index deletion conflicts with any running snapshots
        SnapshotsService.checkIndexDeletion(currentState, metaDatas);
        RoutingTable.Builder routingTableBuilder = RoutingTable.builder(currentState.routingTable());
        MetaData.Builder metaDataBuilder = MetaData.builder(meta);
        ClusterBlocks.Builder clusterBlocksBuilder = ClusterBlocks.builder().blocks(currentState.blocks());

        final IndexGraveyard.Builder graveyardBuilder = IndexGraveyard.builder(metaDataBuilder.indexGraveyard());
        final int previousGraveyardSize = graveyardBuilder.tombstones().size();
        for (final Index index : indices) {
            String indexName = index.getName();
            logger.debug("[{}] deleting index", index);
            routingTableBuilder.remove(indexName);
            clusterBlocksBuilder.removeIndexBlocks(indexName);
            metaDataBuilder.remove(indexName);
        }
        // add tombstones to the cluster state for each deleted index
        final IndexGraveyard currentGraveyard = graveyardBuilder.addTombstones(indices).build(settings);
        metaDataBuilder.indexGraveyard(currentGraveyard); // the new graveyard set on the metadata
        logger.trace("{} tombstones purged from the cluster state. Previous tombstone size: {}. Current tombstone size: {}.",
            graveyardBuilder.getNumPurged(), previousGraveyardSize, currentGraveyard.getTombstones().size());

        MetaData newMetaData = metaDataBuilder.build();
        ClusterBlocks blocks = clusterBlocksBuilder.build();
        RoutingAllocation.Result routingResult = allocationService.reroute(
                ClusterState.builder(currentState).routingTable(routingTableBuilder.build()).metaData(newMetaData).build(),
                "deleted indices [" + indices + "]");
        return ClusterState.builder(currentState).routingResult(routingResult).metaData(newMetaData).blocks(blocks).build();
    }
}
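
Extracting the public deleteIndices(ClusterState, Collection<Index>) overload above is what lets other cluster-state tasks delete indices inline instead of submitting a separate request. A hedged sketch of such a caller, assuming a deleteIndexService field and two Index handles (staleIndex, tmpIndex) are in scope; the enclosing task subclass is hypothetical:

// Inside some other ClusterStateUpdateTask:
@Override
public ClusterState execute(ClusterState currentState) {
    // Reuses the extracted overload; returns the state with both indices gone.
    return deleteIndexService.deleteIndices(currentState, Arrays.asList(staleIndex, tmpIndex));
}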

@@ -20,11 +20,13 @@
package org.elasticsearch.cluster.metadata;

import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesClusterStateUpdateRequest;
import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
import org.elasticsearch.cluster.metadata.AliasAction.NewAliasValidator;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.Strings;

@@ -38,11 +40,16 @@ import org.elasticsearch.index.NodeServicesProvider;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.indices.IndicesService;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;

import static java.util.Collections.emptyList;

/**
 * Service responsible for submitting add and remove aliases requests

@@ -57,108 +64,113 @@ public class MetaDataIndexAliasesService extends AbstractComponent {

    private final NodeServicesProvider nodeServicesProvider;

    private final MetaDataDeleteIndexService deleteIndexService;

    @Inject
    public MetaDataIndexAliasesService(Settings settings, ClusterService clusterService, IndicesService indicesService, AliasValidator aliasValidator, NodeServicesProvider nodeServicesProvider) {
    public MetaDataIndexAliasesService(Settings settings, ClusterService clusterService, IndicesService indicesService,
            AliasValidator aliasValidator, NodeServicesProvider nodeServicesProvider, MetaDataDeleteIndexService deleteIndexService) {
        super(settings);
        this.clusterService = clusterService;
        this.indicesService = indicesService;
        this.aliasValidator = aliasValidator;
        this.nodeServicesProvider = nodeServicesProvider;
        this.deleteIndexService = deleteIndexService;
    }

    public void indicesAliases(final IndicesAliasesClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
        clusterService.submitStateUpdateTask("index-aliases", new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {
    public void indicesAliases(final IndicesAliasesClusterStateUpdateRequest request,
            final ActionListener<ClusterStateUpdateResponse> listener) {
        clusterService.submitStateUpdateTask("index-aliases",
                new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {
            @Override
            protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
                return new ClusterStateUpdateResponse(acknowledged);
            }

            @Override
            public ClusterState execute(final ClusterState currentState) {
                List<Index> indicesToClose = new ArrayList<>();
                Map<String, IndexService> indices = new HashMap<>();
                try {
                    for (AliasAction aliasAction : request.actions()) {
                        aliasValidator.validateAliasAction(aliasAction, currentState.metaData());
                        if (!currentState.metaData().hasIndex(aliasAction.index())) {
                            throw new IndexNotFoundException(aliasAction.index());
                        }
                    }

                    boolean changed = false;
                    MetaData.Builder builder = MetaData.builder(currentState.metaData());
                    for (AliasAction aliasAction : request.actions()) {
                        IndexMetaData indexMetaData = builder.get(aliasAction.index());
                        if (indexMetaData == null) {
                            throw new IndexNotFoundException(aliasAction.index());
                        }
                        // TODO: not copy (putAll)
                        IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(indexMetaData);
                        if (aliasAction.actionType() == AliasAction.Type.ADD) {
                            String filter = aliasAction.filter();
                            if (Strings.hasLength(filter)) {
                                // parse the filter, in order to validate it
                                IndexService indexService = indices.get(indexMetaData.getIndex());
                                if (indexService == null) {
                                    indexService = indicesService.indexService(indexMetaData.getIndex());
                                    if (indexService == null) {
                                        // temporarily create the index and add mappings so we can parse the filter
                                        try {
                                            indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList());
                                            for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
                                                MappingMetaData mappingMetaData = cursor.value;
                                                indexService.mapperService().merge(mappingMetaData.type(), mappingMetaData.source(), MapperService.MergeReason.MAPPING_RECOVERY, false);
                                            }
                                        } catch (Exception e) {
                                            logger.warn("[{}] failed to temporary create in order to apply alias action", e, indexMetaData.getIndex());
                                            continue;
                                        }
                                        indicesToClose.add(indexMetaData.getIndex());
                                    }
                                    indices.put(indexMetaData.getIndex().getName(), indexService);
                                }

                                aliasValidator.validateAliasFilter(aliasAction.alias(), filter, indexService.newQueryShardContext());
                            }
                            AliasMetaData newAliasMd = AliasMetaData.newAliasMetaDataBuilder(
                                    aliasAction.alias())
                                    .filter(filter)
                                    .indexRouting(aliasAction.indexRouting())
                                    .searchRouting(aliasAction.searchRouting())
                                    .build();
                            // Check if this alias already exists
                            AliasMetaData aliasMd = indexMetaData.getAliases().get(aliasAction.alias());
                            if (aliasMd != null && aliasMd.equals(newAliasMd)) {
                                // It's the same alias - ignore it
                                continue;
                            }
                            indexMetaDataBuilder.putAlias(newAliasMd);
                        } else if (aliasAction.actionType() == AliasAction.Type.REMOVE) {
                            if (!indexMetaData.getAliases().containsKey(aliasAction.alias())) {
                                // This alias doesn't exist - ignore
                                continue;
                            }
                            indexMetaDataBuilder.removeAlias(aliasAction.alias());
                        }
                        changed = true;
                        builder.put(indexMetaDataBuilder);
                    }

                    if (changed) {
                        ClusterState updatedState = ClusterState.builder(currentState).metaData(builder).build();
                        // even though changes happened, they resulted in 0 actual changes to metadata
                        // i.e. remove and add the same alias to the same index
                        if (!updatedState.metaData().equalsAliases(currentState.metaData())) {
                            return updatedState;
                        }
                    }
                    return currentState;
                } finally {
                    for (Index index : indicesToClose) {
                        indicesService.removeIndex(index, "created for alias processing");
                    }
                }
            public ClusterState execute(ClusterState currentState) {
                return innerExecute(currentState, request.actions());
            }
        });
    }

    ClusterState innerExecute(ClusterState currentState, Iterable<AliasAction> actions) {
        List<Index> indicesToClose = new ArrayList<>();
        Map<String, IndexService> indices = new HashMap<>();
        try {
            boolean changed = false;
            // Gather all the indexes that must be removed first so:
            // 1. We don't cause an error when attempting to replace an index with an alias of the same name.
            // 2. We don't allow removal of aliases from indexes that we're just going to delete anyway. That'd be silly.
            Set<Index> indicesToDelete = new HashSet<>();
            for (AliasAction action : actions) {
                if (action.removeIndex()) {
                    IndexMetaData index = currentState.metaData().getIndices().get(action.getIndex());
                    if (index == null) {
                        throw new IndexNotFoundException(action.getIndex());
                    }
                    indicesToDelete.add(index.getIndex());
                    changed = true;
                }
            }
            // Remove the indexes if there are any to remove
            if (changed) {
                currentState = deleteIndexService.deleteIndices(currentState, indicesToDelete);
            }
            MetaData.Builder metadata = MetaData.builder(currentState.metaData());
            // Run the remaining alias actions
            for (AliasAction action : actions) {
                if (action.removeIndex()) {
                    // Handled above
                    continue;
                }
                IndexMetaData index = metadata.get(action.getIndex());
                if (index == null) {
                    throw new IndexNotFoundException(action.getIndex());
                }
                NewAliasValidator newAliasValidator = (alias, indexRouting, filter) -> {
                    /* It is important that we look up the index using the metadata builder we are modifying so we can remove an
                     * index and replace it with an alias. */
                    Function<String, IndexMetaData> indexLookup = name -> metadata.get(name);
                    aliasValidator.validateAlias(alias, action.getIndex(), indexRouting, indexLookup);
                    if (Strings.hasLength(filter)) {
                        IndexService indexService = indices.get(index.getIndex());
                        if (indexService == null) {
                            indexService = indicesService.indexService(index.getIndex());
                            if (indexService == null) {
                                // temporarily create the index and add mappings so we can parse the filter
                                try {
                                    indexService = indicesService.createIndex(nodeServicesProvider, index, emptyList());
                                } catch (IOException e) {
                                    throw new ElasticsearchException("Failed to create temporary index for parsing the alias", e);
                                }
                                for (ObjectCursor<MappingMetaData> cursor : index.getMappings().values()) {
                                    MappingMetaData mappingMetaData = cursor.value;
                                    indexService.mapperService().merge(mappingMetaData.type(), mappingMetaData.source(),
                                            MapperService.MergeReason.MAPPING_RECOVERY, false);
                                }
                                indicesToClose.add(index.getIndex());
                            }
                            indices.put(action.getIndex(), indexService);
                        }
                        aliasValidator.validateAliasFilter(alias, filter, indexService.newQueryShardContext());
                    }
                };
                changed |= action.apply(newAliasValidator, metadata, index);
            }

            if (changed) {
                ClusterState updatedState = ClusterState.builder(currentState).metaData(metadata).build();
                // even though changes happened, they resulted in 0 actual changes to metadata
                // i.e. remove and add the same alias to the same index
                if (!updatedState.metaData().equalsAliases(currentState.metaData())) {
                    return updatedState;
                }
            }
            return currentState;
        } finally {
            for (Index index : indicesToClose) {
                indicesService.removeIndex(index, "created for alias processing");
            }
        }
    }
}
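
Because innerExecute(...) above drains every removeIndex() action before replaying the remaining alias actions against the already-updated state, a single request can delete an index and re-point an alias of the same name in one atomic cluster-state update. A hedged sketch of that swap; innerExecute is package-private, so this assumes same-package test code, and the service and state variables are assumed to be in scope:

// Swap the concrete index "logs" for an alias "logs" on its replacement.
List<AliasAction> swap = Arrays.asList(
        new AliasAction.RemoveIndex("logs"),
        new AliasAction.Add("logs-v2", "logs", null, null, null));
ClusterState newState = aliasesService.innerExecute(currentState, swap);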

@@ -20,6 +20,8 @@
package org.elasticsearch.cluster.metadata;

import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest;
import org.elasticsearch.cluster.AckedClusterStateTaskListener;

@@ -193,7 +195,7 @@ public class MetaDataMappingService extends AbstractComponent {
                }
            }
        } catch (Exception e) {
            logger.warn("[{}] failed to refresh-mapping in cluster state", e, index);
            logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to refresh-mapping in cluster state", index), e);
        }
        return dirty;
    }

@@ -207,7 +209,7 @@ public class MetaDataMappingService extends AbstractComponent {
                refreshTask,
                ClusterStateTaskConfig.build(Priority.HIGH),
                refreshExecutor,
                (source, e) -> logger.warn("failure during [{}]", e, source)
                (source, e) -> logger.warn((Supplier<?>) () -> new ParameterizedMessage("failure during [{}]", source), e)
        );
    }

@@ -21,6 +21,7 @@ package org.elasticsearch.cluster.routing;

import com.carrotsearch.hppc.ObjectIntHashMap;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;

@@ -30,7 +31,6 @@ import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;

@@ -452,7 +452,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
     *
     * @return the started shard
     */
    public ShardRouting startShard(ESLogger logger, ShardRouting initializingShard, RoutingChangesObserver routingChangesObserver) {
    public ShardRouting startShard(Logger logger, ShardRouting initializingShard, RoutingChangesObserver routingChangesObserver) {
        ensureMutable();
        ShardRouting startedShard = started(initializingShard);
        logger.trace("{} marked shard as started (routing: {})", initializingShard.shardId(), initializingShard);

@@ -484,7 +484,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
     * - If shard is a (primary or replica) relocation target, this also clears the relocation information on the source shard.
     *
     */
    public void failShard(ESLogger logger, ShardRouting failedShard, UnassignedInfo unassignedInfo, IndexMetaData indexMetaData,
    public void failShard(Logger logger, ShardRouting failedShard, UnassignedInfo unassignedInfo, IndexMetaData indexMetaData,
                          RoutingChangesObserver routingChangesObserver) {
        ensureMutable();
        assert failedShard.assignedToNode() : "only assigned shards can be failed";

@@ -19,6 +19,8 @@

package org.elasticsearch.cluster.routing;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;

@@ -113,16 +115,16 @@ public class RoutingService extends AbstractLifecycleComponent {
                    rerouting.set(false);
                    ClusterState state = clusterService.state();
                    if (logger.isTraceEnabled()) {
                        logger.error("unexpected failure during [{}], current state:\n{}", e, source, state.prettyPrint());
                        logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}], current state:\n{}", source, state.prettyPrint()), e);
                    } else {
                        logger.error("unexpected failure during [{}], current state version [{}]", e, source, state.version());
                        logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}], current state version [{}]", source, state.version()), e);
                    }
                }
            });
        } catch (Exception e) {
            rerouting.set(false);
            ClusterState state = clusterService.state();
            logger.warn("failed to reroute routing table, current state:\n{}", e, state.prettyPrint());
            logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to reroute routing table, current state:\n{}", state.prettyPrint()), e);
        }
    }
}

Some files were not shown because too many files have changed in this diff