Merge branch 'master' into index-lifecycle
This commit is contained in:
commit
b9c04adb29
|
@ -284,6 +284,10 @@ public class SnippetsTask extends DefaultTask {
|
|||
contents.append(line).append('\n')
|
||||
return
|
||||
}
|
||||
// Allow line continuations for console snippets within lists
|
||||
if (snippet != null && line.trim() == '+') {
|
||||
return
|
||||
}
|
||||
// Just finished
|
||||
emit()
|
||||
}
|
||||
|
|
|
@ -7,7 +7,7 @@ jts = 1.15.0
|
|||
jackson = 2.8.10
|
||||
snakeyaml = 1.17
|
||||
# when updating log4j, please update also docs/java-api/index.asciidoc
|
||||
log4j = 2.9.1
|
||||
log4j = 2.11.1
|
||||
slf4j = 1.6.2
|
||||
|
||||
# when updating the JNA version, also update the version in buildSrc/build.gradle
|
||||
|
|
|
@ -19,11 +19,25 @@
|
|||
|
||||
package org.elasticsearch.client;
|
||||
|
||||
import org.apache.http.HttpEntity;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.io.Streams;
|
||||
import org.elasticsearch.common.xcontent.DeprecationHandler;
|
||||
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.protocol.xpack.license.GetLicenseRequest;
|
||||
import org.elasticsearch.protocol.xpack.license.GetLicenseResponse;
|
||||
import org.elasticsearch.protocol.xpack.license.PutLicenseRequest;
|
||||
import org.elasticsearch.protocol.xpack.license.PutLicenseResponse;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.InputStreamReader;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
|
||||
import static java.util.Collections.emptySet;
|
||||
|
||||
|
@ -54,7 +68,7 @@ public class LicenseClient {
|
|||
}
|
||||
|
||||
/**
|
||||
* Asynchronously updates license for the cluster cluster.
|
||||
* Asynchronously updates license for the cluster.
|
||||
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
|
||||
* @param listener the listener to be notified upon request completion
|
||||
*/
|
||||
|
@ -63,4 +77,59 @@ public class LicenseClient {
|
|||
PutLicenseResponse::fromXContent, listener, emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the current license for the cluster.
|
||||
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
|
||||
* @return the response
|
||||
* @throws IOException in case there is a problem sending the request or parsing back the response
|
||||
*/
|
||||
public GetLicenseResponse getLicense(GetLicenseRequest request, RequestOptions options) throws IOException {
|
||||
return restHighLevelClient.performRequest(request, RequestConverters::getLicense, options,
|
||||
response -> new GetLicenseResponse(convertResponseToJson(response)), emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously returns the current license for the cluster cluster.
|
||||
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
|
||||
* @param listener the listener to be notified upon request completion
|
||||
*/
|
||||
public void getLicenseAsync(GetLicenseRequest request, RequestOptions options, ActionListener<GetLicenseResponse> listener) {
|
||||
restHighLevelClient.performRequestAsync(request, RequestConverters::getLicense, options,
|
||||
response -> new GetLicenseResponse(convertResponseToJson(response)), listener, emptySet());
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Converts an entire response into a json sting
|
||||
*
|
||||
* This is useful for responses that we don't parse on the client side, but instead work as string
|
||||
* such as in case of the license JSON
|
||||
*/
|
||||
static String convertResponseToJson(Response response) throws IOException {
|
||||
HttpEntity entity = response.getEntity();
|
||||
if (entity == null) {
|
||||
throw new IllegalStateException("Response body expected but not returned");
|
||||
}
|
||||
if (entity.getContentType() == null) {
|
||||
throw new IllegalStateException("Elasticsearch didn't return the [Content-Type] header, unable to parse response body");
|
||||
}
|
||||
XContentType xContentType = XContentType.fromMediaTypeOrFormat(entity.getContentType().getValue());
|
||||
if (xContentType == null) {
|
||||
throw new IllegalStateException("Unsupported Content-Type: " + entity.getContentType().getValue());
|
||||
}
|
||||
if (xContentType == XContentType.JSON) {
|
||||
// No changes is required
|
||||
return Streams.copyToString(new InputStreamReader(response.getEntity().getContent(), StandardCharsets.UTF_8));
|
||||
} else {
|
||||
// Need to convert into JSON
|
||||
try (InputStream stream = response.getEntity().getContent();
|
||||
XContentParser parser = XContentFactory.xContent(xContentType).createParser(NamedXContentRegistry.EMPTY,
|
||||
DeprecationHandler.THROW_UNSUPPORTED_OPERATION, stream)) {
|
||||
parser.nextToken();
|
||||
XContentBuilder builder = XContentFactory.jsonBuilder();
|
||||
builder.copyCurrentStructure(parser);
|
||||
return Strings.toString(builder);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -107,6 +107,7 @@ import org.elasticsearch.common.xcontent.XContentType;
|
|||
import org.elasticsearch.index.VersionType;
|
||||
import org.elasticsearch.index.rankeval.RankEvalRequest;
|
||||
import org.elasticsearch.protocol.xpack.XPackInfoRequest;
|
||||
import org.elasticsearch.protocol.xpack.license.GetLicenseRequest;
|
||||
import org.elasticsearch.protocol.xpack.XPackUsageRequest;
|
||||
import org.elasticsearch.protocol.xpack.indexlifecycle.SetIndexLifecyclePolicyRequest;
|
||||
import org.elasticsearch.protocol.xpack.license.PutLicenseRequest;
|
||||
|
@ -1169,7 +1170,11 @@ final class RequestConverters {
|
|||
}
|
||||
|
||||
static Request putLicense(PutLicenseRequest putLicenseRequest) {
|
||||
Request request = new Request(HttpPut.METHOD_NAME, "/_xpack/license");
|
||||
String endpoint = new EndpointBuilder()
|
||||
.addPathPartAsIs("_xpack")
|
||||
.addPathPartAsIs("license")
|
||||
.build();
|
||||
Request request = new Request(HttpPut.METHOD_NAME, endpoint);
|
||||
Params parameters = new Params(request);
|
||||
parameters.withTimeout(putLicenseRequest.timeout());
|
||||
parameters.withMasterTimeout(putLicenseRequest.masterNodeTimeout());
|
||||
|
@ -1180,6 +1185,18 @@ final class RequestConverters {
|
|||
return request;
|
||||
}
|
||||
|
||||
|
||||
static Request getLicense(GetLicenseRequest getLicenseRequest) {
|
||||
String endpoint = new EndpointBuilder()
|
||||
.addPathPartAsIs("_xpack")
|
||||
.addPathPartAsIs("license")
|
||||
.build();
|
||||
Request request = new Request(HttpGet.METHOD_NAME, endpoint);
|
||||
Params parameters = new Params(request);
|
||||
parameters.withLocal(getLicenseRequest.local());
|
||||
return request;
|
||||
}
|
||||
|
||||
private static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) throws IOException {
|
||||
BytesRef source = XContentHelper.toXContent(toXContent, xContentType, false).toBytesRef();
|
||||
return new ByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType));
|
||||
|
|
|
@ -25,6 +25,8 @@ import org.elasticsearch.action.LatchedActionListener;
|
|||
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
|
||||
import org.elasticsearch.client.RequestOptions;
|
||||
import org.elasticsearch.client.RestHighLevelClient;
|
||||
import org.elasticsearch.protocol.xpack.license.GetLicenseRequest;
|
||||
import org.elasticsearch.protocol.xpack.license.GetLicenseResponse;
|
||||
import org.elasticsearch.protocol.xpack.license.LicensesStatus;
|
||||
import org.elasticsearch.protocol.xpack.license.PutLicenseRequest;
|
||||
import org.elasticsearch.protocol.xpack.license.PutLicenseResponse;
|
||||
|
@ -33,6 +35,8 @@ import java.util.Map;
|
|||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import static org.hamcrest.Matchers.containsString;
|
||||
import static org.hamcrest.Matchers.endsWith;
|
||||
import static org.hamcrest.Matchers.hasSize;
|
||||
import static org.hamcrest.Matchers.not;
|
||||
import static org.hamcrest.Matchers.startsWith;
|
||||
|
@ -105,4 +109,62 @@ public class LicensingDocumentationIT extends ESRestHighLevelClientTestCase {
|
|||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
|
||||
public void testGetLicense() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
{
|
||||
//tag::get-license-execute
|
||||
GetLicenseRequest request = new GetLicenseRequest();
|
||||
|
||||
GetLicenseResponse response = client.license().getLicense(request, RequestOptions.DEFAULT);
|
||||
//end::get-license-execute
|
||||
|
||||
//tag::get-license-response
|
||||
String currentLicense = response.getLicenseDefinition(); // <1>
|
||||
//end::get-license-response
|
||||
|
||||
assertThat(currentLicense, containsString("trial"));
|
||||
assertThat(currentLicense, containsString("client_rest-high-level_integTestCluster"));
|
||||
}
|
||||
{
|
||||
GetLicenseRequest request = new GetLicenseRequest();
|
||||
// tag::get-license-execute-listener
|
||||
ActionListener<GetLicenseResponse> listener = new ActionListener<GetLicenseResponse>() {
|
||||
@Override
|
||||
public void onResponse(GetLicenseResponse indexResponse) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::get-license-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::get-license-execute-async
|
||||
client.license().getLicenseAsync(
|
||||
request, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::get-license-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
{
|
||||
GetLicenseRequest request = new GetLicenseRequest();
|
||||
RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder();
|
||||
// Make sure that it still works in other formats
|
||||
builder.addHeader("Accept", randomFrom("application/smile", "application/cbor"));
|
||||
RequestOptions options = builder.build();
|
||||
GetLicenseResponse response = client.license().getLicense(request, options);
|
||||
String currentLicense = response.getLicenseDefinition();
|
||||
assertThat(currentLicense, startsWith("{"));
|
||||
assertThat(currentLicense, containsString("trial"));
|
||||
assertThat(currentLicense, containsString("client_rest-high-level_integTestCluster"));
|
||||
assertThat(currentLicense, endsWith("}"));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -81,7 +81,7 @@ You need to also include Log4j 2 dependencies:
|
|||
<dependency>
|
||||
<groupId>org.apache.logging.log4j</groupId>
|
||||
<artifactId>log4j-core</artifactId>
|
||||
<version>2.9.1</version>
|
||||
<version>2.11.1</version>
|
||||
</dependency>
|
||||
--------------------------------------------------
|
||||
|
||||
|
@ -109,7 +109,7 @@ If you want to use another logger than Log4j 2, you can use http://www.slf4j.org
|
|||
<dependency>
|
||||
<groupId>org.apache.logging.log4j</groupId>
|
||||
<artifactId>log4j-to-slf4j</artifactId>
|
||||
<version>2.9.1</version>
|
||||
<version>2.11.1</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.slf4j</groupId>
|
||||
|
|
|
@ -0,0 +1,50 @@
|
|||
[[java-rest-high-get-license]]
|
||||
=== Get License
|
||||
|
||||
[[java-rest-high-get-license-execution]]
|
||||
==== Execution
|
||||
|
||||
The license can be added or updated using the `getLicense()` method:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/LicensingDocumentationIT.java[get-license-execute]
|
||||
--------------------------------------------------
|
||||
|
||||
[[java-rest-high-get-license-response]]
|
||||
==== Response
|
||||
|
||||
The returned `GetLicenseResponse` contains the license in the JSON format.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/LicensingDocumentationIT.java[get-license-response]
|
||||
--------------------------------------------------
|
||||
<1> The text of the license.
|
||||
|
||||
[[java-rest-high-get-license-async]]
|
||||
==== Asynchronous Execution
|
||||
|
||||
This request can be executed asynchronously:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/LicensingDocumentationIT.java[get-license-execute-async]
|
||||
--------------------------------------------------
|
||||
<1> The `GetLicenseRequest` to execute and the `ActionListener` to use when
|
||||
the execution completes
|
||||
|
||||
The asynchronous method does not block and returns immediately. Once it is
|
||||
completed the `ActionListener` is called back using the `onResponse` method
|
||||
if the execution successfully completed or using the `onFailure` method if
|
||||
it failed.
|
||||
|
||||
A typical listener for `GetLicenseResponse` looks like:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/LicensingDocumentationIT.java[get-license-execute-listener]
|
||||
--------------------------------------------------
|
||||
<1> Called when the execution is successfully completed. The response is
|
||||
provided as an argument
|
||||
<2> Called in case of failure. The raised exception is provided as an argument
|
|
@ -4,9 +4,9 @@
|
|||
{es} is also available as Docker images.
|
||||
The images use https://hub.docker.com/_/centos/[centos:7] as the base image.
|
||||
|
||||
A list of all published Docker images and tags can be found in
|
||||
https://www.docker.elastic.co[www.docker.elastic.co]. The source code can be found
|
||||
on https://github.com/elastic/elasticsearch-docker/tree/{branch}[GitHub].
|
||||
A list of all published Docker images and tags is available at
|
||||
https://www.docker.elastic.co[www.docker.elastic.co]. The source code is in
|
||||
https://github.com/elastic/elasticsearch-docker/tree/{branch}[GitHub].
|
||||
|
||||
These images are free to use under the Elastic license. They contain open source
|
||||
and free commercial features and access to paid commercial features.
|
||||
|
@ -29,15 +29,13 @@ endif::[]
|
|||
|
||||
ifeval::["{release-state}"!="unreleased"]
|
||||
|
||||
For example, the Docker image can be retrieved with the following command:
|
||||
|
||||
["source","sh",subs="attributes"]
|
||||
--------------------------------------------
|
||||
docker pull {docker-repo}:{version}
|
||||
--------------------------------------------
|
||||
|
||||
Alternatively, you can download other Docker images that contain only features
|
||||
that are available under the Apache 2.0 license from
|
||||
available under the Apache 2.0 license. To download the images, go to
|
||||
https://www.docker.elastic.co[www.docker.elastic.co].
|
||||
|
||||
endif::[]
|
||||
|
|
|
@ -21,6 +21,8 @@ package org.elasticsearch.grok;
|
|||
import java.util.Map;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.ScheduledFuture;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.function.BiFunction;
|
||||
import java.util.function.LongSupplier;
|
||||
|
||||
|
@ -104,6 +106,8 @@ public interface ThreadWatchdog {
|
|||
private final long maxExecutionTime;
|
||||
private final LongSupplier relativeTimeSupplier;
|
||||
private final BiFunction<Long, Runnable, ScheduledFuture<?>> scheduler;
|
||||
private final AtomicInteger registered = new AtomicInteger(0);
|
||||
private final AtomicBoolean running = new AtomicBoolean(false);
|
||||
final ConcurrentHashMap<Thread, Long> registry = new ConcurrentHashMap<>();
|
||||
|
||||
private Default(long interval,
|
||||
|
@ -114,11 +118,14 @@ public interface ThreadWatchdog {
|
|||
this.maxExecutionTime = maxExecutionTime;
|
||||
this.relativeTimeSupplier = relativeTimeSupplier;
|
||||
this.scheduler = scheduler;
|
||||
scheduler.apply(interval, this::interruptLongRunningExecutions);
|
||||
}
|
||||
|
||||
public void register() {
|
||||
registered.getAndIncrement();
|
||||
Long previousValue = registry.put(Thread.currentThread(), relativeTimeSupplier.getAsLong());
|
||||
if (running.compareAndSet(false, true) == true) {
|
||||
scheduler.apply(interval, this::interruptLongRunningExecutions);
|
||||
}
|
||||
assert previousValue == null;
|
||||
}
|
||||
|
||||
|
@ -129,6 +136,7 @@ public interface ThreadWatchdog {
|
|||
|
||||
public void unregister() {
|
||||
Long previousValue = registry.remove(Thread.currentThread());
|
||||
registered.decrementAndGet();
|
||||
assert previousValue != null;
|
||||
}
|
||||
|
||||
|
@ -140,7 +148,11 @@ public interface ThreadWatchdog {
|
|||
// not removing the entry here, this happens in the unregister() method.
|
||||
}
|
||||
}
|
||||
if (registered.get() > 0) {
|
||||
scheduler.apply(interval, this::interruptLongRunningExecutions);
|
||||
} else {
|
||||
running.set(false);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -18,12 +18,22 @@
|
|||
*/
|
||||
package org.elasticsearch.grok;
|
||||
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.CompletableFuture;
|
||||
import java.util.concurrent.ScheduledExecutorService;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.mockito.Mockito;
|
||||
|
||||
import static org.hamcrest.Matchers.is;
|
||||
import static org.mockito.Matchers.any;
|
||||
import static org.mockito.Matchers.eq;
|
||||
import static org.mockito.Mockito.doAnswer;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.verify;
|
||||
import static org.mockito.Mockito.verifyNoMoreInteractions;
|
||||
import static org.mockito.Mockito.verifyZeroInteractions;
|
||||
|
||||
public class ThreadWatchdogTests extends ESTestCase {
|
||||
|
||||
|
@ -67,4 +77,38 @@ public class ThreadWatchdogTests extends ESTestCase {
|
|||
});
|
||||
}
|
||||
|
||||
public void testIdleIfNothingRegistered() throws Exception {
|
||||
long interval = 1L;
|
||||
ScheduledExecutorService threadPool = mock(ScheduledExecutorService.class);
|
||||
ThreadWatchdog watchdog = ThreadWatchdog.newInstance(interval, Long.MAX_VALUE, System::currentTimeMillis,
|
||||
(delay, command) -> threadPool.schedule(command, delay, TimeUnit.MILLISECONDS));
|
||||
// Periodic action is not scheduled because no thread is registered
|
||||
verifyZeroInteractions(threadPool);
|
||||
CompletableFuture<Runnable> commandFuture = new CompletableFuture<>();
|
||||
// Periodic action is scheduled because a thread is registered
|
||||
doAnswer(invocationOnMock -> {
|
||||
commandFuture.complete((Runnable) invocationOnMock.getArguments()[0]);
|
||||
return null;
|
||||
}).when(threadPool).schedule(
|
||||
any(Runnable.class), eq(interval), eq(TimeUnit.MILLISECONDS)
|
||||
);
|
||||
watchdog.register();
|
||||
// Registering the first thread should have caused the command to get scheduled again
|
||||
Runnable command = commandFuture.get(1L, TimeUnit.MILLISECONDS);
|
||||
Mockito.reset(threadPool);
|
||||
watchdog.unregister();
|
||||
command.run();
|
||||
// Periodic action is not scheduled again because no thread is registered
|
||||
verifyZeroInteractions(threadPool);
|
||||
watchdog.register();
|
||||
Thread otherThread = new Thread(watchdog::register);
|
||||
try {
|
||||
verify(threadPool).schedule(any(Runnable.class), eq(interval), eq(TimeUnit.MILLISECONDS));
|
||||
// Registering a second thread does not cause the command to get scheduled twice
|
||||
verifyNoMoreInteractions(threadPool);
|
||||
otherThread.start();
|
||||
} finally {
|
||||
otherThread.join();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -35,7 +35,7 @@ import java.io.OutputStream;
|
|||
*/
|
||||
public class XContentFactory {
|
||||
|
||||
private static final int GUESS_HEADER_LENGTH = 20;
|
||||
static final int GUESS_HEADER_LENGTH = 20;
|
||||
|
||||
/**
|
||||
* Returns a content builder using JSON format ({@link org.elasticsearch.common.xcontent.XContentType#JSON}.
|
||||
|
@ -153,8 +153,10 @@ public class XContentFactory {
|
|||
return XContentType.JSON;
|
||||
}
|
||||
// Should we throw a failure here? Smile idea is to use it in bytes....
|
||||
if (length > 2 && first == SmileConstants.HEADER_BYTE_1 && content.charAt(1) == SmileConstants.HEADER_BYTE_2 &&
|
||||
content.charAt(2) == SmileConstants.HEADER_BYTE_3) {
|
||||
if (length > 2
|
||||
&& first == SmileConstants.HEADER_BYTE_1
|
||||
&& content.charAt(1) == SmileConstants.HEADER_BYTE_2
|
||||
&& content.charAt(2) == SmileConstants.HEADER_BYTE_3) {
|
||||
return XContentType.SMILE;
|
||||
}
|
||||
if (length > 2 && first == '-' && content.charAt(1) == '-' && content.charAt(2) == '-') {
|
||||
|
@ -227,13 +229,29 @@ public class XContentFactory {
|
|||
*/
|
||||
@Deprecated
|
||||
public static XContentType xContentType(InputStream si) throws IOException {
|
||||
/*
|
||||
* We need to guess the content type. To do this, we look for the first non-whitespace character and then try to guess the content
|
||||
* type on the GUESS_HEADER_LENGTH bytes that follow. We do this in a way that does not modify the initial read position in the
|
||||
* underlying input stream. This is why the input stream must support mark/reset and why we repeatedly mark the read position and
|
||||
* reset.
|
||||
*/
|
||||
if (si.markSupported() == false) {
|
||||
throw new IllegalArgumentException("Cannot guess the xcontent type without mark/reset support on " + si.getClass());
|
||||
}
|
||||
si.mark(GUESS_HEADER_LENGTH);
|
||||
si.mark(Integer.MAX_VALUE);
|
||||
try {
|
||||
// scan until we find the first non-whitespace character or the end of the stream
|
||||
int current;
|
||||
do {
|
||||
current = si.read();
|
||||
if (current == -1) {
|
||||
return null;
|
||||
}
|
||||
} while (Character.isWhitespace((char) current));
|
||||
// now guess the content type off the next GUESS_HEADER_LENGTH bytes including the current byte
|
||||
final byte[] firstBytes = new byte[GUESS_HEADER_LENGTH];
|
||||
int read = 0;
|
||||
firstBytes[0] = (byte) current;
|
||||
int read = 1;
|
||||
while (read < GUESS_HEADER_LENGTH) {
|
||||
final int r = si.read(firstBytes, read, GUESS_HEADER_LENGTH - read);
|
||||
if (r == -1) {
|
||||
|
@ -245,6 +263,7 @@ public class XContentFactory {
|
|||
} finally {
|
||||
si.reset();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -278,8 +297,10 @@ public class XContentFactory {
|
|||
if (first == '{') {
|
||||
return XContentType.JSON;
|
||||
}
|
||||
if (length > 2 && first == SmileConstants.HEADER_BYTE_1 && bytes[offset + 1] == SmileConstants.HEADER_BYTE_2 &&
|
||||
bytes[offset + 2] == SmileConstants.HEADER_BYTE_3) {
|
||||
if (length > 2
|
||||
&& first == SmileConstants.HEADER_BYTE_1
|
||||
&& bytes[offset + 1] == SmileConstants.HEADER_BYTE_2
|
||||
&& bytes[offset + 2] == SmileConstants.HEADER_BYTE_3) {
|
||||
return XContentType.SMILE;
|
||||
}
|
||||
if (length > 2 && first == '-' && bytes[offset + 1] == '-' && bytes[offset + 2] == '-') {
|
||||
|
|
|
@ -564,7 +564,6 @@ thirdPartyAudit.excludes = [
|
|||
// we are not pulling in slf4j-ext, this is okay, Log4j will fallback gracefully
|
||||
'org.slf4j.ext.EventData',
|
||||
|
||||
'org.apache.log4j.AppenderSkeleton',
|
||||
'org.apache.log4j.AsyncAppender',
|
||||
'org.apache.log4j.helpers.ISO8601DateFormat',
|
||||
'org.apache.log4j.spi.ThrowableInformation',
|
||||
|
|
|
@ -0,0 +1 @@
|
|||
4b41b53a3a2d299ce381a69d165381ca19f62912
|
|
@ -1 +0,0 @@
|
|||
0a97a849b18b3798c4af1a2ca5b10c66cef17e3a
|
|
@ -220,7 +220,6 @@ thirdPartyAudit.excludes = [
|
|||
'com.fasterxml.jackson.dataformat.xml.JacksonXmlModule',
|
||||
'com.fasterxml.jackson.dataformat.xml.XmlMapper',
|
||||
'com.fasterxml.jackson.dataformat.xml.util.DefaultXmlPrettyPrinter',
|
||||
'com.fasterxml.jackson.databind.node.JsonNodeFactory',
|
||||
'com.fasterxml.jackson.databind.node.ObjectNode',
|
||||
'org.fusesource.jansi.Ansi',
|
||||
'org.fusesource.jansi.AnsiRenderer$Code',
|
||||
|
@ -262,12 +261,6 @@ thirdPartyAudit.excludes = [
|
|||
'javax.mail.internet.MimeMultipart',
|
||||
'javax.mail.internet.MimeUtility',
|
||||
'javax.mail.util.ByteArrayDataSource',
|
||||
'javax.persistence.AttributeConverter',
|
||||
'javax.persistence.EntityManager',
|
||||
'javax.persistence.EntityManagerFactory',
|
||||
'javax.persistence.EntityTransaction',
|
||||
'javax.persistence.Persistence',
|
||||
'javax.persistence.PersistenceException',
|
||||
'org.apache.commons.compress.compressors.CompressorStreamFactory',
|
||||
'org.apache.commons.compress.utils.IOUtils',
|
||||
'org.apache.commons.csv.CSVFormat',
|
||||
|
@ -311,6 +304,16 @@ thirdPartyAudit.excludes = [
|
|||
'com.google.common.geometry.S2LatLng',
|
||||
]
|
||||
|
||||
if (JavaVersion.current() <= JavaVersion.VERSION_1_8) {
|
||||
// Used by Log4J 2.11.1
|
||||
thirdPartyAudit.excludes += [
|
||||
'java.io.ObjectInputFilter',
|
||||
'java.io.ObjectInputFilter$Config',
|
||||
'java.io.ObjectInputFilter$FilterInfo',
|
||||
'java.io.ObjectInputFilter$Status'
|
||||
]
|
||||
}
|
||||
|
||||
if (JavaVersion.current() > JavaVersion.VERSION_1_8) {
|
||||
thirdPartyAudit.excludes += ['javax.xml.bind.DatatypeConverter']
|
||||
}
|
||||
|
|
|
@ -0,0 +1 @@
|
|||
3aba3398fe064a3eab4331f88161c7480e848418
|
|
@ -1 +0,0 @@
|
|||
894f96d677880d4ab834a1356f62b875e579caaa
|
|
@ -0,0 +1 @@
|
|||
268f0fe4df3eefe052b57c87ec48517d64fb2a10
|
|
@ -1 +0,0 @@
|
|||
7a2999229464e7a324aa503c0a52ec0f05efe7bd
|
|
@ -0,0 +1 @@
|
|||
592a48674c926b01a9a747c7831bcd82a9e6d6e4
|
|
@ -1 +0,0 @@
|
|||
c041978c686866ee8534f538c6220238db3bb6be
|
|
@ -39,6 +39,7 @@ import java.util.Collections;
|
|||
import java.util.HashSet;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
import java.util.Optional;
|
||||
import java.util.Queue;
|
||||
import java.util.Set;
|
||||
|
@ -278,7 +279,7 @@ public final class ExceptionsHelper {
|
|||
List<ShardOperationFailedException> uniqueFailures = new ArrayList<>();
|
||||
Set<GroupBy> reasons = new HashSet<>();
|
||||
for (ShardOperationFailedException failure : failures) {
|
||||
GroupBy reason = new GroupBy(failure.getCause());
|
||||
GroupBy reason = new GroupBy(failure);
|
||||
if (reasons.contains(reason) == false) {
|
||||
reasons.add(reason);
|
||||
uniqueFailures.add(failure);
|
||||
|
@ -287,46 +288,52 @@ public final class ExceptionsHelper {
|
|||
return uniqueFailures.toArray(new ShardOperationFailedException[0]);
|
||||
}
|
||||
|
||||
static class GroupBy {
|
||||
private static class GroupBy {
|
||||
final String reason;
|
||||
final String index;
|
||||
final Class<? extends Throwable> causeType;
|
||||
|
||||
GroupBy(Throwable t) {
|
||||
if (t instanceof ElasticsearchException) {
|
||||
final Index index = ((ElasticsearchException) t).getIndex();
|
||||
GroupBy(ShardOperationFailedException failure) {
|
||||
Throwable cause = failure.getCause();
|
||||
//the index name from the failure contains the cluster alias when using CCS. Ideally failures should be grouped by
|
||||
//index name and cluster alias. That's why the failure index name has the precedence over the one coming from the cause,
|
||||
//which does not include the cluster alias.
|
||||
String indexName = failure.index();
|
||||
if (indexName == null) {
|
||||
if (cause instanceof ElasticsearchException) {
|
||||
final Index index = ((ElasticsearchException) cause).getIndex();
|
||||
if (index != null) {
|
||||
this.index = index.getName();
|
||||
} else {
|
||||
this.index = null;
|
||||
indexName = index.getName();
|
||||
}
|
||||
} else {
|
||||
index = null;
|
||||
}
|
||||
reason = t.getMessage();
|
||||
causeType = t.getClass();
|
||||
}
|
||||
this.index = indexName;
|
||||
if (cause == null) {
|
||||
this.reason = failure.reason();
|
||||
this.causeType = null;
|
||||
} else {
|
||||
this.reason = cause.getMessage();
|
||||
this.causeType = cause.getClass();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
|
||||
GroupBy groupBy = (GroupBy) o;
|
||||
|
||||
if (!causeType.equals(groupBy.causeType)) return false;
|
||||
if (index != null ? !index.equals(groupBy.index) : groupBy.index != null) return false;
|
||||
if (reason != null ? !reason.equals(groupBy.reason) : groupBy.reason != null) return false;
|
||||
|
||||
if (this == o) {
|
||||
return true;
|
||||
}
|
||||
if (o == null || getClass() != o.getClass()) {
|
||||
return false;
|
||||
}
|
||||
GroupBy groupBy = (GroupBy) o;
|
||||
return Objects.equals(reason, groupBy.reason) &&
|
||||
Objects.equals(index, groupBy.index) &&
|
||||
Objects.equals(causeType, groupBy.causeType);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int result = reason != null ? reason.hashCode() : 0;
|
||||
result = 31 * result + (index != null ? index.hashCode() : 0);
|
||||
result = 31 * result + causeType.hashCode();
|
||||
return result;
|
||||
return Objects.hash(reason, index, causeType);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -34,6 +34,7 @@ import org.elasticsearch.index.shard.ShardId;
|
|||
import org.elasticsearch.rest.RestStatus;
|
||||
import org.elasticsearch.search.SearchException;
|
||||
import org.elasticsearch.search.SearchShardTarget;
|
||||
import org.elasticsearch.transport.RemoteClusterAware;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
|
@ -66,7 +67,7 @@ public class ShardSearchFailure implements ShardOperationFailedException {
|
|||
|
||||
public ShardSearchFailure(Exception e, @Nullable SearchShardTarget shardTarget) {
|
||||
final Throwable actual = ExceptionsHelper.unwrapCause(e);
|
||||
if (actual != null && actual instanceof SearchException) {
|
||||
if (actual instanceof SearchException) {
|
||||
this.shardTarget = ((SearchException) actual).shard();
|
||||
} else if (shardTarget != null) {
|
||||
this.shardTarget = shardTarget;
|
||||
|
@ -105,7 +106,7 @@ public class ShardSearchFailure implements ShardOperationFailedException {
|
|||
@Override
|
||||
public String index() {
|
||||
if (shardTarget != null) {
|
||||
return shardTarget.getIndex();
|
||||
return shardTarget.getFullyQualifiedIndexName();
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
@ -186,6 +187,7 @@ public class ShardSearchFailure implements ShardOperationFailedException {
|
|||
String currentFieldName = null;
|
||||
int shardId = -1;
|
||||
String indexName = null;
|
||||
String clusterAlias = null;
|
||||
String nodeId = null;
|
||||
ElasticsearchException exception = null;
|
||||
while((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
|
@ -196,6 +198,11 @@ public class ShardSearchFailure implements ShardOperationFailedException {
|
|||
shardId = parser.intValue();
|
||||
} else if (INDEX_FIELD.equals(currentFieldName)) {
|
||||
indexName = parser.text();
|
||||
int indexOf = indexName.indexOf(RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR);
|
||||
if (indexOf > 0) {
|
||||
clusterAlias = indexName.substring(0, indexOf);
|
||||
indexName = indexName.substring(indexOf + 1);
|
||||
}
|
||||
} else if (NODE_FIELD.equals(currentFieldName)) {
|
||||
nodeId = parser.text();
|
||||
} else {
|
||||
|
@ -214,7 +221,7 @@ public class ShardSearchFailure implements ShardOperationFailedException {
|
|||
SearchShardTarget searchShardTarget = null;
|
||||
if (nodeId != null) {
|
||||
searchShardTarget = new SearchShardTarget(nodeId,
|
||||
new ShardId(new Index(indexName, IndexMetaData.INDEX_UUID_NA_VALUE), shardId), null, OriginalIndices.NONE);
|
||||
new ShardId(new Index(indexName, IndexMetaData.INDEX_UUID_NA_VALUE), shardId), clusterAlias, OriginalIndices.NONE);
|
||||
}
|
||||
return new ShardSearchFailure(exception, searchShardTarget);
|
||||
}
|
||||
|
|
|
@ -38,7 +38,7 @@ public class QueryShardException extends ElasticsearchException {
|
|||
|
||||
public QueryShardException(QueryShardContext context, String msg, Throwable cause, Object... args) {
|
||||
super(msg, cause, args);
|
||||
setIndex(context.index());
|
||||
setIndex(context.getFullyQualifiedIndexName());
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -79,7 +79,9 @@ public abstract class BaseTranslogReader implements Comparable<BaseTranslogReade
|
|||
final int size = reusableBuffer.getInt() + 4;
|
||||
final long maxSize = sizeInBytes() - position;
|
||||
if (size < 0 || size > maxSize) {
|
||||
throw new TranslogCorruptedException("operation size is corrupted must be [0.." + maxSize + "] but was: " + size);
|
||||
throw new TranslogCorruptedException(
|
||||
path.toString(),
|
||||
"operation size is corrupted must be [0.." + maxSize + "] but was: " + size);
|
||||
}
|
||||
return size;
|
||||
}
|
||||
|
@ -103,13 +105,15 @@ public abstract class BaseTranslogReader implements Comparable<BaseTranslogReade
|
|||
buffer.limit(opSize);
|
||||
readBytes(buffer, position);
|
||||
buffer.flip();
|
||||
return new BufferedChecksumStreamInput(new ByteBufferStreamInput(buffer), reuse);
|
||||
return new BufferedChecksumStreamInput(new ByteBufferStreamInput(buffer), path.toString(), reuse);
|
||||
}
|
||||
|
||||
protected Translog.Operation read(BufferedChecksumStreamInput inStream) throws IOException {
|
||||
final Translog.Operation op = Translog.readOperation(inStream);
|
||||
if (op.primaryTerm() > getPrimaryTerm() && getPrimaryTerm() != TranslogHeader.UNKNOWN_PRIMARY_TERM) {
|
||||
throw new TranslogCorruptedException("Operation's term is newer than translog header term; " +
|
||||
throw new TranslogCorruptedException(
|
||||
path.toString(),
|
||||
"operation's term is newer than translog header term; " +
|
||||
"operation term[" + op.primaryTerm() + "], translog header term [" + getPrimaryTerm() + "]");
|
||||
}
|
||||
return op;
|
||||
|
|
|
@ -35,14 +35,11 @@ public final class BufferedChecksumStreamInput extends FilterStreamInput {
|
|||
private static final int SKIP_BUFFER_SIZE = 1024;
|
||||
private byte[] skipBuffer;
|
||||
private final Checksum digest;
|
||||
private final String source;
|
||||
|
||||
public BufferedChecksumStreamInput(StreamInput in) {
|
||||
super(in);
|
||||
this.digest = new BufferedChecksum(new CRC32());
|
||||
}
|
||||
|
||||
public BufferedChecksumStreamInput(StreamInput in, BufferedChecksumStreamInput reuse) {
|
||||
public BufferedChecksumStreamInput(StreamInput in, String source, BufferedChecksumStreamInput reuse) {
|
||||
super(in);
|
||||
this.source = source;
|
||||
if (reuse == null ) {
|
||||
this.digest = new BufferedChecksum(new CRC32());
|
||||
} else {
|
||||
|
@ -52,6 +49,10 @@ public final class BufferedChecksumStreamInput extends FilterStreamInput {
|
|||
}
|
||||
}
|
||||
|
||||
public BufferedChecksumStreamInput(StreamInput in, String source) {
|
||||
this(in, source, null);
|
||||
}
|
||||
|
||||
public long getChecksum() {
|
||||
return this.digest.getValue();
|
||||
}
|
||||
|
@ -85,7 +86,6 @@ public final class BufferedChecksumStreamInput extends FilterStreamInput {
|
|||
return delegate.markSupported();
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public long skip(long numBytes) throws IOException {
|
||||
if (numBytes < 0) {
|
||||
|
@ -104,7 +104,6 @@ public final class BufferedChecksumStreamInput extends FilterStreamInput {
|
|||
return skipped;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public synchronized void mark(int readlimit) {
|
||||
delegate.mark(readlimit);
|
||||
|
@ -114,4 +113,7 @@ public final class BufferedChecksumStreamInput extends FilterStreamInput {
|
|||
digest.reset();
|
||||
}
|
||||
|
||||
public String getSource(){
|
||||
return source;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1427,7 +1427,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
|
|||
long expectedChecksum = in.getChecksum();
|
||||
long readChecksum = Integer.toUnsignedLong(in.readInt());
|
||||
if (readChecksum != expectedChecksum) {
|
||||
throw new TranslogCorruptedException("translog stream is corrupted, expected: 0x" +
|
||||
throw new TranslogCorruptedException(in.getSource(), "checksum verification failed - expected: 0x" +
|
||||
Long.toHexString(expectedChecksum) + ", got: 0x" + Long.toHexString(readChecksum));
|
||||
}
|
||||
}
|
||||
|
@ -1435,10 +1435,10 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
|
|||
/**
|
||||
* Reads a list of operations written with {@link #writeOperations(StreamOutput, List)}
|
||||
*/
|
||||
public static List<Operation> readOperations(StreamInput input) throws IOException {
|
||||
public static List<Operation> readOperations(StreamInput input, String source) throws IOException {
|
||||
ArrayList<Operation> operations = new ArrayList<>();
|
||||
int numOps = input.readInt();
|
||||
final BufferedChecksumStreamInput checksumStreamInput = new BufferedChecksumStreamInput(input);
|
||||
final BufferedChecksumStreamInput checksumStreamInput = new BufferedChecksumStreamInput(input, source);
|
||||
for (int i = 0; i < numOps; i++) {
|
||||
operations.add(readOperation(checksumStreamInput));
|
||||
}
|
||||
|
@ -1450,7 +1450,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
|
|||
try {
|
||||
final int opSize = in.readInt();
|
||||
if (opSize < 4) { // 4byte for the checksum
|
||||
throw new TranslogCorruptedException("operation size must be at least 4 but was: " + opSize);
|
||||
throw new TranslogCorruptedException(in.getSource(), "operation size must be at least 4 but was: " + opSize);
|
||||
}
|
||||
in.resetDigest(); // size is not part of the checksum!
|
||||
if (in.markSupported()) { // if we can we validate the checksum first
|
||||
|
@ -1465,17 +1465,15 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
|
|||
}
|
||||
operation = Translog.Operation.readOperation(in);
|
||||
verifyChecksum(in);
|
||||
} catch (TranslogCorruptedException e) {
|
||||
throw e;
|
||||
} catch (EOFException e) {
|
||||
throw new TruncatedTranslogException("reached premature end of file, translog is truncated", e);
|
||||
throw new TruncatedTranslogException(in.getSource(), "reached premature end of file, translog is truncated", e);
|
||||
}
|
||||
return operation;
|
||||
}
|
||||
|
||||
/**
|
||||
* Writes all operations in the given iterable to the given output stream including the size of the array
|
||||
* use {@link #readOperations(StreamInput)} to read it back.
|
||||
* use {@link #readOperations(StreamInput, String)} to read it back.
|
||||
*/
|
||||
public static void writeOperations(StreamOutput outStream, List<Operation> toWrite) throws IOException {
|
||||
final ReleasableBytesStreamOutput out = new ReleasableBytesStreamOutput(BigArrays.NON_RECYCLING_INSTANCE);
|
||||
|
@ -1716,7 +1714,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
|
|||
} catch (TranslogCorruptedException ex) {
|
||||
throw ex; // just bubble up.
|
||||
} catch (Exception ex) {
|
||||
throw new TranslogCorruptedException("Translog at [" + location + "] is corrupted", ex);
|
||||
throw new TranslogCorruptedException(location.toString(), ex);
|
||||
}
|
||||
return checkpoint;
|
||||
}
|
||||
|
|
|
@ -25,12 +25,24 @@ import org.elasticsearch.common.io.stream.StreamInput;
|
|||
import java.io.IOException;
|
||||
|
||||
public class TranslogCorruptedException extends ElasticsearchException {
|
||||
public TranslogCorruptedException(String msg) {
|
||||
super(msg);
|
||||
public TranslogCorruptedException(String source, String details) {
|
||||
super(corruptedMessage(source, details));
|
||||
}
|
||||
|
||||
public TranslogCorruptedException(String msg, Throwable cause) {
|
||||
super(msg, cause);
|
||||
public TranslogCorruptedException(String source, Throwable cause) {
|
||||
this(source, null, cause);
|
||||
}
|
||||
|
||||
public TranslogCorruptedException(String source, String details, Throwable cause) {
|
||||
super(corruptedMessage(source, details), cause);
|
||||
}
|
||||
|
||||
private static String corruptedMessage(String source, String details) {
|
||||
String msg = "translog from source [" + source + "] is corrupted";
|
||||
if (details != null) {
|
||||
msg += ", " + details;
|
||||
}
|
||||
return msg;
|
||||
}
|
||||
|
||||
public TranslogCorruptedException(StreamInput in) throws IOException {
|
||||
|
|
|
@ -110,13 +110,15 @@ final class TranslogHeader {
|
|||
static TranslogHeader read(final String translogUUID, final Path path, final FileChannel channel) throws IOException {
|
||||
// This input is intentionally not closed because closing it will close the FileChannel.
|
||||
final BufferedChecksumStreamInput in =
|
||||
new BufferedChecksumStreamInput(new InputStreamStreamInput(java.nio.channels.Channels.newInputStream(channel), channel.size()));
|
||||
new BufferedChecksumStreamInput(
|
||||
new InputStreamStreamInput(java.nio.channels.Channels.newInputStream(channel), channel.size()),
|
||||
path.toString());
|
||||
final int version;
|
||||
try {
|
||||
version = CodecUtil.checkHeader(new InputStreamDataInput(in), TRANSLOG_CODEC, VERSION_CHECKSUMS, VERSION_PRIMARY_TERM);
|
||||
} catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException e) {
|
||||
tryReportOldVersionError(path, channel);
|
||||
throw new TranslogCorruptedException("Translog header corrupted. path:" + path, e);
|
||||
throw new TranslogCorruptedException(path.toString(), "translog header corrupted", e);
|
||||
}
|
||||
if (version == VERSION_CHECKSUMS) {
|
||||
throw new IllegalStateException("pre-2.0 translog found [" + path + "]");
|
||||
|
@ -124,15 +126,19 @@ final class TranslogHeader {
|
|||
// Read the translogUUID
|
||||
final int uuidLen = in.readInt();
|
||||
if (uuidLen > channel.size()) {
|
||||
throw new TranslogCorruptedException("uuid length can't be larger than the translog");
|
||||
throw new TranslogCorruptedException(
|
||||
path.toString(),
|
||||
"UUID length can't be larger than the translog");
|
||||
}
|
||||
final BytesRef uuid = new BytesRef(uuidLen);
|
||||
uuid.length = uuidLen;
|
||||
in.read(uuid.bytes, uuid.offset, uuid.length);
|
||||
final BytesRef expectedUUID = new BytesRef(translogUUID);
|
||||
if (uuid.bytesEquals(expectedUUID) == false) {
|
||||
throw new TranslogCorruptedException("expected shard UUID " + expectedUUID + " but got: " + uuid +
|
||||
" this translog file belongs to a different translog. path:" + path);
|
||||
throw new TranslogCorruptedException(
|
||||
path.toString(),
|
||||
"expected shard UUID " + expectedUUID + " but got: " + uuid +
|
||||
" this translog file belongs to a different translog");
|
||||
}
|
||||
// Read the primary term
|
||||
final long primaryTerm;
|
||||
|
@ -164,7 +170,9 @@ final class TranslogHeader {
|
|||
// 0x00 => version 0 of the translog
|
||||
final byte b1 = Channels.readFromFileChannel(channel, 0, 1)[0];
|
||||
if (b1 == 0x3f) { // LUCENE_CODEC_HEADER_BYTE
|
||||
throw new TranslogCorruptedException("translog looks like version 1 or later, but has corrupted header. path:" + path);
|
||||
throw new TranslogCorruptedException(
|
||||
path.toString(),
|
||||
"translog looks like version 1 or later, but has corrupted header" );
|
||||
} else if (b1 == 0x00) { // UNVERSIONED_TRANSLOG_HEADER_BYTE
|
||||
throw new IllegalStateException("pre-1.4 translog found [" + path + "]");
|
||||
}
|
||||
|
|
|
@ -200,8 +200,10 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable {
|
|||
} else if (seenSequenceNumbers.containsKey(seqNo)) {
|
||||
final Tuple<BytesReference, Exception> previous = seenSequenceNumbers.get(seqNo);
|
||||
if (previous.v1().equals(data) == false) {
|
||||
Translog.Operation newOp = Translog.readOperation(new BufferedChecksumStreamInput(data.streamInput()));
|
||||
Translog.Operation prvOp = Translog.readOperation(new BufferedChecksumStreamInput(previous.v1().streamInput()));
|
||||
Translog.Operation newOp = Translog.readOperation(
|
||||
new BufferedChecksumStreamInput(data.streamInput(), "assertion"));
|
||||
Translog.Operation prvOp = Translog.readOperation(
|
||||
new BufferedChecksumStreamInput(previous.v1().streamInput(), "assertion"));
|
||||
if (newOp.equals(prvOp) == false) {
|
||||
throw new AssertionError(
|
||||
"seqNo [" + seqNo + "] was processed twice in generation [" + generation + "], with different data. " +
|
||||
|
@ -220,7 +222,8 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable {
|
|||
.forEach(e -> {
|
||||
final Translog.Operation op;
|
||||
try {
|
||||
op = Translog.readOperation(new BufferedChecksumStreamInput(e.getValue().v1().streamInput()));
|
||||
op = Translog.readOperation(
|
||||
new BufferedChecksumStreamInput(e.getValue().v1().streamInput(), "assertion"));
|
||||
} catch (IOException ex) {
|
||||
throw new RuntimeException(ex);
|
||||
}
|
||||
|
|
|
@ -25,11 +25,12 @@ import java.io.IOException;
|
|||
|
||||
public class TruncatedTranslogException extends TranslogCorruptedException {
|
||||
|
||||
public TruncatedTranslogException(String msg, Throwable cause) {
|
||||
super(msg, cause);
|
||||
}
|
||||
|
||||
public TruncatedTranslogException(StreamInput in) throws IOException {
|
||||
super(in);
|
||||
}
|
||||
|
||||
public TruncatedTranslogException(String source, String details, Throwable cause) {
|
||||
super(source, details, cause);
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -66,7 +66,7 @@ public class RecoveryTranslogOperationsRequest extends TransportRequest {
|
|||
super.readFrom(in);
|
||||
recoveryId = in.readLong();
|
||||
shardId = ShardId.readShardId(in);
|
||||
operations = Translog.readOperations(in);
|
||||
operations = Translog.readOperations(in, "recovery");
|
||||
totalTranslogOps = in.readVInt();
|
||||
}
|
||||
|
||||
|
|
|
@ -806,7 +806,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
|
|||
throw new SearchContextException(context, "failed to create SearchContextHighlighter", e);
|
||||
}
|
||||
}
|
||||
if (source.scriptFields() != null) {
|
||||
if (source.scriptFields() != null && source.size() != 0) {
|
||||
int maxAllowedScriptFields = context.mapperService().getIndexSettings().getMaxScriptFields();
|
||||
if (source.scriptFields().size() > maxAllowedScriptFields) {
|
||||
throw new IllegalArgumentException(
|
||||
|
|
|
@ -19,8 +19,6 @@
|
|||
|
||||
package org.elasticsearch.search;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.OriginalIndices;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
|
@ -32,6 +30,8 @@ import org.elasticsearch.index.Index;
|
|||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.transport.RemoteClusterAware;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
/**
|
||||
* The target that the search request was executed on.
|
||||
*/
|
||||
|
@ -96,6 +96,13 @@ public final class SearchShardTarget implements Writeable, Comparable<SearchShar
|
|||
return clusterAlias;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the fully qualified index name, including the cluster alias.
|
||||
*/
|
||||
public String getFullyQualifiedIndexName() {
|
||||
return RemoteClusterAware.buildRemoteIndexName(getClusterAlias(), getIndex());
|
||||
}
|
||||
|
||||
@Override
|
||||
public int compareTo(SearchShardTarget o) {
|
||||
int i = shardId.getIndexName().compareTo(o.getIndex());
|
||||
|
|
|
@ -20,15 +20,27 @@
|
|||
package org.elasticsearch;
|
||||
|
||||
import org.apache.commons.codec.DecoderException;
|
||||
import org.elasticsearch.action.OriginalIndices;
|
||||
import org.elasticsearch.action.ShardOperationFailedException;
|
||||
import org.elasticsearch.action.search.ShardSearchFailure;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.common.ParsingException;
|
||||
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.query.QueryShardException;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
import org.elasticsearch.search.SearchShardTarget;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.transport.RemoteClusterAware;
|
||||
|
||||
import java.util.Optional;
|
||||
|
||||
import static org.elasticsearch.ExceptionsHelper.MAX_ITERATIONS;
|
||||
import static org.elasticsearch.ExceptionsHelper.maybeError;
|
||||
import static org.hamcrest.CoreMatchers.equalTo;
|
||||
import static org.hamcrest.CoreMatchers.instanceOf;
|
||||
import static org.hamcrest.CoreMatchers.nullValue;
|
||||
|
||||
public class ExceptionsHelperTests extends ESTestCase {
|
||||
|
||||
|
@ -91,4 +103,107 @@ public class ExceptionsHelperTests extends ESTestCase {
|
|||
assertThat(ExceptionsHelper.status(new EsRejectedExecutionException("rejected")), equalTo(RestStatus.TOO_MANY_REQUESTS));
|
||||
}
|
||||
|
||||
public void testGroupBy() {
|
||||
ShardOperationFailedException[] failures = new ShardOperationFailedException[]{
|
||||
createShardFailureParsingException("error", "node0", "index", 0, null),
|
||||
createShardFailureParsingException("error", "node1", "index", 1, null),
|
||||
createShardFailureParsingException("error", "node2", "index2", 2, null),
|
||||
createShardFailureParsingException("error", "node0", "index", 0, "cluster1"),
|
||||
createShardFailureParsingException("error", "node1", "index", 1, "cluster1"),
|
||||
createShardFailureParsingException("error", "node2", "index", 2, "cluster1"),
|
||||
createShardFailureParsingException("error", "node0", "index", 0, "cluster2"),
|
||||
createShardFailureParsingException("error", "node1", "index", 1, "cluster2"),
|
||||
createShardFailureParsingException("error", "node2", "index", 2, "cluster2"),
|
||||
createShardFailureParsingException("another error", "node2", "index", 2, "cluster2")
|
||||
};
|
||||
|
||||
ShardOperationFailedException[] groupBy = ExceptionsHelper.groupBy(failures);
|
||||
assertThat(groupBy.length, equalTo(5));
|
||||
String[] expectedIndices = new String[]{"index", "index2", "cluster1:index", "cluster2:index", "cluster2:index"};
|
||||
String[] expectedErrors = new String[]{"error", "error", "error", "error", "another error"};
|
||||
int i = 0;
|
||||
for (ShardOperationFailedException shardOperationFailedException : groupBy) {
|
||||
assertThat(shardOperationFailedException.getCause().getMessage(), equalTo(expectedErrors[i]));
|
||||
assertThat(shardOperationFailedException.index(), equalTo(expectedIndices[i++]));
|
||||
}
|
||||
}
|
||||
|
||||
private static ShardSearchFailure createShardFailureParsingException(String error, String nodeId,
|
||||
String index, int shardId, String clusterAlias) {
|
||||
ParsingException ex = new ParsingException(0, 0, error, new IllegalArgumentException("some bad argument"));
|
||||
ex.setIndex(index);
|
||||
return new ShardSearchFailure(ex, createSearchShardTarget(nodeId, shardId, index, clusterAlias));
|
||||
}
|
||||
|
||||
private static SearchShardTarget createSearchShardTarget(String nodeId, int shardId, String index, String clusterAlias) {
|
||||
return new SearchShardTarget(nodeId,
|
||||
new ShardId(new Index(index, IndexMetaData.INDEX_UUID_NA_VALUE), shardId), clusterAlias, OriginalIndices.NONE);
|
||||
}
|
||||
|
||||
public void testGroupByNullTarget() {
|
||||
ShardOperationFailedException[] failures = new ShardOperationFailedException[] {
|
||||
createShardFailureQueryShardException("error", "index", null),
|
||||
createShardFailureQueryShardException("error", "index", null),
|
||||
createShardFailureQueryShardException("error", "index", null),
|
||||
createShardFailureQueryShardException("error", "index", "cluster1"),
|
||||
createShardFailureQueryShardException("error", "index", "cluster1"),
|
||||
createShardFailureQueryShardException("error", "index", "cluster1"),
|
||||
createShardFailureQueryShardException("error", "index", "cluster2"),
|
||||
createShardFailureQueryShardException("error", "index", "cluster2"),
|
||||
createShardFailureQueryShardException("error", "index2", null),
|
||||
createShardFailureQueryShardException("another error", "index2", null),
|
||||
};
|
||||
|
||||
ShardOperationFailedException[] groupBy = ExceptionsHelper.groupBy(failures);
|
||||
assertThat(groupBy.length, equalTo(5));
|
||||
String[] expectedIndices = new String[]{"index", "cluster1:index", "cluster2:index", "index2", "index2"};
|
||||
String[] expectedErrors = new String[]{"error", "error", "error", "error", "another error"};
|
||||
int i = 0;
|
||||
for (ShardOperationFailedException shardOperationFailedException : groupBy) {
|
||||
assertThat(shardOperationFailedException.index(), nullValue());
|
||||
assertThat(shardOperationFailedException.getCause(), instanceOf(ElasticsearchException.class));
|
||||
ElasticsearchException elasticsearchException = (ElasticsearchException) shardOperationFailedException.getCause();
|
||||
assertThat(elasticsearchException.getMessage(), equalTo(expectedErrors[i]));
|
||||
assertThat(elasticsearchException.getIndex().getName(), equalTo(expectedIndices[i++]));
|
||||
}
|
||||
}
|
||||
|
||||
private static ShardSearchFailure createShardFailureQueryShardException(String error, String indexName, String clusterAlias) {
|
||||
Index index = new Index(RemoteClusterAware.buildRemoteIndexName(clusterAlias, indexName), "uuid");
|
||||
QueryShardException queryShardException = new QueryShardException(index, error, new IllegalArgumentException("parse error"));
|
||||
return new ShardSearchFailure(queryShardException, null);
|
||||
}
|
||||
|
||||
public void testGroupByNullCause() {
|
||||
ShardOperationFailedException[] failures = new ShardOperationFailedException[] {
|
||||
new ShardSearchFailure("error", createSearchShardTarget("node0", 0, "index", null)),
|
||||
new ShardSearchFailure("error", createSearchShardTarget("node1", 1, "index", null)),
|
||||
new ShardSearchFailure("error", createSearchShardTarget("node1", 1, "index2", null)),
|
||||
new ShardSearchFailure("error", createSearchShardTarget("node2", 2, "index", "cluster1")),
|
||||
new ShardSearchFailure("error", createSearchShardTarget("node1", 1, "index", "cluster1")),
|
||||
new ShardSearchFailure("a different error", createSearchShardTarget("node3", 3, "index", "cluster1"))
|
||||
};
|
||||
|
||||
ShardOperationFailedException[] groupBy = ExceptionsHelper.groupBy(failures);
|
||||
assertThat(groupBy.length, equalTo(4));
|
||||
String[] expectedIndices = new String[]{"index", "index2", "cluster1:index", "cluster1:index"};
|
||||
String[] expectedErrors = new String[]{"error", "error", "error", "a different error"};
|
||||
|
||||
int i = 0;
|
||||
for (ShardOperationFailedException shardOperationFailedException : groupBy) {
|
||||
assertThat(shardOperationFailedException.reason(), equalTo(expectedErrors[i]));
|
||||
assertThat(shardOperationFailedException.index(), equalTo(expectedIndices[i++]));
|
||||
}
|
||||
}
|
||||
|
||||
public void testGroupByNullIndex() {
|
||||
ShardOperationFailedException[] failures = new ShardOperationFailedException[] {
|
||||
new ShardSearchFailure("error", null),
|
||||
new ShardSearchFailure(new IllegalArgumentException("error")),
|
||||
new ShardSearchFailure(new ParsingException(0, 0, "error", null)),
|
||||
};
|
||||
|
||||
ShardOperationFailedException[] groupBy = ExceptionsHelper.groupBy(failures);
|
||||
assertThat(groupBy.length, equalTo(3));
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -41,7 +41,7 @@ public class UpdateSettingsRequestStreamableTests extends AbstractStreamableTest
List<Runnable> mutators = new ArrayList<>();
mutators.add(() -> mutation
.masterNodeTimeout(randomValueOtherThan(request.masterNodeTimeout().getStringRep(), ESTestCase::randomTimeValue)));
mutators.add(() -> mutation.timeout(randomValueOtherThan(request.masterNodeTimeout().getStringRep(), ESTestCase::randomTimeValue)));
mutators.add(() -> mutation.timeout(randomValueOtherThan(request.timeout().getStringRep(), ESTestCase::randomTimeValue)));
mutators.add(() -> mutation.settings(mutateSettings(request.settings())));
mutators.add(() -> mutation.indices(mutateIndices(request.indices())));
mutators.add(() -> mutation.indicesOptions(randomValueOtherThan(request.indicesOptions(),

@@ -45,8 +45,9 @@ public class ShardSearchFailureTests extends ESTestCase {
if (randomBoolean()) {
String nodeId = randomAlphaOfLengthBetween(5, 10);
String indexName = randomAlphaOfLengthBetween(5, 10);
String clusterAlias = randomBoolean() ? randomAlphaOfLengthBetween(5, 10) : null;
searchShardTarget = new SearchShardTarget(nodeId,
new ShardId(new Index(indexName, IndexMetaData.INDEX_UUID_NA_VALUE), randomInt()), null, null);
new ShardId(new Index(indexName, IndexMetaData.INDEX_UUID_NA_VALUE), randomInt()), clusterAlias, OriginalIndices.NONE);
}
return new ShardSearchFailure(ex, searchShardTarget);
}
@@ -115,4 +116,22 @@ public class ShardSearchFailureTests extends ESTestCase {
+ "}",
xContent.utf8ToString());
}

public void testToXContentWithClusterAlias() throws IOException {
ShardSearchFailure failure = new ShardSearchFailure(new ParsingException(0, 0, "some message", null),
new SearchShardTarget("nodeId", new ShardId(new Index("indexName", "indexUuid"), 123), "cluster1", OriginalIndices.NONE));
BytesReference xContent = toXContent(failure, XContentType.JSON, randomBoolean());
assertEquals(
"{\"shard\":123,"
+ "\"index\":\"cluster1:indexName\","
+ "\"node\":\"nodeId\","
+ "\"reason\":{"
+ "\"type\":\"parsing_exception\","
+ "\"reason\":\"some message\","
+ "\"line\":0,"
+ "\"col\":0"
+ "}"
+ "}",
xContent.utf8ToString());
}
}

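A small illustration of the index naming these expectations rely on: when a failure comes from a remote cluster, the cluster alias is prefixed to the index name with a colon, so the response reports "cluster1:indexName" instead of "indexName". The helper below mirrors that convention and is an assumption for the sketch, not the real utility class.

// Illustrative only; mirrors the "<clusterAlias>:<indexName>" convention used above.
public class RemoteIndexNameSketch {

    static String buildRemoteIndexName(String clusterAlias, String indexName) {
        // A null or empty alias means the index is local and stays unqualified.
        return clusterAlias == null || clusterAlias.isEmpty() ? indexName : clusterAlias + ":" + indexName;
    }

    public static void main(String[] args) {
        System.out.println(buildRemoteIndexName(null, "indexName"));        // indexName
        System.out.println(buildRemoteIndexName("cluster1", "indexName"));  // cluster1:indexName
    }
}
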
@@ -46,7 +46,7 @@ public class LoggersTests extends ESTestCase {

@Override
public void append(LogEvent event) {
lastEvent = event;
lastEvent = event.toImmutable();
}

ParameterizedMessage lastParameterizedMessage() {

@@ -28,6 +28,7 @@ import org.elasticsearch.test.ESTestCase;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.util.Arrays;

import static org.hamcrest.Matchers.equalTo;

@@ -54,8 +55,21 @@ public class XContentFactoryTests extends ESTestCase {
builder.field("field1", "value1");
builder.endObject();

assertThat(XContentHelper.xContentType(BytesReference.bytes(builder)), equalTo(type));
assertThat(XContentFactory.xContentType(BytesReference.bytes(builder).streamInput()), equalTo(type));
final BytesReference bytes;
if (type == XContentType.JSON && randomBoolean()) {
final int length = randomIntBetween(0, 8 * XContentFactory.GUESS_HEADER_LENGTH);
final String content = Strings.toString(builder);
final StringBuilder sb = new StringBuilder(length + content.length());
final char[] chars = new char[length];
Arrays.fill(chars, ' ');
sb.append(new String(chars)).append(content);
bytes = new BytesArray(sb.toString());
} else {
bytes = BytesReference.bytes(builder);
}

assertThat(XContentHelper.xContentType(bytes), equalTo(type));
assertThat(XContentFactory.xContentType(bytes.streamInput()), equalTo(type));

// CBOR is binary, cannot use String
if (type != XContentType.CBOR && type != XContentType.SMILE) {

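The new branch above pads JSON with up to several header-lengths of spaces before asking for the content type, which only works because JSON can be recognised after skipping insignificant leading whitespace, while binary formats such as SMILE and CBOR are identified by magic bytes at offset zero. A simplified, assumed detection sketch (not the real XContentFactory logic):

// Simplified sketch of content-type sniffing with leading-whitespace tolerance.
public class ContentTypeGuessSketch {

    enum Guess { JSON, UNKNOWN }

    static Guess guess(byte[] bytes) {
        int i = 0;
        while (i < bytes.length && Character.isWhitespace(bytes[i])) {
            i++; // leading whitespace carries no information for JSON
        }
        if (i < bytes.length && (bytes[i] == '{' || bytes[i] == '[')) {
            return Guess.JSON;
        }
        return Guess.UNKNOWN; // binary formats would be matched on their magic bytes instead
    }

    public static void main(String[] args) {
        System.out.println(guess("   {\"field1\":\"value1\"}".getBytes()));   // JSON
        System.out.println(guess(new byte[] {0x3A, 0x29, 0x0A, 0x00}));       // UNKNOWN (SMILE header bytes)
    }
}
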
@@ -67,6 +67,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
import static org.elasticsearch.index.translog.SnapshotMatchers.containsOperationsInAnyOrder;
import static org.hamcrest.Matchers.anyOf;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.either;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.instanceOf;
@@ -261,7 +262,7 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase
barrier.await();
indexOnReplica(replicationRequest, shards, replica2, newReplica1Term);
} catch (IllegalStateException ise) {
assertThat(ise.getMessage(), containsString("is too old"));
assertThat(ise.getMessage(), either(containsString("is too old")).or(containsString("cannot be a replication target")));
} catch (Exception e) {
throw new RuntimeException(e);
}
@@ -303,7 +304,7 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase
indexOnReplica(replicationRequest, shards, replica, primaryPrimaryTerm);
successFullyIndexed.set(true);
} catch (IllegalStateException ise) {
assertThat(ise.getMessage(), containsString("is too old"));
assertThat(ise.getMessage(), either(containsString("is too old")).or(containsString("cannot be a replication target")));
} catch (Exception e) {
throw new RuntimeException(e);
}

@@ -44,6 +44,7 @@ import org.elasticsearch.test.engine.MockEngineSupport;
import org.elasticsearch.test.transport.MockTransportService;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.Collection;
@@ -86,7 +87,7 @@ public class CorruptedTranslogIT extends ESIntegTestCase {
indexRandom(false, false, false, Arrays.asList(builders)); // this one

// Corrupt the translog file(s)
corruptRandomTranslogFiles();
corruptRandomTranslogFile();

// Restart the single node
internalCluster().fullRestart();
@@ -102,7 +103,7 @@ public class CorruptedTranslogIT extends ESIntegTestCase {
}


private void corruptRandomTranslogFiles() throws IOException {
private void corruptRandomTranslogFile() throws IOException {
ClusterState state = client().admin().cluster().prepareState().get().getState();
GroupShardsIterator shardIterators = state.getRoutingTable().activePrimaryShardsGrouped(new String[]{"test"}, false);
final Index test = state.metaData().index("test").getIndex();
@@ -119,9 +120,12 @@ public class CorruptedTranslogIT extends ESIntegTestCase {
String path = fsPath.getPath();
String relativeDataLocationPath = "indices/" + test.getUUID() + "/" + Integer.toString(shardRouting.getId()) + "/translog";
Path translogDir = PathUtils.get(path).resolve(relativeDataLocationPath);
if (Files.isDirectory(translogDir)) {
translogDirs.add(translogDir);
}
TestTranslog.corruptTranslogFiles(logger, random(), translogDirs);
}
Path translogDir = RandomPicks.randomFrom(random(), translogDirs);
TestTranslog.corruptRandomTranslogFile(logger, random(), translogDir, TestTranslog.minTranslogGenUsedInRecovery(translogDir));
}

/** Disables translog flushing for the specified index */

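What "corrupting a translog file" amounts to in these tests is flipping at least one byte of a translog-N.tlog whose generation is still referenced by recovery, so the checksum no longer matches when the file is read back. A compact stand-alone sketch under that assumption (not the TestTranslog implementation itself):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.Random;

// Illustrative only: flip one byte at a random position of the given file.
public class FlipOneByteSketch {

    static void flipOneByte(Path file, Random random) throws IOException {
        try (FileChannel channel = FileChannel.open(file, StandardOpenOption.READ, StandardOpenOption.WRITE)) {
            long position = (long) (random.nextDouble() * channel.size());
            ByteBuffer current = ByteBuffer.allocate(1);
            channel.read(current, position);
            byte flipped = (byte) (current.get(0) ^ 0xFF); // guaranteed to differ from the original byte
            channel.write(ByteBuffer.wrap(new byte[] {flipped}), position);
        }
    }
}
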
@@ -34,8 +34,6 @@ import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Random;
import java.util.Set;
@@ -45,7 +43,8 @@ import java.util.regex.Pattern;

import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.core.Is.is;
import static org.hamcrest.core.IsNot.not;

/**
* Helpers for testing translog.
@@ -54,42 +53,31 @@ public class TestTranslog {
static final Pattern TRANSLOG_FILE_PATTERN = Pattern.compile("translog-(\\d+)\\.tlog");

/**
* Corrupts some translog files (translog-N.tlog) from the given translog directories.
* Corrupts random translog file (translog-N.tlog) from the given translog directory.
*
* @return a collection of tlog files that have been corrupted.
* @return a translog file which has been corrupted.
*/
public static Set<Path> corruptTranslogFiles(Logger logger, Random random, Collection<Path> translogDirs) throws IOException {
public static Path corruptRandomTranslogFile(Logger logger, Random random, Path translogDir, long minGeneration) throws
IOException {
Set<Path> candidates = new TreeSet<>(); // TreeSet makes sure iteration order is deterministic
for (Path translogDir : translogDirs) {
if (Files.isDirectory(translogDir)) {
final long minUsedTranslogGen = minTranslogGenUsedInRecovery(translogDir);
logger.info("--> Translog dir [{}], minUsedTranslogGen [{}]", translogDir, minUsedTranslogGen);
logger.info("--> Translog dir [{}], minUsedTranslogGen [{}]", translogDir, minGeneration);
try (DirectoryStream<Path> stream = Files.newDirectoryStream(translogDir)) {
for (Path item : stream) {
if (Files.isRegularFile(item)) {
// Makes sure that we will corrupt tlog files that are referenced by the Checkpoint.
final Matcher matcher = TRANSLOG_FILE_PATTERN.matcher(item.getFileName().toString());
if (matcher.matches() && Long.parseLong(matcher.group(1)) >= minUsedTranslogGen) {
if (matcher.matches() && Long.parseLong(matcher.group(1)) >= minGeneration) {
candidates.add(item);
}
}
}
}
}
assertThat(candidates, is(not(empty())));

Path corruptedFile = RandomPicks.randomFrom(random, candidates);
corruptFile(logger, random, corruptedFile);
return corruptedFile;
}

Set<Path> corruptedFiles = new HashSet<>();
if (!candidates.isEmpty()) {
int corruptions = RandomNumbers.randomIntBetween(random, 5, 20);
for (int i = 0; i < corruptions; i++) {
Path fileToCorrupt = RandomPicks.randomFrom(random, candidates);
corruptFile(logger, random, fileToCorrupt);
corruptedFiles.add(fileToCorrupt);
}
}
assertThat("no translog file corrupted", corruptedFiles, not(empty()));
return corruptedFiles;
}

static void corruptFile(Logger logger, Random random, Path fileToCorrupt) throws IOException {
try (FileChannel raf = FileChannel.open(fileToCorrupt, StandardOpenOption.READ, StandardOpenOption.WRITE)) {
@@ -117,7 +105,7 @@ public class TestTranslog {
/**
* Lists all existing commits in a given index path, then read the minimum translog generation that will be used in recoverFromTranslog.
*/
private static long minTranslogGenUsedInRecovery(Path translogPath) throws IOException {
public static long minTranslogGenUsedInRecovery(Path translogPath) throws IOException {
try (NIOFSDirectory directory = new NIOFSDirectory(translogPath.getParent().resolve("index"))) {
List<IndexCommit> commits = DirectoryReader.listCommits(directory);
final String translogUUID = commits.get(commits.size() - 1).getUserData().get(Translog.TRANSLOG_UUID_KEY);

@ -748,7 +748,9 @@ public class TranslogTests extends ESTestCase {
|
|||
|
||||
}
|
||||
|
||||
public void testTranslogChecksums() throws Exception {
|
||||
public void testTranslogCorruption() throws Exception {
|
||||
TranslogConfig config = translog.getConfig();
|
||||
String uuid = translog.getTranslogUUID();
|
||||
List<Translog.Location> locations = new ArrayList<>();
|
||||
|
||||
int translogOperations = randomIntBetween(10, 100);
|
||||
|
@ -756,23 +758,23 @@ public class TranslogTests extends ESTestCase {
|
|||
String ascii = randomAlphaOfLengthBetween(1, 50);
|
||||
locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), ascii.getBytes("UTF-8"))));
|
||||
}
|
||||
translog.sync();
|
||||
translog.close();
|
||||
|
||||
corruptTranslogs(translogDir);
|
||||
TestTranslog.corruptRandomTranslogFile(logger, random(), translogDir, 0);
|
||||
int corruptionsCaught = 0;
|
||||
|
||||
AtomicInteger corruptionsCaught = new AtomicInteger(0);
|
||||
try (Translog translog = openTranslog(config, uuid)) {
|
||||
try (Translog.Snapshot snapshot = translog.newSnapshot()) {
|
||||
for (Translog.Location location : locations) {
|
||||
try {
|
||||
Translog.Operation next = snapshot.next();
|
||||
assertNotNull(next);
|
||||
for (Location loc : locations) {
|
||||
snapshot.next();
|
||||
}
|
||||
}
|
||||
} catch (TranslogCorruptedException e) {
|
||||
corruptionsCaught.incrementAndGet();
|
||||
}
|
||||
}
|
||||
expectThrows(TranslogCorruptedException.class, snapshot::next);
|
||||
assertThat("at least one corruption was caused and caught", corruptionsCaught.get(), greaterThanOrEqualTo(1));
|
||||
assertThat(e.getMessage(), containsString(translogDir.toString()));
|
||||
corruptionsCaught++;
|
||||
}
|
||||
|
||||
assertThat("corruption is caught", corruptionsCaught, greaterThanOrEqualTo(1));
|
||||
}
|
||||
|
||||
public void testTruncatedTranslogs() throws Exception {
|
||||
|
@ -816,25 +818,6 @@ public class TranslogTests extends ESTestCase {
|
|||
}
|
||||
|
||||
|
||||
/**
|
||||
* Randomly overwrite some bytes in the translog files
|
||||
*/
|
||||
private void corruptTranslogs(Path directory) throws Exception {
|
||||
Path[] files = FileSystemUtils.files(directory, "translog-*");
|
||||
for (Path file : files) {
|
||||
logger.info("--> corrupting {}...", file);
|
||||
FileChannel f = FileChannel.open(file, StandardOpenOption.READ, StandardOpenOption.WRITE);
|
||||
int corruptions = scaledRandomIntBetween(10, 50);
|
||||
for (int i = 0; i < corruptions; i++) {
|
||||
// note: with the current logic, this will sometimes be a no-op
|
||||
long pos = randomIntBetween(0, (int) f.size());
|
||||
ByteBuffer junk = ByteBuffer.wrap(new byte[]{randomByte()});
|
||||
f.write(junk, pos);
|
||||
}
|
||||
f.close();
|
||||
}
|
||||
}
|
||||
|
||||
private Term newUid(ParsedDocument doc) {
|
||||
return new Term("_id", Uid.encodeId(doc.id()));
|
||||
}
|
||||
|
@ -1505,7 +1488,8 @@ public class TranslogTests extends ESTestCase {
|
|||
ops.add(test);
|
||||
}
|
||||
Translog.writeOperations(out, ops);
|
||||
final List<Translog.Operation> readOperations = Translog.readOperations(out.bytes().streamInput());
|
||||
final List<Translog.Operation> readOperations = Translog.readOperations(
|
||||
out.bytes().streamInput(), "testSnapshotFromStreamInput");
|
||||
assertEquals(ops.size(), readOperations.size());
|
||||
assertEquals(ops, readOperations);
|
||||
}
|
||||
|
|
|
@@ -153,9 +153,9 @@ public class TruncateTranslogIT extends ESIntegTestCase {
// shut down the replica node to be tested later
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNode));

// Corrupt the translog file(s)
// Corrupt the translog file
logger.info("--> corrupting translog");
corruptRandomTranslogFiles("test");
corruptRandomTranslogFile("test");

// Restart the single node
logger.info("--> restarting node");
@@ -267,15 +267,16 @@ public class TruncateTranslogIT extends ESIntegTestCase {
// sample the replica node translog dirs
final ShardId shardId = new ShardId(resolveIndex("test"), 0);
Set<Path> translogDirs = getTranslogDirs(replicaNode, shardId);
Path tdir = randomFrom(translogDirs);

// stop the cluster nodes. we don't use full restart so the node start up order will be the same
// and shard roles will be maintained
internalCluster().stopRandomDataNode();
internalCluster().stopRandomDataNode();

// Corrupt the translog file(s)
// Corrupt the translog file
logger.info("--> corrupting translog");
TestTranslog.corruptTranslogFiles(logger, random(), translogDirs);
TestTranslog.corruptRandomTranslogFile(logger, random(), tdir, TestTranslog.minTranslogGenUsedInRecovery(tdir));

// Restart the single node
logger.info("--> starting node");
@@ -358,9 +359,10 @@ public class TruncateTranslogIT extends ESIntegTestCase {
return translogDirs;
}

private void corruptRandomTranslogFiles(String indexName) throws IOException {
private void corruptRandomTranslogFile(String indexName) throws IOException {
Set<Path> translogDirs = getTranslogDirs(indexName);
TestTranslog.corruptTranslogFiles(logger, random(), translogDirs);
Path translogDir = randomFrom(translogDirs);
TestTranslog.corruptRandomTranslogFile(logger, random(), translogDir, TestTranslog.minTranslogGenUsedInRecovery(translogDir));
}

/** Disables translog flushing for the specified index */

@ -20,22 +20,33 @@
|
|||
package org.elasticsearch.rest.action;
|
||||
|
||||
import com.fasterxml.jackson.core.io.JsonEOFException;
|
||||
import java.util.Arrays;
|
||||
import org.elasticsearch.action.OriginalIndices;
|
||||
import org.elasticsearch.action.ShardOperationFailedException;
|
||||
import org.elasticsearch.action.search.ShardSearchFailure;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.common.ParsingException;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.json.JsonXContent;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.query.MatchQueryBuilder;
|
||||
import org.elasticsearch.index.query.QueryBuilder;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.search.SearchModule;
|
||||
import org.elasticsearch.search.SearchShardTarget;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
|
||||
import static java.util.Collections.emptyList;
|
||||
import static org.hamcrest.CoreMatchers.equalTo;
|
||||
|
||||
public class RestActionsTests extends ESTestCase {
|
||||
|
||||
|
@ -98,9 +109,144 @@ public class RestActionsTests extends ESTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
public void testBuildBroadcastShardsHeader() throws IOException {
|
||||
ShardOperationFailedException[] failures = new ShardOperationFailedException[]{
|
||||
createShardFailureParsingException("node0", 0, null),
|
||||
createShardFailureParsingException("node1", 1, null),
|
||||
createShardFailureParsingException("node2", 2, null),
|
||||
createShardFailureParsingException("node0", 0, "cluster1"),
|
||||
createShardFailureParsingException("node1", 1, "cluster1"),
|
||||
createShardFailureParsingException("node2", 2, "cluster1"),
|
||||
createShardFailureParsingException("node0", 0, "cluster2"),
|
||||
createShardFailureParsingException("node1", 1, "cluster2"),
|
||||
createShardFailureParsingException("node2", 2, "cluster2")
|
||||
};
|
||||
|
||||
XContentBuilder builder = JsonXContent.contentBuilder();
|
||||
builder.prettyPrint();
|
||||
builder.startObject();
|
||||
RestActions.buildBroadcastShardsHeader(builder, ToXContent.EMPTY_PARAMS, 12, 3, 0, 9, failures);
|
||||
builder.endObject();
|
||||
assertThat(Strings.toString(builder), equalTo("{\n" +
|
||||
" \"_shards\" : {\n" +
|
||||
" \"total\" : 12,\n" +
|
||||
" \"successful\" : 3,\n" +
|
||||
" \"skipped\" : 0,\n" +
|
||||
" \"failed\" : 9,\n" +
|
||||
" \"failures\" : [\n" +
|
||||
" {\n" +
|
||||
" \"shard\" : 0,\n" +
|
||||
" \"index\" : \"index\",\n" +
|
||||
" \"node\" : \"node0\",\n" +
|
||||
" \"reason\" : {\n" +
|
||||
" \"type\" : \"parsing_exception\",\n" +
|
||||
" \"reason\" : \"error\",\n" +
|
||||
" \"index_uuid\" : \"_na_\",\n" +
|
||||
" \"index\" : \"index\",\n" +
|
||||
" \"line\" : 0,\n" +
|
||||
" \"col\" : 0,\n" +
|
||||
" \"caused_by\" : {\n" +
|
||||
" \"type\" : \"illegal_argument_exception\",\n" +
|
||||
" \"reason\" : \"some bad argument\"\n" +
|
||||
" }\n" +
|
||||
" }\n" +
|
||||
" },\n" +
|
||||
" {\n" +
|
||||
" \"shard\" : 0,\n" +
|
||||
" \"index\" : \"cluster1:index\",\n" +
|
||||
" \"node\" : \"node0\",\n" +
|
||||
" \"reason\" : {\n" +
|
||||
" \"type\" : \"parsing_exception\",\n" +
|
||||
" \"reason\" : \"error\",\n" +
|
||||
" \"index_uuid\" : \"_na_\",\n" +
|
||||
" \"index\" : \"index\",\n" +
|
||||
" \"line\" : 0,\n" +
|
||||
" \"col\" : 0,\n" +
|
||||
" \"caused_by\" : {\n" +
|
||||
" \"type\" : \"illegal_argument_exception\",\n" +
|
||||
" \"reason\" : \"some bad argument\"\n" +
|
||||
" }\n" +
|
||||
" }\n" +
|
||||
" },\n" +
|
||||
" {\n" +
|
||||
" \"shard\" : 0,\n" +
|
||||
" \"index\" : \"cluster2:index\",\n" +
|
||||
" \"node\" : \"node0\",\n" +
|
||||
" \"reason\" : {\n" +
|
||||
" \"type\" : \"parsing_exception\",\n" +
|
||||
" \"reason\" : \"error\",\n" +
|
||||
" \"index_uuid\" : \"_na_\",\n" +
|
||||
" \"index\" : \"index\",\n" +
|
||||
" \"line\" : 0,\n" +
|
||||
" \"col\" : 0,\n" +
|
||||
" \"caused_by\" : {\n" +
|
||||
" \"type\" : \"illegal_argument_exception\",\n" +
|
||||
" \"reason\" : \"some bad argument\"\n" +
|
||||
" }\n" +
|
||||
" }\n" +
|
||||
" }\n" +
|
||||
" ]\n" +
|
||||
" }\n" +
|
||||
"}"));
|
||||
}
|
||||
|
||||
private static ShardSearchFailure createShardFailureParsingException(String nodeId, int shardId, String clusterAlias) {
|
||||
String index = "index";
|
||||
ParsingException ex = new ParsingException(0, 0, "error", new IllegalArgumentException("some bad argument"));
|
||||
ex.setIndex(index);
|
||||
return new ShardSearchFailure(ex, createSearchShardTarget(nodeId, shardId, index, clusterAlias));
|
||||
}
|
||||
|
||||
private static SearchShardTarget createSearchShardTarget(String nodeId, int shardId, String index, String clusterAlias) {
|
||||
return new SearchShardTarget(nodeId,
|
||||
new ShardId(new Index(index, IndexMetaData.INDEX_UUID_NA_VALUE), shardId), clusterAlias, OriginalIndices.NONE);
|
||||
}
|
||||
|
||||
public void testBuildBroadcastShardsHeaderNullCause() throws Exception {
|
||||
ShardOperationFailedException[] failures = new ShardOperationFailedException[] {
|
||||
new ShardSearchFailure("error", createSearchShardTarget("node0", 0, "index", null)),
|
||||
new ShardSearchFailure("error", createSearchShardTarget("node1", 1, "index", null)),
|
||||
new ShardSearchFailure("error", createSearchShardTarget("node2", 2, "index", "cluster1")),
|
||||
new ShardSearchFailure("error", createSearchShardTarget("node1", 1, "index", "cluster1")),
|
||||
new ShardSearchFailure("a different error", createSearchShardTarget("node3", 3, "index", "cluster1"))
|
||||
};
|
||||
|
||||
XContentBuilder builder = JsonXContent.contentBuilder();
|
||||
builder.prettyPrint();
|
||||
builder.startObject();
|
||||
RestActions.buildBroadcastShardsHeader(builder, ToXContent.EMPTY_PARAMS, 12, 3, 0, 9, failures);
|
||||
builder.endObject();
|
||||
//TODO the reason is not printed out, as a follow-up we should probably either print it out when the cause is null,
|
||||
//or even better enforce that the cause can't be null
|
||||
assertThat(Strings.toString(builder), equalTo("{\n" +
|
||||
" \"_shards\" : {\n" +
|
||||
" \"total\" : 12,\n" +
|
||||
" \"successful\" : 3,\n" +
|
||||
" \"skipped\" : 0,\n" +
|
||||
" \"failed\" : 9,\n" +
|
||||
" \"failures\" : [\n" +
|
||||
" {\n" +
|
||||
" \"shard\" : 0,\n" +
|
||||
" \"index\" : \"index\",\n" +
|
||||
" \"node\" : \"node0\"\n" +
|
||||
" },\n" +
|
||||
" {\n" +
|
||||
" \"shard\" : 2,\n" +
|
||||
" \"index\" : \"cluster1:index\",\n" +
|
||||
" \"node\" : \"node2\"\n" +
|
||||
" },\n" +
|
||||
" {\n" +
|
||||
" \"shard\" : 3,\n" +
|
||||
" \"index\" : \"cluster1:index\",\n" +
|
||||
" \"node\" : \"node3\"\n" +
|
||||
" }\n" +
|
||||
" ]\n" +
|
||||
" }\n" +
|
||||
"}"));
|
||||
}
|
||||
|
||||
@Override
|
||||
protected NamedXContentRegistry xContentRegistry() {
|
||||
return xContentRegistry;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@@ -345,6 +345,23 @@ public class SearchServiceTests extends ESSingleNodeTestCase {
}
}

public void testIgnoreScriptfieldIfSizeZero() throws IOException {
createIndex("index");
final SearchService service = getInstanceFromNode(SearchService.class);
final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index"));
final IndexShard indexShard = indexService.getShard(0);

SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
searchSourceBuilder.scriptField("field" + 0,
new Script(ScriptType.INLINE, MockScriptEngine.NAME, CustomScriptPlugin.DUMMY_SCRIPT, Collections.emptyMap()));
searchSourceBuilder.size(0);
try (SearchContext context = service.createContext(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT,
searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, true, null, null))) {
assertEquals(0, context.scriptFields().fields().size());
}
}

public static class FailOnRewriteQueryPlugin extends Plugin implements SearchPlugin {
@Override
public List<QuerySpec<?>> getQueries() {

@@ -45,3 +45,13 @@ thirdPartyAudit.excludes = [
'org.osgi.framework.wiring.BundleWire',
'org.osgi.framework.wiring.BundleWiring'
]

if (JavaVersion.current() <= JavaVersion.VERSION_1_8) {
// Used by Log4J 2.11.1
thirdPartyAudit.excludes += [
'java.io.ObjectInputFilter',
'java.io.ObjectInputFilter$Config',
'java.io.ObjectInputFilter$FilterInfo',
'java.io.ObjectInputFilter$Status'
]
}

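The conditional excludes above exist because java.io.ObjectInputFilter and its nested types were only introduced in Java 9, so a third-party audit running on a Java 8 JDK cannot resolve the references Log4j 2.11.1 makes to them. A quick, assumed runtime probe illustrating the difference:

// Illustrative probe: the class resolves on Java 9+ but is absent on Java 8.
public class ObjectInputFilterProbe {
    public static void main(String[] args) {
        try {
            Class.forName("java.io.ObjectInputFilter");
            System.out.println("java.io.ObjectInputFilter present (Java 9 or newer)");
        } catch (ClassNotFoundException e) {
            System.out.println("java.io.ObjectInputFilter absent (Java 8)");
        }
    }
}
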
@@ -1,28 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.license;

import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.master.MasterNodeReadRequest;
import org.elasticsearch.common.io.stream.StreamInput;

import java.io.IOException;


public class GetLicenseRequest extends MasterNodeReadRequest<GetLicenseRequest> {

public GetLicenseRequest() {
}

public GetLicenseRequest(StreamInput in) throws IOException {
super(in);
}

@Override
public ActionRequestValidationException validate() {
return null;
}
}

@@ -7,6 +7,7 @@ package org.elasticsearch.license;

import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.protocol.xpack.license.GetLicenseRequest;

public class GetLicenseRequestBuilder extends MasterNodeReadOperationRequestBuilder<GetLicenseRequest, GetLicenseResponse,
GetLicenseRequestBuilder> {

@@ -7,6 +7,7 @@ package org.elasticsearch.license;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.protocol.xpack.license.GetLicenseRequest;
import org.elasticsearch.protocol.xpack.license.PutLicenseResponse;

public class LicensingClient {

@@ -9,6 +9,7 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.protocol.xpack.license.GetLicenseRequest;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;

@@ -16,6 +16,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.protocol.xpack.license.GetLicenseRequest;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

@@ -42,8 +42,8 @@ public class RollupJobCaps implements Writeable, ToXContentObject {
jobID = job.getId();
rollupIndex = job.getRollupIndex();
indexPattern = job.getIndexPattern();
Map<String, Object> dateHistoAggCap = job.getGroupConfig().getDateHisto().toAggCap();
String dateField = job.getGroupConfig().getDateHisto().getField();
Map<String, Object> dateHistoAggCap = job.getGroupConfig().getDateHistogram().toAggCap();
String dateField = job.getGroupConfig().getDateHistogram().getField();
RollupFieldCaps fieldCaps = fieldCapLookup.get(dateField);
if (fieldCaps == null) {
fieldCaps = new RollupFieldCaps();
@@ -51,9 +51,9 @@ public class RollupJobCaps implements Writeable, ToXContentObject {
fieldCaps.addAgg(dateHistoAggCap);
fieldCapLookup.put(dateField, fieldCaps);

if (job.getGroupConfig().getHisto() != null) {
Map<String, Object> histoAggCap = job.getGroupConfig().getHisto().toAggCap();
Arrays.stream(job.getGroupConfig().getHisto().getFields()).forEach(field -> {
if (job.getGroupConfig().getHistogram() != null) {
Map<String, Object> histoAggCap = job.getGroupConfig().getHistogram().toAggCap();
Arrays.stream(job.getGroupConfig().getHistogram().getFields()).forEach(field -> {
RollupFieldCaps caps = fieldCapLookup.get(field);
if (caps == null) {
caps = new RollupFieldCaps();

@@ -54,7 +54,7 @@ import static org.elasticsearch.common.xcontent.ObjectParser.ValueType;
*/
public class DateHistogramGroupConfig implements Writeable, ToXContentObject {

private static final String NAME = "date_histogram";
static final String NAME = "date_histogram";
private static final String INTERVAL = "interval";
private static final String FIELD = "field";
public static final String TIME_ZONE = "time_zone";

@ -13,17 +13,21 @@ import org.elasticsearch.common.Strings;
|
|||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.xcontent.ObjectParser;
|
||||
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
|
||||
import org.elasticsearch.common.xcontent.ToXContentObject;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.Set;
|
||||
|
||||
import static java.util.Arrays.asList;
|
||||
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
|
||||
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
|
||||
|
||||
/**
|
||||
* The configuration object for the groups section in the rollup config.
|
||||
|
@ -38,64 +42,85 @@ import static java.util.Arrays.asList;
|
|||
* }
|
||||
*/
|
||||
public class GroupConfig implements Writeable, ToXContentObject {
|
||||
private static final String NAME = "grouping_config";
|
||||
private static final ParseField DATE_HISTO = new ParseField("date_histogram");
|
||||
private static final ParseField HISTO = new ParseField("histogram");
|
||||
private static final ParseField TERMS = new ParseField("terms");
|
||||
|
||||
private final DateHistogramGroupConfig dateHisto;
|
||||
private final HistogramGroupConfig histo;
|
||||
private final TermsGroupConfig terms;
|
||||
|
||||
public static final ObjectParser<GroupConfig.Builder, Void> PARSER = new ObjectParser<>(NAME, GroupConfig.Builder::new);
|
||||
|
||||
public static final String NAME = "groups";
|
||||
private static final ConstructingObjectParser<GroupConfig, Void> PARSER;
|
||||
static {
|
||||
PARSER.declareObject(GroupConfig.Builder::setDateHisto, (p,c) -> DateHistogramGroupConfig.fromXContent(p), DATE_HISTO);
|
||||
PARSER.declareObject(GroupConfig.Builder::setHisto, (p,c) -> HistogramGroupConfig.fromXContent(p), HISTO);
|
||||
PARSER.declareObject(GroupConfig.Builder::setTerms, (p,c) -> TermsGroupConfig.fromXContent(p), TERMS);
|
||||
PARSER = new ConstructingObjectParser<>(NAME, args ->
|
||||
new GroupConfig((DateHistogramGroupConfig) args[0], (HistogramGroupConfig) args[1], (TermsGroupConfig) args[2]));
|
||||
PARSER.declareObject(constructorArg(),
|
||||
(p, c) -> DateHistogramGroupConfig.fromXContent(p), new ParseField(DateHistogramGroupConfig.NAME));
|
||||
PARSER.declareObject(optionalConstructorArg(),
|
||||
(p, c) -> HistogramGroupConfig.fromXContent(p), new ParseField(HistogramGroupConfig.NAME));
|
||||
PARSER.declareObject(optionalConstructorArg(),
|
||||
(p, c) -> TermsGroupConfig.fromXContent(p), new ParseField(TermsGroupConfig.NAME));
|
||||
}
|
||||
|
||||
private GroupConfig(DateHistogramGroupConfig dateHisto, @Nullable HistogramGroupConfig histo, @Nullable TermsGroupConfig terms) {
|
||||
this.dateHisto = Objects.requireNonNull(dateHisto, "A date_histogram group is mandatory");
|
||||
this.histo = histo;
|
||||
private final DateHistogramGroupConfig dateHistogram;
|
||||
private final @Nullable HistogramGroupConfig histogram;
|
||||
private final @Nullable TermsGroupConfig terms;
|
||||
|
||||
public GroupConfig(final DateHistogramGroupConfig dateHistogram) {
|
||||
this(dateHistogram, null, null);
|
||||
}
|
||||
|
||||
public GroupConfig(final DateHistogramGroupConfig dateHistogram,
|
||||
final @Nullable HistogramGroupConfig histogram,
|
||||
final @Nullable TermsGroupConfig terms) {
|
||||
if (dateHistogram == null) {
|
||||
throw new IllegalArgumentException("Date histogram must not be null");
|
||||
}
|
||||
this.dateHistogram = dateHistogram;
|
||||
this.histogram = histogram;
|
||||
this.terms = terms;
|
||||
}
|
||||
|
||||
GroupConfig(StreamInput in) throws IOException {
|
||||
dateHisto = new DateHistogramGroupConfig(in);
|
||||
histo = in.readOptionalWriteable(HistogramGroupConfig::new);
|
||||
GroupConfig(final StreamInput in) throws IOException {
|
||||
dateHistogram = new DateHistogramGroupConfig(in);
|
||||
histogram = in.readOptionalWriteable(HistogramGroupConfig::new);
|
||||
terms = in.readOptionalWriteable(TermsGroupConfig::new);
|
||||
}
|
||||
|
||||
public DateHistogramGroupConfig getDateHisto() {
|
||||
return dateHisto;
|
||||
/**
|
||||
* @return the configuration of the date histogram
|
||||
*/
|
||||
public DateHistogramGroupConfig getDateHistogram() {
|
||||
return dateHistogram;
|
||||
}
|
||||
|
||||
public HistogramGroupConfig getHisto() {
|
||||
return histo;
|
||||
/**
|
||||
* @return the configuration of the histogram
|
||||
*/
|
||||
@Nullable
|
||||
public HistogramGroupConfig getHistogram() {
|
||||
return histogram;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the configuration of the terms
|
||||
*/
|
||||
@Nullable
|
||||
public TermsGroupConfig getTerms() {
|
||||
return terms;
|
||||
}
|
||||
|
||||
public Set<String> getAllFields() {
|
||||
Set<String> fields = new HashSet<>();
|
||||
fields.add(dateHisto.getField());
|
||||
if (histo != null) {
|
||||
fields.addAll(asList(histo.getFields()));
|
||||
fields.add(dateHistogram.getField());
|
||||
if (histogram != null) {
|
||||
fields.addAll(asList(histogram.getFields()));
|
||||
}
|
||||
if (terms != null) {
|
||||
fields.addAll(asList(terms.getFields()));
|
||||
}
|
||||
return fields;
|
||||
return Collections.unmodifiableSet(fields);
|
||||
}
|
||||
|
||||
public void validateMappings(Map<String, Map<String, FieldCapabilities>> fieldCapsResponse,
|
||||
ActionRequestValidationException validationException) {
|
||||
dateHisto.validateMappings(fieldCapsResponse, validationException);
|
||||
if (histo != null) {
|
||||
histo.validateMappings(fieldCapsResponse, validationException);
|
||||
public void validateMappings(final Map<String, Map<String, FieldCapabilities>> fieldCapsResponse,
|
||||
final ActionRequestValidationException validationException) {
|
||||
dateHistogram.validateMappings(fieldCapsResponse, validationException);
|
||||
if (histogram != null) {
|
||||
histogram.validateMappings(fieldCapsResponse, validationException);
|
||||
}
|
||||
if (terms != null) {
|
||||
terms.validateMappings(fieldCapsResponse, validationException);
|
||||
|
@ -105,44 +130,43 @@ public class GroupConfig implements Writeable, ToXContentObject {
|
|||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject();
|
||||
builder.field(DATE_HISTO.getPreferredName(), dateHisto);
|
||||
if (histo != null) {
|
||||
builder.field(HISTO.getPreferredName(), histo);
|
||||
{
|
||||
builder.field(DateHistogramGroupConfig.NAME, dateHistogram);
|
||||
if (histogram != null) {
|
||||
builder.field(HistogramGroupConfig.NAME, histogram);
|
||||
}
|
||||
if (terms != null) {
|
||||
builder.field(TERMS.getPreferredName(), terms);
|
||||
builder.field(TermsGroupConfig.NAME, terms);
|
||||
}
|
||||
builder.endObject();
|
||||
return builder;
|
||||
}
|
||||
return builder.endObject();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
dateHisto.writeTo(out);
|
||||
out.writeOptionalWriteable(histo);
|
||||
public void writeTo(final StreamOutput out) throws IOException {
|
||||
dateHistogram.writeTo(out);
|
||||
out.writeOptionalWriteable(histogram);
|
||||
out.writeOptionalWriteable(terms);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object other) {
|
||||
public boolean equals(final Object other) {
|
||||
if (this == other) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (other == null || getClass() != other.getClass()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
GroupConfig that = (GroupConfig) other;
|
||||
|
||||
return Objects.equals(this.dateHisto, that.dateHisto)
|
||||
&& Objects.equals(this.histo, that.histo)
|
||||
&& Objects.equals(this.terms, that.terms);
|
||||
final GroupConfig that = (GroupConfig) other;
|
||||
return Objects.equals(dateHistogram, that.dateHistogram)
|
||||
&& Objects.equals(histogram, that.histogram)
|
||||
&& Objects.equals(terms, that.terms);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(dateHisto, histo, terms);
|
||||
return Objects.hash(dateHistogram, histogram, terms);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -150,43 +174,7 @@ public class GroupConfig implements Writeable, ToXContentObject {
|
|||
return Strings.toString(this, true, true);
|
||||
}
|
||||
|
||||
public static class Builder {
|
||||
private DateHistogramGroupConfig dateHisto;
|
||||
private HistogramGroupConfig histo;
|
||||
private TermsGroupConfig terms;
|
||||
|
||||
public DateHistogramGroupConfig getDateHisto() {
|
||||
return dateHisto;
|
||||
}
|
||||
|
||||
public GroupConfig.Builder setDateHisto(DateHistogramGroupConfig dateHisto) {
|
||||
this.dateHisto = dateHisto;
|
||||
return this;
|
||||
}
|
||||
|
||||
public HistogramGroupConfig getHisto() {
|
||||
return histo;
|
||||
}
|
||||
|
||||
public GroupConfig.Builder setHisto(HistogramGroupConfig histo) {
|
||||
this.histo = histo;
|
||||
return this;
|
||||
}
|
||||
|
||||
public TermsGroupConfig getTerms() {
|
||||
return terms;
|
||||
}
|
||||
|
||||
public GroupConfig.Builder setTerms(TermsGroupConfig terms) {
|
||||
this.terms = terms;
|
||||
return this;
|
||||
}
|
||||
|
||||
public GroupConfig build() {
|
||||
if (dateHisto == null) {
|
||||
throw new IllegalArgumentException("A date_histogram group is mandatory");
|
||||
}
|
||||
return new GroupConfig(dateHisto, histo, terms);
|
||||
}
|
||||
public static GroupConfig fromXContent(final XContentParser parser) throws IOException {
|
||||
return PARSER.parse(parser, null);
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -46,7 +46,7 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constru
*/
public class HistogramGroupConfig implements Writeable, ToXContentObject {

public static final String NAME = "histogram";
static final String NAME = "histogram";
private static final String INTERVAL = "interval";
private static final String FIELDS = "fields";
private static final ConstructingObjectParser<HistogramGroupConfig, Void> PARSER;

@@ -62,7 +62,7 @@ public class RollupJobConfig implements NamedWriteable, ToXContentObject {

static {
PARSER.declareString(RollupJobConfig.Builder::setId, RollupField.ID);
PARSER.declareObject(RollupJobConfig.Builder::setGroupConfig, (p, c) -> GroupConfig.PARSER.apply(p,c).build(), GROUPS);
PARSER.declareObject(RollupJobConfig.Builder::setGroupConfig, (p, c) -> GroupConfig.fromXContent(p), GROUPS);
PARSER.declareObjectArray(RollupJobConfig.Builder::setMetricsConfig, (p, c) -> MetricConfig.fromXContent(p), METRICS);
PARSER.declareString((params, val) ->
params.setTimeout(TimeValue.parseTimeValue(val, TIMEOUT.getPreferredName())), TIMEOUT);

@@ -45,7 +45,7 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constru
*/
public class TermsGroupConfig implements Writeable, ToXContentObject {

private static final String NAME = "terms";
static final String NAME = "terms";
private static final String FIELDS = "fields";

private static final List<String> FLOAT_TYPES = Arrays.asList("half_float", "float", "double", "scaled_float");

@@ -37,7 +37,7 @@ public class ConfigTestHelpers {
String indexPattern = ESTestCase.randomAlphaOfLengthBetween(1,10);
builder.setIndexPattern(indexPattern);
builder.setRollupIndex("rollup_" + indexPattern); // to ensure the index pattern != rollup index
builder.setGroupConfig(ConfigTestHelpers.getGroupConfig().build());
builder.setGroupConfig(ConfigTestHelpers.randomGroupConfig(ESTestCase.random()));
builder.setPageSize(ESTestCase.randomIntBetween(1,10));
if (ESTestCase.randomBoolean()) {
builder.setMetricsConfig(randomMetricsConfigs(ESTestCase.random()));
@@ -45,16 +45,11 @@ public class ConfigTestHelpers {
return builder;
}

public static GroupConfig.Builder getGroupConfig() {
GroupConfig.Builder groupBuilder = new GroupConfig.Builder();
groupBuilder.setDateHisto(randomDateHistogramGroupConfig(ESTestCase.random()));
if (ESTestCase.randomBoolean()) {
groupBuilder.setHisto(randomHistogramGroupConfig(ESTestCase.random()));
}
if (ESTestCase.randomBoolean()) {
groupBuilder.setTerms(randomTermsGroupConfig(ESTestCase.random()));
}
return groupBuilder;
public static GroupConfig randomGroupConfig(final Random random) {
DateHistogramGroupConfig dateHistogram = randomDateHistogramGroupConfig(random);
HistogramGroupConfig histogram = random.nextBoolean() ? randomHistogramGroupConfig(random) : null;
TermsGroupConfig terms = random.nextBoolean() ? randomTermsGroupConfig(random) : null;
return new GroupConfig(dateHistogram, histogram, terms);
}

private static final String[] TIME_SUFFIXES = new String[]{"d", "h", "ms", "s", "m"};

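With the Builder gone, callers construct a GroupConfig directly; the date_histogram group is mandatory while the histogram and terms groups stay optional. A short usage sketch (the field name and interval are illustrative, and it assumes the x-pack rollup classes from this change are on the classpath):

import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig;
import org.elasticsearch.xpack.core.rollup.job.GroupConfig;

public class GroupConfigUsageSketch {

    // Previously: new GroupConfig.Builder().setDateHisto(...).build()
    // The constructor now enforces the mandatory date_histogram group up front.
    static GroupConfig dateHistogramOnly() {
        return new GroupConfig(new DateHistogramGroupConfig("timestamp", new DateHistogramInterval("1h")));
    }

    static GroupConfig withOptionalGroups() {
        // histogram and terms groups may be null; passing null leaves them unset.
        return new GroupConfig(new DateHistogramGroupConfig("timestamp", new DateHistogramInterval("1h")), null, null);
    }
}
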
@@ -8,14 +8,16 @@ package org.elasticsearch.xpack.core.rollup.job;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractSerializingTestCase;
import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers;

import java.io.IOException;

import static org.elasticsearch.xpack.core.rollup.ConfigTestHelpers.randomGroupConfig;

public class GroupConfigSerializingTests extends AbstractSerializingTestCase<GroupConfig> {

@Override
protected GroupConfig doParseInstance(XContentParser parser) throws IOException {
return GroupConfig.PARSER.apply(parser, null).build();
protected GroupConfig doParseInstance(final XContentParser parser) throws IOException {
return GroupConfig.fromXContent(parser);
}

@Override
@@ -25,6 +27,6 @@ public class GroupConfigSerializingTests extends AbstractSerializingTestCase<Gro

@Override
protected GroupConfig createTestInstance() {
return ConfigTestHelpers.getGroupConfig().build();
return randomGroupConfig(random());
}
}

@@ -134,6 +134,7 @@ public class HttpExporterSslIT extends MonitoringIntegTestCase {
clearTransientSettings("plaintext");
}

@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32673")
public void testCanAddNewExporterWithSsl() {
Path truststore = getDataPath("/org/elasticsearch/xpack/monitoring/exporter/http/testnode.jks");
assertThat(Files.exists(truststore), CoreMatchers.is(true));

@@ -89,12 +89,12 @@ class IndexerUtils {
if (k.endsWith("." + DateHistogramAggregationBuilder.NAME)) {
assert v != null;
doc.put(k + "." + RollupField.TIMESTAMP, v);
doc.put(k + "." + RollupField.INTERVAL, groupConfig.getDateHisto().getInterval());
doc.put(k + "." + DateHistogramGroupConfig.TIME_ZONE, groupConfig.getDateHisto().getTimeZone().toString());
doc.put(k + "." + RollupField.INTERVAL, groupConfig.getDateHistogram().getInterval());
doc.put(k + "." + DateHistogramGroupConfig.TIME_ZONE, groupConfig.getDateHistogram().getTimeZone());
idGenerator.add((Long)v);
} else if (k.endsWith("." + HistogramAggregationBuilder.NAME)) {
doc.put(k + "." + RollupField.VALUE, v);
doc.put(k + "." + RollupField.INTERVAL, groupConfig.getHisto().getInterval());
doc.put(k + "." + RollupField.INTERVAL, groupConfig.getHistogram().getInterval());
if (v == null) {
idGenerator.addNull();
} else {

@@ -219,7 +219,7 @@ public abstract class RollupIndexer {

// rounds the current time to its current bucket based on the date histogram interval.
// this is needed to exclude buckets that can still receive new documents.
DateHistogramGroupConfig dateHisto = job.getConfig().getGroupConfig().getDateHisto();
DateHistogramGroupConfig dateHisto = job.getConfig().getGroupConfig().getDateHistogram();
long rounded = dateHisto.createRounding().round(now);
if (dateHisto.getDelay() != null) {
// if the job has a delay we filter all documents that appear before it.
@@ -396,11 +396,11 @@ public abstract class RollupIndexer {

// Add all the agg builders to our request in order: date_histo -> histo -> terms
if (groupConfig != null) {
builders.addAll(groupConfig.getDateHisto().toBuilders());
metadata.putAll(groupConfig.getDateHisto().getMetadata());
if (groupConfig.getHisto() != null) {
builders.addAll(groupConfig.getHisto().toBuilders());
metadata.putAll(groupConfig.getHisto().getMetadata());
builders.addAll(groupConfig.getDateHistogram().toBuilders());
metadata.putAll(groupConfig.getDateHistogram().getMetadata());
if (groupConfig.getHistogram() != null) {
builders.addAll(groupConfig.getHistogram().toBuilders());
metadata.putAll(groupConfig.getHistogram().getMetadata());
}
if (groupConfig.getTerms() != null) {
builders.addAll(groupConfig.getTerms().toBuilders());
@@ -426,7 +426,7 @@ public abstract class RollupIndexer {
*/
private QueryBuilder createBoundaryQuery(Map<String, Object> position) {
assert maxBoundary < Long.MAX_VALUE;
DateHistogramGroupConfig dateHisto = job.getConfig().getGroupConfig().getDateHisto();
DateHistogramGroupConfig dateHisto = job.getConfig().getGroupConfig().getDateHistogram();
String fieldName = dateHisto.getField();
String rollupFieldName = fieldName + "." + DateHistogramAggregationBuilder.NAME;
long lowerBound = 0L;

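The comment above is the key invariant: the indexer only rolls up buckets that can no longer receive new documents, so "now" is rounded down to the start of its bucket and everything from that boundary onward is left alone (optionally pushed further back by the job's delay). A deliberately simplified arithmetic sketch with a fixed-width interval, not the calendar-aware Rounding used by the real code:

public class RollupBoundarySketch {

    // Start of the bucket containing "now"; buckets at or after this boundary are skipped.
    static long maxBoundary(long nowMillis, long intervalMillis) {
        return (nowMillis / intervalMillis) * intervalMillis;
    }

    public static void main(String[] args) {
        long hour = 3_600_000L;
        long now = 10 * hour + 42 * 60_000L;        // 10:42
        System.out.println(maxBoundary(now, hour)); // 36000000 = 10:00, so only buckets before 10:00 are rolled up
    }
}
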
@ -36,14 +36,13 @@ public class RollupJobIdentifierUtilTests extends ESTestCase {
|
|||
|
||||
public void testOneMatch() {
|
||||
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
|
||||
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
|
||||
group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
|
||||
job.setGroupConfig(group.build());
|
||||
final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
|
||||
job.setGroupConfig(group);
|
||||
RollupJobCaps cap = new RollupJobCaps(job.build());
|
||||
Set<RollupJobCaps> caps = singletonSet(cap);
|
||||
|
||||
DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo")
|
||||
.dateHistogramInterval(job.getGroupConfig().getDateHisto().getInterval());
|
||||
.dateHistogramInterval(job.getGroupConfig().getDateHistogram().getInterval());
|
||||
|
||||
Set<RollupJobCaps> bestCaps = RollupJobIdentifierUtils.findBestJobs(builder, caps);
|
||||
assertThat(bestCaps.size(), equalTo(1));
|
||||
|
@ -51,9 +50,8 @@ public class RollupJobIdentifierUtilTests extends ESTestCase {
|
|||
|
||||
public void testBiggerButCompatibleInterval() {
|
||||
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
|
||||
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
|
||||
group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
|
||||
job.setGroupConfig(group.build());
|
||||
final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
|
||||
job.setGroupConfig(group);
|
||||
RollupJobCaps cap = new RollupJobCaps(job.build());
|
||||
Set<RollupJobCaps> caps = singletonSet(cap);
|
||||
|
||||
|
@ -66,9 +64,8 @@ public class RollupJobIdentifierUtilTests extends ESTestCase {
|
|||
|
||||
public void testIncompatibleInterval() {
|
||||
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
|
||||
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
|
||||
group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d")));
|
||||
job.setGroupConfig(group.build());
|
||||
final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d")));
|
||||
job.setGroupConfig(group);
|
||||
RollupJobCaps cap = new RollupJobCaps(job.build());
|
||||
Set<RollupJobCaps> caps = singletonSet(cap);
|
||||
|
||||
|
@ -82,9 +79,8 @@ public class RollupJobIdentifierUtilTests extends ESTestCase {
|
|||
|
||||
public void testBadTimeZone() {
|
||||
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
|
||||
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
|
||||
group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"), null, "EST"));
|
||||
job.setGroupConfig(group.build());
|
||||
final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"), null, "EST"));
|
||||
job.setGroupConfig(group);
|
||||
RollupJobCaps cap = new RollupJobCaps(job.build());
|
||||
Set<RollupJobCaps> caps = singletonSet(cap);
|
||||
|
||||
|
@ -99,9 +95,8 @@ public class RollupJobIdentifierUtilTests extends ESTestCase {
|
|||
|
||||
public void testMetricOnlyAgg() {
|
||||
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
|
||||
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
|
||||
group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
|
||||
job.setGroupConfig(group.build());
|
||||
final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
|
||||
job.setGroupConfig(group);
|
||||
job.setMetricsConfig(singletonList(new MetricConfig("bar", singletonList("max"))));
RollupJobCaps cap = new RollupJobCaps(job.build());
Set<RollupJobCaps> caps = singletonSet(cap);
@ -114,9 +109,8 @@ public class RollupJobIdentifierUtilTests extends ESTestCase {
public void testOneOfTwoMatchingCaps() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
job.setGroupConfig(group.build());
final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
job.setGroupConfig(group);
RollupJobCaps cap = new RollupJobCaps(job.build());
Set<RollupJobCaps> caps = singletonSet(cap);
@ -131,21 +125,15 @@ public class RollupJobIdentifierUtilTests extends ESTestCase {
public void testTwoJobsSameRollupIndex() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
group.setTerms(null);
group.setHisto(null);
job.setGroupConfig(group.build());
final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
job.setGroupConfig(group);
RollupJobCaps cap = new RollupJobCaps(job.build());
Set<RollupJobCaps> caps = new HashSet<>(2);
caps.add(cap);
RollupJobConfig.Builder job2 = ConfigTestHelpers.getRollupJob("foo2");
GroupConfig.Builder group2 = ConfigTestHelpers.getGroupConfig();
group2.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
group2.setTerms(null);
group2.setHisto(null);
job2.setGroupConfig(group.build());
final GroupConfig group2 = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
job2.setGroupConfig(group);
job2.setRollupIndex(job.getRollupIndex());
RollupJobCaps cap2 = new RollupJobCaps(job2.build());
caps.add(cap2);
@ -161,18 +149,16 @@ public class RollupJobIdentifierUtilTests extends ESTestCase {
public void testTwoJobsButBothPartialMatches() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
job.setGroupConfig(group.build());
final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
job.setGroupConfig(group);
job.setMetricsConfig(singletonList(new MetricConfig("bar", singletonList("max"))));
RollupJobCaps cap = new RollupJobCaps(job.build());
Set<RollupJobCaps> caps = new HashSet<>(2);
caps.add(cap);
RollupJobConfig.Builder job2 = ConfigTestHelpers.getRollupJob("foo2");
GroupConfig.Builder group2 = ConfigTestHelpers.getGroupConfig();
group2.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
job2.setGroupConfig(group.build());
final GroupConfig group2 = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
job2.setGroupConfig(group);
job.setMetricsConfig(singletonList(new MetricConfig("bar", singletonList("min"))));
RollupJobCaps cap2 = new RollupJobCaps(job2.build());
caps.add(cap2);
@ -189,19 +175,13 @@ public class RollupJobIdentifierUtilTests extends ESTestCase {
public void testComparableDifferentDateIntervals() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")))
.setHisto(null)
.setTerms(null);
job.setGroupConfig(group.build());
final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
job.setGroupConfig(group);
RollupJobCaps cap = new RollupJobCaps(job.build());
RollupJobConfig.Builder job2 = ConfigTestHelpers.getRollupJob("foo2").setRollupIndex(job.getRollupIndex());
GroupConfig.Builder group2 = ConfigTestHelpers.getGroupConfig();
group2.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d")))
.setHisto(null)
.setTerms(null);
job2.setGroupConfig(group2.build());
final GroupConfig group2 = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d")));
job2.setGroupConfig(group2);
RollupJobCaps cap2 = new RollupJobCaps(job2.build());
DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo")
@ -218,19 +198,13 @@ public class RollupJobIdentifierUtilTests extends ESTestCase {
public void testComparableDifferentDateIntervalsOnlyOneWorks() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")))
.setHisto(null)
.setTerms(null);
job.setGroupConfig(group.build());
final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
job.setGroupConfig(group);
RollupJobCaps cap = new RollupJobCaps(job.build());
RollupJobConfig.Builder job2 = ConfigTestHelpers.getRollupJob("foo2").setRollupIndex(job.getRollupIndex());
GroupConfig.Builder group2 = ConfigTestHelpers.getGroupConfig();
group2.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d")))
.setHisto(null)
.setTerms(null);
job2.setGroupConfig(group2.build());
final GroupConfig group2 = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d")));
job2.setGroupConfig(group2);
RollupJobCaps cap2 = new RollupJobCaps(job2.build());
DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo")
@ -247,19 +221,14 @@ public class RollupJobIdentifierUtilTests extends ESTestCase {
public void testComparableNoHistoVsHisto() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")))
.setHisto(null)
.setTerms(null);
job.setGroupConfig(group.build());
final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
job.setGroupConfig(group);
RollupJobCaps cap = new RollupJobCaps(job.build());
RollupJobConfig.Builder job2 = ConfigTestHelpers.getRollupJob("foo2").setRollupIndex(job.getRollupIndex());
GroupConfig.Builder group2 = ConfigTestHelpers.getGroupConfig();
group2.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")))
.setHisto(new HistogramGroupConfig(100L, "bar"))
.setTerms(null);
job2.setGroupConfig(group2.build());
final HistogramGroupConfig histoConfig = new HistogramGroupConfig(100L, "bar");
final GroupConfig group2 = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")), histoConfig, null);
job2.setGroupConfig(group2);
RollupJobCaps cap2 = new RollupJobCaps(job2.build());
DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo")
@ -277,19 +246,14 @@ public class RollupJobIdentifierUtilTests extends ESTestCase {
public void testComparableNoTermsVsTerms() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")))
.setHisto(null)
.setTerms(null);
job.setGroupConfig(group.build());
final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
job.setGroupConfig(group);
RollupJobCaps cap = new RollupJobCaps(job.build());
RollupJobConfig.Builder job2 = ConfigTestHelpers.getRollupJob("foo2").setRollupIndex(job.getRollupIndex());
GroupConfig.Builder group2 = ConfigTestHelpers.getGroupConfig();
group2.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")))
.setHisto(null)
.setTerms(new TermsGroupConfig("bar"));
job2.setGroupConfig(group2.build());
final TermsGroupConfig termsConfig = new TermsGroupConfig("bar");
final GroupConfig group2 = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")), null, termsConfig);
job2.setGroupConfig(group2);
RollupJobCaps cap2 = new RollupJobCaps(job2.build());
DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo")
@ -313,11 +277,12 @@ public class RollupJobIdentifierUtilTests extends ESTestCase {
.subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field"));
RollupJobConfig job = ConfigTestHelpers.getRollupJob("foo")
.setGroupConfig(ConfigTestHelpers.getGroupConfig()
.setGroupConfig(new GroupConfig(
// NOTE same name but wrong type
.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d"), null, DateTimeZone.UTC.getID()))
.setHisto(new HistogramGroupConfig(1L, "baz")) // <-- NOTE right type but wrong name
.build())
new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d"), null, DateTimeZone.UTC.getID()),
new HistogramGroupConfig(1L, "baz"), // <-- NOTE right type but wrong name
null
))
.setMetricsConfig(
Arrays.asList(new MetricConfig("max_field", singletonList("max")), new MetricConfig("avg_field", singletonList("avg"))))
.build();
@ -336,9 +301,9 @@ public class RollupJobIdentifierUtilTests extends ESTestCase {
.subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field"));
RollupJobConfig job = ConfigTestHelpers.getRollupJob("foo")
.setGroupConfig(ConfigTestHelpers.getGroupConfig()
.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d"), null, DateTimeZone.UTC.getID()))
.build())
.setGroupConfig(new GroupConfig(
new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d"), null, DateTimeZone.UTC.getID())
))
.setMetricsConfig(
Arrays.asList(new MetricConfig("max_field", singletonList("max")), new MetricConfig("avg_field", singletonList("avg"))))
.build();
@ -357,10 +322,10 @@ public class RollupJobIdentifierUtilTests extends ESTestCase {
.subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field"));
RollupJobConfig job = ConfigTestHelpers.getRollupJob("foo")
.setGroupConfig(ConfigTestHelpers.getGroupConfig()
.setGroupConfig(new GroupConfig(
// interval in job is much higher than agg interval above
.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("100d"), null, DateTimeZone.UTC.getID()))
.build())
new DateHistogramGroupConfig("foo", new DateHistogramInterval("100d"), null, DateTimeZone.UTC.getID())
))
.build();
Set<RollupJobCaps> caps = singletonSet(new RollupJobCaps(job));
@ -377,10 +342,10 @@ public class RollupJobIdentifierUtilTests extends ESTestCase {
.subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field"));
RollupJobConfig job = ConfigTestHelpers.getRollupJob("foo")
.setGroupConfig(ConfigTestHelpers.getGroupConfig()
.setGroupConfig(new GroupConfig(
// NOTE different field from the one in the query
.setDateHisto(new DateHistogramGroupConfig("bar", new DateHistogramInterval("1d"), null, DateTimeZone.UTC.getID()))
.build())
new DateHistogramGroupConfig("bar", new DateHistogramInterval("1d"), null, DateTimeZone.UTC.getID())
))
.setMetricsConfig(
Arrays.asList(new MetricConfig("max_field", singletonList("max")), new MetricConfig("avg_field", singletonList("avg"))))
.build();
@ -399,10 +364,11 @@ public class RollupJobIdentifierUtilTests extends ESTestCase {
.subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field"));
RollupJobConfig job = ConfigTestHelpers.getRollupJob("foo")
.setGroupConfig(ConfigTestHelpers.getGroupConfig()
.setDateHisto(new DateHistogramGroupConfig("bar", new DateHistogramInterval("1d"), null, DateTimeZone.UTC.getID()))
.setHisto(new HistogramGroupConfig(1L, "baz")) // <-- NOTE right type but wrong name
.build())
.setGroupConfig(new GroupConfig(
new DateHistogramGroupConfig("bar", new DateHistogramInterval("1d"), null, DateTimeZone.UTC.getID()),
new HistogramGroupConfig(1L, "baz"), // <-- NOTE right type but wrong name
null
))
.setMetricsConfig(
Arrays.asList(new MetricConfig("max_field", singletonList("max")), new MetricConfig("avg_field", singletonList("avg"))))
.build();
@ -421,10 +387,11 @@ public class RollupJobIdentifierUtilTests extends ESTestCase {
.subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field"));
RollupJobConfig job = ConfigTestHelpers.getRollupJob("foo")
.setGroupConfig(ConfigTestHelpers.getGroupConfig()
.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d"), null, DateTimeZone.UTC.getID()))
.setHisto(new HistogramGroupConfig(1L, "baz")) // <-- NOTE right type but wrong name
.build())
.setGroupConfig(new GroupConfig(
new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d"), null, DateTimeZone.UTC.getID()),
new HistogramGroupConfig(1L, "baz"), // <-- NOTE right type but wrong name
null
))
.build();
Set<RollupJobCaps> caps = singletonSet(new RollupJobCaps(job));
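Every hunk in the test file above makes the same change: the mutable GroupConfig.Builder (setDateHisto/setHisto/setTerms followed by build()) gives way to GroupConfig's constructors. As a reading aid only, here is a minimal sketch of the two styles using the field names and intervals that recur in the hunks; the rollup config import paths appear in this commit's import hunks, the DateHistogramInterval import is the usual Elasticsearch location, and the sketch itself is not part of the change.

import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig;
import org.elasticsearch.xpack.core.rollup.job.GroupConfig;
import org.elasticsearch.xpack.core.rollup.job.HistogramGroupConfig;
import org.elasticsearch.xpack.core.rollup.job.TermsGroupConfig;

class GroupConfigStyles {
    // Old style (removed above): ConfigTestHelpers.getGroupConfig() handed back a GroupConfig.Builder,
    // callers chained setDateHisto(...).setHisto(...).setTerms(...) and finished with build().

    // New style, single-argument constructor for the common date-histogram-only case:
    static GroupConfig dateOnly() {
        return new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
    }

    // New style, three-argument constructor; the histogram and terms groups may each be null:
    static GroupConfig fullyPopulated() {
        return new GroupConfig(
            new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")),
            new HistogramGroupConfig(100L, "bar"),
            new TermsGroupConfig("bar"));
    }
}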
@ -58,7 +58,7 @@ import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig;
import org.elasticsearch.xpack.core.rollup.job.TermsGroupConfig;
import org.elasticsearch.xpack.rollup.Rollup;
import org.hamcrest.core.IsEqual;
import org.joda.time.DateTimeZone;
import org.junit.Before;
import org.mockito.Mockito;
import java.io.IOException;
@ -81,6 +81,9 @@ import static org.mockito.Mockito.when;
public class SearchActionTests extends ESTestCase {
private NamedWriteableRegistry namedWriteableRegistry;
@Override
@Before
public void setUp() throws Exception {
super.setUp();
IndicesModule indicesModule = new IndicesModule(Collections.emptyList());
@ -119,9 +122,8 @@ public class SearchActionTests extends ESTestCase {
public void testRange() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
job.setGroupConfig(group.build());
final GroupConfig groupConfig = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
job.setGroupConfig(groupConfig);
RollupJobCaps cap = new RollupJobCaps(job.build());
Set<RollupJobCaps> caps = new HashSet<>();
caps.add(cap);
@ -132,9 +134,8 @@ public class SearchActionTests extends ESTestCase {
public void testRangeNullTimeZone() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
job.setGroupConfig(group.build());
final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
job.setGroupConfig(group);
RollupJobCaps cap = new RollupJobCaps(job.build());
Set<RollupJobCaps> caps = new HashSet<>();
caps.add(cap);
@ -145,9 +146,8 @@ public class SearchActionTests extends ESTestCase {
public void testRangeWrongTZ() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
job.setGroupConfig(group.build());
final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
job.setGroupConfig(group);
RollupJobCaps cap = new RollupJobCaps(job.build());
Set<RollupJobCaps> caps = new HashSet<>();
caps.add(cap);
@ -159,9 +159,9 @@ public class SearchActionTests extends ESTestCase {
public void testTermQuery() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setTerms(new TermsGroupConfig("foo"));
job.setGroupConfig(group.build());
final TermsGroupConfig termsConfig = new TermsGroupConfig("foo");
final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("date", new DateHistogramInterval("1h")), null, termsConfig);
job.setGroupConfig(group);
RollupJobCaps cap = new RollupJobCaps(job.build());
Set<RollupJobCaps> caps = new HashSet<>();
caps.add(cap);
@ -172,9 +172,9 @@ public class SearchActionTests extends ESTestCase {
public void testTermsQuery() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setTerms(new TermsGroupConfig("foo"));
job.setGroupConfig(group.build());
final TermsGroupConfig termsConfig = new TermsGroupConfig("foo");
final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("date", new DateHistogramInterval("1h")), null, termsConfig);
job.setGroupConfig(group);
RollupJobCaps cap = new RollupJobCaps(job.build());
Set<RollupJobCaps> caps = new HashSet<>();
caps.add(cap);
@ -189,9 +189,8 @@ public class SearchActionTests extends ESTestCase {
public void testCompounds() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
job.setGroupConfig(group.build());
final GroupConfig groupConfig = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
job.setGroupConfig(groupConfig);
RollupJobCaps cap = new RollupJobCaps(job.build());
Set<RollupJobCaps> caps = new HashSet<>();
caps.add(cap);
@ -205,9 +204,8 @@ public class SearchActionTests extends ESTestCase {
public void testMatchAll() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
job.setGroupConfig(group.build());
final GroupConfig groupConfig = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
job.setGroupConfig(groupConfig);
RollupJobCaps cap = new RollupJobCaps(job.build());
Set<RollupJobCaps> caps = new HashSet<>();
caps.add(cap);
@ -217,10 +215,9 @@ public class SearchActionTests extends ESTestCase {
public void testAmbiguousResolution() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
group.setTerms(new TermsGroupConfig("foo"));
job.setGroupConfig(group.build());
final TermsGroupConfig termsConfig = new TermsGroupConfig("foo");
final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")), null, termsConfig);
job.setGroupConfig(group);
RollupJobCaps cap = new RollupJobCaps(job.build());
Set<RollupJobCaps> caps = new HashSet<>();
caps.add(cap);
@ -368,9 +365,8 @@ public class SearchActionTests extends ESTestCase {
public void testGood() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
job.setGroupConfig(group.build());
final GroupConfig groupConfig = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
job.setGroupConfig(groupConfig);
RollupJobCaps cap = new RollupJobCaps(job.build());
Set<RollupJobCaps> caps = singletonSet(cap);
@ -385,7 +381,7 @@ public class SearchActionTests extends ESTestCase {
source.query(getQueryBuilder(1));
source.size(0);
source.aggregation(new DateHistogramAggregationBuilder("foo").field("foo")
.dateHistogramInterval(job.getGroupConfig().getDateHisto().getInterval()));
.dateHistogramInterval(job.getGroupConfig().getDateHistogram().getInterval()));
SearchRequest request = new SearchRequest(combinedIndices, source);
MultiSearchRequest msearch = TransportRollupSearchAction.createMSearchRequest(request, namedWriteableRegistry, ctx);
@ -414,9 +410,7 @@ public class SearchActionTests extends ESTestCase {
SearchRequest request = new SearchRequest(combinedIndices, source);
RollupJobConfig job = ConfigTestHelpers.getRollupJob("foo")
.setGroupConfig(ConfigTestHelpers.getGroupConfig()
.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d"), null, DateTimeZone.UTC.getID()))
.build())
.setGroupConfig(new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d"), null, "UTC")))
.build();
Set<RollupJobCaps> caps = singletonSet(new RollupJobCaps(job));
@ -439,15 +433,12 @@ public class SearchActionTests extends ESTestCase {
public void testTwoMatchingJobs() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")))
.setHisto(null)
.setTerms(null);
job.setGroupConfig(group.build());
final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
job.setGroupConfig(group);
RollupJobCaps cap = new RollupJobCaps(job.build());
RollupJobConfig.Builder job2 = ConfigTestHelpers.getRollupJob("foo2").setRollupIndex(job.getRollupIndex());
job2.setGroupConfig(group.build());
job2.setGroupConfig(group);
// so that the jobs aren't exactly equal
job2.setMetricsConfig(ConfigTestHelpers.randomMetricsConfigs(random()));
@ -468,7 +459,7 @@ public class SearchActionTests extends ESTestCase {
source.query(getQueryBuilder(1));
source.size(0);
source.aggregation(new DateHistogramAggregationBuilder("foo").field("foo")
.dateHistogramInterval(job.getGroupConfig().getDateHisto().getInterval()));
.dateHistogramInterval(job.getGroupConfig().getDateHistogram().getInterval()));
SearchRequest request = new SearchRequest(combinedIndices, source);
MultiSearchRequest msearch = TransportRollupSearchAction.createMSearchRequest(request, namedWriteableRegistry, ctx);
@ -489,19 +480,13 @@ public class SearchActionTests extends ESTestCase {
public void testTwoMatchingJobsOneBetter() {
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo");
GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
group.setDateHisto(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")))
.setHisto(null)
.setTerms(null);
job.setGroupConfig(group.build());
final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
job.setGroupConfig(group);
RollupJobCaps cap = new RollupJobCaps(job.build());
RollupJobConfig.Builder job2 = ConfigTestHelpers.getRollupJob("foo2").setRollupIndex(job.getRollupIndex());
GroupConfig.Builder group2 = ConfigTestHelpers.getGroupConfig();
group2.setDateHisto(group.getDateHisto())
.setHisto(randomHistogramGroupConfig(random()))
.setTerms(null);
job2.setGroupConfig(group2.build());
final GroupConfig group2 = new GroupConfig(group.getDateHistogram(), randomHistogramGroupConfig(random()), null);
job2.setGroupConfig(group2);
RollupJobCaps cap2 = new RollupJobCaps(job2.build());
Set<RollupJobCaps> caps = new HashSet<>(2);
@ -519,7 +504,7 @@ public class SearchActionTests extends ESTestCase {
source.query(getQueryBuilder(1));
source.size(0);
source.aggregation(new DateHistogramAggregationBuilder("foo").field("foo")
.dateHistogramInterval(job.getGroupConfig().getDateHisto().getInterval()));
.dateHistogramInterval(job.getGroupConfig().getDateHistogram().getInterval()));
SearchRequest request = new SearchRequest(combinedIndices, source);
MultiSearchRequest msearch = TransportRollupSearchAction.createMSearchRequest(request, namedWriteableRegistry, ctx);
@ -22,6 +22,8 @@ import java.util.Map;
import static java.util.Collections.emptyList;
import static java.util.Collections.singletonList;
import static org.elasticsearch.xpack.core.rollup.ConfigTestHelpers.randomHistogramGroupConfig;
import static org.elasticsearch.xpack.core.rollup.ConfigTestHelpers.randomTermsGroupConfig;
import static org.hamcrest.Matchers.equalTo;
//TODO split this into dedicated unit test classes (one for each config object)
public class ConfigTests extends ESTestCase {
@ -43,22 +45,14 @@ public class ConfigTests extends ESTestCase {
}
public void testEmptyGroup() {
GroupConfig.Builder groupConfig = ConfigTestHelpers.getGroupConfig();
groupConfig.setDateHisto(null);
groupConfig.setTerms(null);
groupConfig.setHisto(null);
Exception e = expectThrows(IllegalArgumentException.class, groupConfig::build);
assertThat(e.getMessage(), equalTo("A date_histogram group is mandatory"));
Exception e = expectThrows(IllegalArgumentException.class, () -> new GroupConfig(null, null, null));
assertThat(e.getMessage(), equalTo("Date histogram must not be null"));
}
public void testNoDateHisto() {
GroupConfig.Builder groupConfig = new GroupConfig.Builder();
groupConfig.setTerms(ConfigTestHelpers.randomTermsGroupConfig(random()));
groupConfig.setHisto(ConfigTestHelpers.randomHistogramGroupConfig(random()));
Exception e = expectThrows(IllegalArgumentException.class, groupConfig::build);
assertThat(e.getMessage(), equalTo("A date_histogram group is mandatory"));
Exception e = expectThrows(IllegalArgumentException.class,
() -> new GroupConfig(null, randomHistogramGroupConfig(random()), randomTermsGroupConfig(random())));
assertThat(e.getMessage(), equalTo("Date histogram must not be null"));
}
public void testEmptyGroupAndMetrics() {
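The two ConfigTests hunks above capture the behavioural side of the refactoring: validation that the old GroupConfig.Builder deferred until build() now happens in the GroupConfig constructor, and the error message changes from "A date_histogram group is mandatory" to "Date histogram must not be null". A minimal sketch of the new contract, not part of the commit:

// Histogram and terms groups remain optional and may be passed as null.
GroupConfig valid = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")), null, null);
// A missing date histogram now fails at construction time rather than at build():
// new GroupConfig(null, null, null) throws IllegalArgumentException("Date histogram must not be null")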
@ -35,7 +35,6 @@ import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggre
import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder;
import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers;
import org.elasticsearch.xpack.core.rollup.RollupField;
import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig;
import org.elasticsearch.xpack.core.rollup.job.GroupConfig;
@ -54,8 +53,10 @@ import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import static org.elasticsearch.xpack.core.rollup.ConfigTestHelpers.randomHistogramGroupConfig;
import static java.util.Collections.singletonList;
import static org.elasticsearch.xpack.core.rollup.ConfigTestHelpers.randomDateHistogramGroupConfig;
import static org.elasticsearch.xpack.core.rollup.ConfigTestHelpers.randomGroupConfig;
import static org.elasticsearch.xpack.core.rollup.ConfigTestHelpers.randomHistogramGroupConfig;
import static org.hamcrest.Matchers.equalTo;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@ -112,8 +113,8 @@ public class IndexerUtilsTests extends AggregatorTestCase {
indexReader.close();
directory.close();
List<IndexRequest> docs = IndexerUtils.processBuckets(composite, indexName, stats,
ConfigTestHelpers.getGroupConfig().build(), "foo", randomBoolean());
final GroupConfig groupConfig = randomGroupConfig(random());
List<IndexRequest> docs = IndexerUtils.processBuckets(composite, indexName, stats, groupConfig, "foo", randomBoolean());
assertThat(docs.size(), equalTo(numDocs));
for (IndexRequest doc : docs) {
@ -179,8 +180,8 @@ public class IndexerUtilsTests extends AggregatorTestCase {
indexReader.close();
directory.close();
List<IndexRequest> docs = IndexerUtils.processBuckets(composite, indexName, stats,
ConfigTestHelpers.getGroupConfig().build(), "foo", randomBoolean());
final GroupConfig groupConfig = randomGroupConfig(random());
List<IndexRequest> docs = IndexerUtils.processBuckets(composite, indexName, stats, groupConfig, "foo", randomBoolean());
assertThat(docs.size(), equalTo(numDocs));
for (IndexRequest doc : docs) {
@ -235,8 +236,8 @@ public class IndexerUtilsTests extends AggregatorTestCase {
indexReader.close();
directory.close();
List<IndexRequest> docs = IndexerUtils.processBuckets(composite, indexName, stats,
ConfigTestHelpers.getGroupConfig().build(), "foo", randomBoolean());
final GroupConfig groupConfig = randomGroupConfig(random());
List<IndexRequest> docs = IndexerUtils.processBuckets(composite, indexName, stats, groupConfig, "foo", randomBoolean());
assertThat(docs.size(), equalTo(numDocs));
for (IndexRequest doc : docs) {
@ -301,8 +302,8 @@ public class IndexerUtilsTests extends AggregatorTestCase {
indexReader.close();
directory.close();
List<IndexRequest> docs = IndexerUtils.processBuckets(composite, indexName, stats,
ConfigTestHelpers.getGroupConfig().build(), "foo", randomBoolean());
final GroupConfig groupConfig = randomGroupConfig(random());
List<IndexRequest> docs = IndexerUtils.processBuckets(composite, indexName, stats, groupConfig, "foo", randomBoolean());
assertThat(docs.size(), equalTo(numDocs));
for (IndexRequest doc : docs) {
@ -353,11 +354,8 @@ public class IndexerUtilsTests extends AggregatorTestCase {
// The content of the config don't actually matter for this test
// because the test is just looking at agg keys
GroupConfig.Builder groupConfig = ConfigTestHelpers.getGroupConfig();
groupConfig.setHisto(new HistogramGroupConfig(123L, "abc"));
List<IndexRequest> docs = IndexerUtils.processBuckets(composite, "foo", new RollupJobStats(),
groupConfig.build(), "foo", false);
GroupConfig groupConfig = new GroupConfig(randomDateHistogramGroupConfig(random()), new HistogramGroupConfig(123L, "abc"), null);
List<IndexRequest> docs = IndexerUtils.processBuckets(composite, "foo", new RollupJobStats(), groupConfig, "foo", false);
assertThat(docs.size(), equalTo(1));
assertThat(docs.get(0).id(), equalTo("1237859798"));
}
@ -400,11 +398,8 @@ public class IndexerUtilsTests extends AggregatorTestCase {
return foos;
});
GroupConfig.Builder groupConfig = ConfigTestHelpers.getGroupConfig();
groupConfig.setHisto(new HistogramGroupConfig(1, "abc"));
List<IndexRequest> docs = IndexerUtils.processBuckets(composite, "foo", new RollupJobStats(),
groupConfig.build(), "foo", true);
GroupConfig groupConfig = new GroupConfig(randomDateHistogramGroupConfig(random()), new HistogramGroupConfig(1L, "abc"), null);
List<IndexRequest> docs = IndexerUtils.processBuckets(composite, "foo", new RollupJobStats(), groupConfig, "foo", true);
assertThat(docs.size(), equalTo(1));
assertThat(docs.get(0).id(), equalTo("foo$c9LcrFqeFW92uN_Z7sv1hA"));
}
@ -453,11 +448,8 @@ public class IndexerUtilsTests extends AggregatorTestCase {
return foos;
});
GroupConfig.Builder groupConfig = ConfigTestHelpers.getGroupConfig();
groupConfig.setHisto(new HistogramGroupConfig(1, "abc"));
List<IndexRequest> docs = IndexerUtils.processBuckets(composite, "foo", new RollupJobStats(),
groupConfig.build(), "foo", true);
GroupConfig groupConfig = new GroupConfig(randomDateHistogramGroupConfig(random()), new HistogramGroupConfig(1, "abc"), null);
List<IndexRequest> docs = IndexerUtils.processBuckets(composite, "foo", new RollupJobStats(), groupConfig, "foo", true);
assertThat(docs.size(), equalTo(1));
assertThat(docs.get(0).id(), equalTo("foo$VAFKZpyaEqYRPLyic57_qw"));
}
@ -483,11 +475,8 @@ public class IndexerUtilsTests extends AggregatorTestCase {
return foos;
});
GroupConfig.Builder groupConfig = ConfigTestHelpers.getGroupConfig();
groupConfig.setHisto(randomHistogramGroupConfig(random()));
List<IndexRequest> docs = IndexerUtils.processBuckets(composite, "foo", new RollupJobStats(),
groupConfig.build(), "foo", randomBoolean());
GroupConfig groupConfig = new GroupConfig(randomDateHistogramGroupConfig(random()), randomHistogramGroupConfig(random()), null);
List<IndexRequest> docs = IndexerUtils.processBuckets(composite, "foo", new RollupJobStats(), groupConfig, "foo", randomBoolean());
assertThat(docs.size(), equalTo(1));
assertFalse(Strings.isNullOrEmpty(docs.get(0).id()));
}
@ -548,8 +537,8 @@ public class IndexerUtilsTests extends AggregatorTestCase {
indexReader.close();
directory.close();
List<IndexRequest> docs = IndexerUtils.processBuckets(composite, indexName, stats,
ConfigTestHelpers.getGroupConfig().build(), "foo", randomBoolean());
final GroupConfig groupConfig = randomGroupConfig(random());
List<IndexRequest> docs = IndexerUtils.processBuckets(composite, indexName, stats, groupConfig, "foo", randomBoolean());
assertThat(docs.size(), equalTo(6));
for (IndexRequest doc : docs) {
@ -96,8 +96,7 @@ public class RollupIndexerIndexingTests extends AggregatorTestCase {
String rollupIndex = randomAlphaOfLength(10);
String field = "the_histo";
DateHistogramGroupConfig dateHistoConfig = new DateHistogramGroupConfig(field, new DateHistogramInterval("1ms"));
RollupJobConfig job = createJob(rollupIndex, new GroupConfig.Builder().setDateHisto(dateHistoConfig).build(),
Collections.emptyList());
RollupJobConfig job = createJob(rollupIndex, new GroupConfig(dateHistoConfig), Collections.emptyList());
final List<Map<String, Object>> dataset = new ArrayList<>();
dataset.addAll(
Arrays.asList(
@ -142,8 +141,7 @@ public class RollupIndexerIndexingTests extends AggregatorTestCase {
String field = "the_histo";
DateHistogramGroupConfig dateHistoConfig = new DateHistogramGroupConfig(field, new DateHistogramInterval("1h"));
MetricConfig config = new MetricConfig("counter", Arrays.asList("avg", "sum", "max", "min"));
RollupJobConfig job = createJob(rollupIndex, new GroupConfig.Builder().setDateHisto(dateHistoConfig).build(),
Collections.singletonList(config));
RollupJobConfig job = createJob(rollupIndex, new GroupConfig(dateHistoConfig), Collections.singletonList(config));
final List<Map<String, Object>> dataset = new ArrayList<>();
dataset.addAll(
Arrays.asList(
@ -265,8 +263,7 @@ public class RollupIndexerIndexingTests extends AggregatorTestCase {
String field = "the_histo";
DateHistogramGroupConfig dateHistoConfig =
new DateHistogramGroupConfig(field, new DateHistogramInterval("1m"), new DateHistogramInterval("1h"), null);
RollupJobConfig job = createJob(rollupIndex, new GroupConfig.Builder().setDateHisto(dateHistoConfig).build(),
Collections.emptyList());
RollupJobConfig job = createJob(rollupIndex, new GroupConfig(dateHistoConfig), Collections.emptyList());
final List<Map<String, Object>> dataset = new ArrayList<>();
long now = System.currentTimeMillis();
dataset.addAll(
@ -347,8 +344,7 @@ public class RollupIndexerIndexingTests extends AggregatorTestCase {
String rollupIndex = randomAlphaOfLengthBetween(5, 10);
String field = "the_histo";
DateHistogramGroupConfig dateHistoConfig = new DateHistogramGroupConfig(field, new DateHistogramInterval("1d"), null, timeZone);
RollupJobConfig job = createJob(rollupIndex, new GroupConfig.Builder().setDateHisto(dateHistoConfig).build(),
Collections.emptyList());
RollupJobConfig job = createJob(rollupIndex, new GroupConfig(dateHistoConfig), Collections.emptyList());
executeTestCase(dataset, job, now, (resp) -> {
assertThat(resp.size(), equalTo(1));
@ -410,8 +406,7 @@ public class RollupIndexerIndexingTests extends AggregatorTestCase {
DateHistogramGroupConfig dateHistoConfig =
new DateHistogramGroupConfig(timestampField, new DateHistogramInterval(timeInterval));
MetricConfig metricConfig = new MetricConfig(valueField, Collections.singletonList("avg"));
RollupJobConfig job = createJob(rollupIndex, new GroupConfig.Builder().setDateHisto(dateHistoConfig).build(),
Collections.singletonList(metricConfig));
RollupJobConfig job = createJob(rollupIndex, new GroupConfig(dateHistoConfig), Collections.singletonList(metricConfig));
final List<Map<String, Object>> dataset = new ArrayList<>();
int numDocs = randomIntBetween(1,100);
@ -477,7 +472,7 @@ public class RollupIndexerIndexingTests extends AggregatorTestCase {
Directory dir = index(docs, fieldTypeLookup);
IndexReader reader = DirectoryReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
String dateHistoField = config.getGroupConfig().getDateHisto().getField();
String dateHistoField = config.getGroupConfig().getDateHistogram().getField();
final ExecutorService executor = Executors.newFixedThreadPool(1);
try {
RollupJob job = new RollupJob(config, Collections.emptyMap());
@ -499,14 +494,14 @@ public class RollupIndexerIndexingTests extends AggregatorTestCase {
*/
private Map<String, MappedFieldType> createFieldTypes(RollupJobConfig job) {
Map<String, MappedFieldType> fieldTypes = new HashMap<>();
MappedFieldType fieldType = new DateFieldMapper.Builder(job.getGroupConfig().getDateHisto().getField())
MappedFieldType fieldType = new DateFieldMapper.Builder(job.getGroupConfig().getDateHistogram().getField())
.dateTimeFormatter(Joda.forPattern(randomFrom("basic_date", "date_optional_time", "epoch_second")))
.build(new Mapper.BuilderContext(settings.getSettings(), new ContentPath(0)))
.fieldType();
fieldTypes.put(fieldType.name(), fieldType);
if (job.getGroupConfig().getHisto() != null) {
for (String field : job.getGroupConfig().getHisto().getFields()) {
if (job.getGroupConfig().getHistogram() != null) {
for (String field : job.getGroupConfig().getHistogram().getFields()) {
MappedFieldType ft = new NumberFieldMapper.Builder(field, NumberFieldMapper.NumberType.LONG)
.build(new Mapper.BuilderContext(settings.getSettings(), new ContentPath(0)))
.fieldType();
@ -273,7 +273,7 @@ public class RollupIndexerStateTests extends ESTestCase {
// and make sure the appropriate error is thrown
when(config.getGroupConfig()).then((Answer<GroupConfig>) invocationOnMock -> {
state.set(IndexerState.STOPPED);
return ConfigTestHelpers.getGroupConfig().build();
return ConfigTestHelpers.randomGroupConfig(random());
});
RollupJob job = new RollupJob(config, Collections.emptyMap());
@ -0,0 +1 @@
4b41b53a3a2d299ce381a69d165381ca19f62912
@ -1 +0,0 @@
0a97a849b18b3798c4af1a2ca5b10c66cef17e3a
@ -76,8 +76,6 @@ thirdPartyAudit.excludes = [
'com.fasterxml.jackson.dataformat.xml.JacksonXmlModule',
'com.fasterxml.jackson.dataformat.xml.XmlMapper',
'com.fasterxml.jackson.dataformat.xml.util.DefaultXmlPrettyPrinter',
'com.fasterxml.jackson.databind.node.JsonNodeFactory',
'com.fasterxml.jackson.databind.node.ObjectNode',
'com.lmax.disruptor.BlockingWaitStrategy',
'com.lmax.disruptor.BusySpinWaitStrategy',
'com.lmax.disruptor.EventFactory',
@ -116,12 +114,6 @@ thirdPartyAudit.excludes = [
'javax.mail.internet.MimeMultipart',
'javax.mail.internet.MimeUtility',
'javax.mail.util.ByteArrayDataSource',
'javax.persistence.AttributeConverter',
'javax.persistence.EntityManager',
'javax.persistence.EntityManagerFactory',
'javax.persistence.EntityTransaction',
'javax.persistence.Persistence',
'javax.persistence.PersistenceException',
'org.apache.commons.compress.compressors.CompressorStreamFactory',
'org.apache.commons.compress.utils.IOUtils',
'org.apache.commons.csv.CSVFormat',
@ -150,3 +142,13 @@ thirdPartyAudit.excludes = [
'org.zeromq.ZMQ$Socket',
'org.zeromq.ZMQ'
]
if (JavaVersion.current() <= JavaVersion.VERSION_1_8) {
// Used by Log4J 2.11.1
thirdPartyAudit.excludes += [
'java.io.ObjectInputFilter',
'java.io.ObjectInputFilter$Config',
'java.io.ObjectInputFilter$FilterInfo',
'java.io.ObjectInputFilter$Status'
]
}
@ -0,0 +1 @@
268f0fe4df3eefe052b57c87ec48517d64fb2a10
@ -1 +0,0 @@
7a2999229464e7a324aa503c0a52ec0f05efe7bd
@ -0,0 +1 @@
592a48674c926b01a9a747c7831bcd82a9e6d6e4
@ -1 +0,0 @@
c041978c686866ee8534f538c6220238db3bb6be
@ -0,0 +1,41 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.protocol.xpack.license;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.master.MasterNodeReadRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import java.io.IOException;
public class GetLicenseRequest extends MasterNodeReadRequest<GetLicenseRequest> {
public GetLicenseRequest() {
}
public GetLicenseRequest(StreamInput in) throws IOException {
super(in);
}
@Override
public ActionRequestValidationException validate() {
return null;
}
}
@ -0,0 +1,38 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.protocol.xpack.license;
import org.elasticsearch.action.ActionResponse;
public class GetLicenseResponse extends ActionResponse {
private String license;
GetLicenseResponse() {
}
public GetLicenseResponse(String license) {
this.license = license;
}
public String getLicenseDefinition() {
return license;
}
}
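For orientation only (not part of the commit): the new request/response pair for retrieving a license is deliberately thin; the request carries no parameters to validate and the response just wraps the license definition as a string. The JSON payload below is an invented example.

GetLicenseRequest request = new GetLicenseRequest();
GetLicenseResponse response = new GetLicenseResponse("{\"license\":{\"status\":\"active\"}}");
String definition = response.getLicenseDefinition();   // returns the string passed to the constructor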
@ -0,0 +1,134 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.protocol.xpack.ml.datafeed;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
import java.util.Locale;
import java.util.Objects;
/**
* The description of how searches should be chunked.
*/
public class ChunkingConfig implements ToXContentObject {
public static final ParseField MODE_FIELD = new ParseField("mode");
public static final ParseField TIME_SPAN_FIELD = new ParseField("time_span");
public static final ConstructingObjectParser<ChunkingConfig, Void> PARSER = new ConstructingObjectParser<>(
"chunking_config", true, a -> new ChunkingConfig((Mode) a[0], (TimeValue) a[1]));
static {
PARSER.declareField(ConstructingObjectParser.constructorArg(), p -> {
if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
return Mode.fromString(p.text());
}
throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
}, MODE_FIELD, ValueType.STRING);
PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), p -> {
if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
return TimeValue.parseTimeValue(p.text(), TIME_SPAN_FIELD.getPreferredName());
}
throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
}, TIME_SPAN_FIELD, ValueType.STRING);
}
private final Mode mode;
private final TimeValue timeSpan;
ChunkingConfig(Mode mode, @Nullable TimeValue timeSpan) {
this.mode = Objects.requireNonNull(mode, MODE_FIELD.getPreferredName());
this.timeSpan = timeSpan;
}
@Nullable
public TimeValue getTimeSpan() {
return timeSpan;
}
Mode getMode() {
return mode;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(MODE_FIELD.getPreferredName(), mode);
if (timeSpan != null) {
builder.field(TIME_SPAN_FIELD.getPreferredName(), timeSpan.getStringRep());
}
builder.endObject();
return builder;
}
@Override
public int hashCode() {
return Objects.hash(mode, timeSpan);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
ChunkingConfig other = (ChunkingConfig) obj;
return Objects.equals(this.mode, other.mode) &&
Objects.equals(this.timeSpan, other.timeSpan);
}
public static ChunkingConfig newAuto() {
return new ChunkingConfig(Mode.AUTO, null);
}
public static ChunkingConfig newOff() {
return new ChunkingConfig(Mode.OFF, null);
}
public static ChunkingConfig newManual(TimeValue timeSpan) {
return new ChunkingConfig(Mode.MANUAL, timeSpan);
}
public enum Mode {
AUTO, MANUAL, OFF;
public static Mode fromString(String value) {
return Mode.valueOf(value.toUpperCase(Locale.ROOT));
}
@Override
public String toString() {
return name().toLowerCase(Locale.ROOT);
}
}
}
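A brief usage sketch for the ChunkingConfig class above (not part of the commit). The XContentFactory/ToXContent helpers used to build JSON are the standard Elasticsearch utilities and are assumed here rather than introduced by this change.

ChunkingConfig auto = ChunkingConfig.newAuto();                                  // serializes as {"mode":"auto"}
ChunkingConfig manual = ChunkingConfig.newManual(TimeValue.timeValueHours(1));   // e.g. {"mode":"manual","time_span":"1h"}

try (XContentBuilder builder = XContentFactory.jsonBuilder()) {
    manual.toXContent(builder, ToXContent.EMPTY_PARAMS);
}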
@ -0,0 +1,329 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.protocol.xpack.ml.datafeed;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.AbstractQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Objects;
/**
* Datafeed configuration options pojo. Describes where to proactively pull input
* data from.
* <p>
* If a value has not been set it will be <code>null</code>. Object wrappers are
* used around integral types and booleans so they can take <code>null</code>
* values.
*/
public class DatafeedConfig implements ToXContentObject {
public static final int DEFAULT_SCROLL_SIZE = 1000;
public static final ParseField ID = new ParseField("datafeed_id");
public static final ParseField QUERY_DELAY = new ParseField("query_delay");
public static final ParseField FREQUENCY = new ParseField("frequency");
public static final ParseField INDEXES = new ParseField("indexes");
public static final ParseField INDICES = new ParseField("indices");
public static final ParseField JOB_ID = new ParseField("job_id");
public static final ParseField TYPES = new ParseField("types");
public static final ParseField QUERY = new ParseField("query");
public static final ParseField SCROLL_SIZE = new ParseField("scroll_size");
public static final ParseField AGGREGATIONS = new ParseField("aggregations");
public static final ParseField AGGS = new ParseField("aggs");
public static final ParseField SCRIPT_FIELDS = new ParseField("script_fields");
public static final ParseField CHUNKING_CONFIG = new ParseField("chunking_config");
public static final ConstructingObjectParser<Builder, Void> PARSER = new ConstructingObjectParser<>(
"datafeed_config", true, a -> new Builder((String)a[0], (String)a[1]));
static {
PARSER.declareString(ConstructingObjectParser.constructorArg(), ID);
PARSER.declareString(ConstructingObjectParser.constructorArg(), JOB_ID);
PARSER.declareStringArray(Builder::setIndices, INDEXES);
PARSER.declareStringArray(Builder::setIndices, INDICES);
PARSER.declareStringArray(Builder::setTypes, TYPES);
PARSER.declareString((builder, val) ->
builder.setQueryDelay(TimeValue.parseTimeValue(val, QUERY_DELAY.getPreferredName())), QUERY_DELAY);
PARSER.declareString((builder, val) ->
builder.setFrequency(TimeValue.parseTimeValue(val, FREQUENCY.getPreferredName())), FREQUENCY);
PARSER.declareObject(Builder::setQuery, (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), QUERY);
PARSER.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), AGGREGATIONS);
PARSER.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), AGGS);
PARSER.declareObject(Builder::setScriptFields, (p, c) -> {
List<SearchSourceBuilder.ScriptField> parsedScriptFields = new ArrayList<>();
while (p.nextToken() != XContentParser.Token.END_OBJECT) {
parsedScriptFields.add(new SearchSourceBuilder.ScriptField(p));
}
return parsedScriptFields;
}, SCRIPT_FIELDS);
PARSER.declareInt(Builder::setScrollSize, SCROLL_SIZE);
PARSER.declareObject(Builder::setChunkingConfig, ChunkingConfig.PARSER, CHUNKING_CONFIG);
}
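// Not part of the original file: a minimal sketch of driving the lenient PARSER declared above.
// The XContentType/NamedXContentRegistry/DeprecationHandler plumbing is the standard Elasticsearch
// way to obtain an XContentParser and is assumed here, not added by this commit.
//
//   String json = "{\"datafeed_id\":\"feed-1\",\"job_id\":\"job-1\",\"indices\":[\"metrics-*\"],\"scroll_size\":500}";
//   try (XContentParser parser = XContentType.JSON.xContent().createParser(
//           NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION_EXCEPTION, json)) {
//       DatafeedConfig.Builder builder = PARSER.apply(parser, null);
//   }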
private final String id;
private final String jobId;
/**
* The delay before starting to query a period of time
*/
private final TimeValue queryDelay;
/**
* The frequency with which queries are executed
*/
private final TimeValue frequency;
private final List<String> indices;
private final List<String> types;
private final QueryBuilder query;
private final AggregatorFactories.Builder aggregations;
private final List<SearchSourceBuilder.ScriptField> scriptFields;
private final Integer scrollSize;
private final ChunkingConfig chunkingConfig;
private DatafeedConfig(String id, String jobId, TimeValue queryDelay, TimeValue frequency, List<String> indices, List<String> types,
QueryBuilder query, AggregatorFactories.Builder aggregations, List<SearchSourceBuilder.ScriptField> scriptFields,
Integer scrollSize, ChunkingConfig chunkingConfig) {
this.id = id;
this.jobId = jobId;
this.queryDelay = queryDelay;
this.frequency = frequency;
this.indices = indices == null ? null : Collections.unmodifiableList(indices);
this.types = types == null ? null : Collections.unmodifiableList(types);
this.query = query;
this.aggregations = aggregations;
this.scriptFields = scriptFields == null ? null : Collections.unmodifiableList(scriptFields);
this.scrollSize = scrollSize;
this.chunkingConfig = chunkingConfig;
}
public String getId() {
return id;
}
public String getJobId() {
return jobId;
}
public TimeValue getQueryDelay() {
return queryDelay;
}
public TimeValue getFrequency() {
return frequency;
}
public List<String> getIndices() {
return indices;
}
public List<String> getTypes() {
return types;
}
public Integer getScrollSize() {
return scrollSize;
}
public QueryBuilder getQuery() {
return query;
}
public AggregatorFactories.Builder getAggregations() {
return aggregations;
}
public List<SearchSourceBuilder.ScriptField> getScriptFields() {
return scriptFields == null ? Collections.emptyList() : scriptFields;
}
public ChunkingConfig getChunkingConfig() {
return chunkingConfig;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(ID.getPreferredName(), id);
builder.field(JOB_ID.getPreferredName(), jobId);
if (queryDelay != null) {
builder.field(QUERY_DELAY.getPreferredName(), queryDelay.getStringRep());
}
if (frequency != null) {
builder.field(FREQUENCY.getPreferredName(), frequency.getStringRep());
}
builder.field(INDICES.getPreferredName(), indices);
builder.field(TYPES.getPreferredName(), types);
builder.field(QUERY.getPreferredName(), query);
if (aggregations != null) {
builder.field(AGGREGATIONS.getPreferredName(), aggregations);
}
if (scriptFields != null) {
builder.startObject(SCRIPT_FIELDS.getPreferredName());
for (SearchSourceBuilder.ScriptField scriptField : scriptFields) {
scriptField.toXContent(builder, params);
}
builder.endObject();
}
builder.field(SCROLL_SIZE.getPreferredName(), scrollSize);
if (chunkingConfig != null) {
builder.field(CHUNKING_CONFIG.getPreferredName(), chunkingConfig);
}
builder.endObject();
return builder;
}
/**
* The lists of indices and types are compared for equality but they are not
* sorted first so this test could fail simply because the indices and types
* lists are in different orders.
*/
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other == null || getClass() != other.getClass()) {
return false;
}
DatafeedConfig that = (DatafeedConfig) other;
return Objects.equals(this.id, that.id)
&& Objects.equals(this.jobId, that.jobId)
&& Objects.equals(this.frequency, that.frequency)
&& Objects.equals(this.queryDelay, that.queryDelay)
&& Objects.equals(this.indices, that.indices)
&& Objects.equals(this.types, that.types)
&& Objects.equals(this.query, that.query)
&& Objects.equals(this.scrollSize, that.scrollSize)
&& Objects.equals(this.aggregations, that.aggregations)
&& Objects.equals(this.scriptFields, that.scriptFields)
&& Objects.equals(this.chunkingConfig, that.chunkingConfig);
}
@Override
public int hashCode() {
return Objects.hash(id, jobId, frequency, queryDelay, indices, types, query, scrollSize, aggregations, scriptFields,
chunkingConfig);
}
public static class Builder {
private String id;
private String jobId;
private TimeValue queryDelay;
private TimeValue frequency;
private List<String> indices = Collections.emptyList();
private List<String> types = Collections.emptyList();
private QueryBuilder query = QueryBuilders.matchAllQuery();
private AggregatorFactories.Builder aggregations;
private List<SearchSourceBuilder.ScriptField> scriptFields;
private Integer scrollSize = DEFAULT_SCROLL_SIZE;
private ChunkingConfig chunkingConfig;
public Builder(String id, String jobId) {
this.id = Objects.requireNonNull(id, ID.getPreferredName());
this.jobId = Objects.requireNonNull(jobId, JOB_ID.getPreferredName());
}
public Builder(DatafeedConfig config) {
this.id = config.id;
this.jobId = config.jobId;
this.queryDelay = config.queryDelay;
this.frequency = config.frequency;
this.indices = config.indices;
this.types = config.types;
this.query = config.query;
this.aggregations = config.aggregations;
|
||||
this.scriptFields = config.scriptFields;
|
||||
this.scrollSize = config.scrollSize;
|
||||
this.chunkingConfig = config.chunkingConfig;
|
||||
}
|
||||
|
||||
public Builder setIndices(List<String> indices) {
|
||||
this.indices = Objects.requireNonNull(indices, INDICES.getPreferredName());
|
||||
return this;
|
||||
}
|
||||
|
||||
public Builder setTypes(List<String> types) {
|
||||
this.types = Objects.requireNonNull(types, TYPES.getPreferredName());
|
||||
return this;
|
||||
}
|
||||
|
||||
public Builder setQueryDelay(TimeValue queryDelay) {
|
||||
this.queryDelay = queryDelay;
|
||||
return this;
|
||||
}
|
||||
|
||||
public Builder setFrequency(TimeValue frequency) {
|
||||
this.frequency = frequency;
|
||||
return this;
|
||||
}
|
||||
|
||||
public Builder setQuery(QueryBuilder query) {
|
||||
this.query = Objects.requireNonNull(query, QUERY.getPreferredName());
|
||||
return this;
|
||||
}
|
||||
|
||||
public Builder setAggregations(AggregatorFactories.Builder aggregations) {
|
||||
this.aggregations = aggregations;
|
||||
return this;
|
||||
}
|
||||
|
||||
public Builder setScriptFields(List<SearchSourceBuilder.ScriptField> scriptFields) {
|
||||
List<SearchSourceBuilder.ScriptField> sorted = new ArrayList<>(scriptFields);
|
||||
sorted.sort(Comparator.comparing(SearchSourceBuilder.ScriptField::fieldName));
|
||||
this.scriptFields = sorted;
|
||||
return this;
|
||||
}
|
||||
|
||||
public Builder setScrollSize(int scrollSize) {
|
||||
this.scrollSize = scrollSize;
|
||||
return this;
|
||||
}
|
||||
|
||||
public Builder setChunkingConfig(ChunkingConfig chunkingConfig) {
|
||||
this.chunkingConfig = chunkingConfig;
|
||||
return this;
|
||||
}
|
||||
|
||||
public DatafeedConfig build() {
|
||||
return new DatafeedConfig(id, jobId, queryDelay, frequency, indices, types, query, aggregations, scriptFields, scrollSize,
|
||||
chunkingConfig);
|
||||
}
|
||||
}
|
||||
}
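For reference, a minimal sketch of how the builder above might be used from client code; the datafeed id, job id, index name, and timing values are made-up examples, not values from this change:

// Hedged example: constructing a DatafeedConfig via the Builder defined above.
// "my-datafeed", "my-job", "my-index" and the timing values are hypothetical.
DatafeedConfig datafeed = new DatafeedConfig.Builder("my-datafeed", "my-job")
        .setIndices(Collections.singletonList("my-index"))
        .setQueryDelay(TimeValue.timeValueSeconds(60))
        .setFrequency(TimeValue.timeValueMinutes(5))
        .setScrollSize(1000)
        .build();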
@ -0,0 +1,310 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.protocol.xpack.ml.datafeed;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.AbstractQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.builder.SearchSourceBuilder;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Objects;

/**
 * A datafeed update contains partial properties to update a {@link DatafeedConfig}.
 * The main difference between this class and {@link DatafeedConfig} is that here all
 * fields are nullable.
 */
public class DatafeedUpdate implements ToXContentObject {

    public static final ConstructingObjectParser<Builder, Void> PARSER = new ConstructingObjectParser<>(
        "datafeed_update", true, a -> new Builder((String)a[0]));

    static {
        PARSER.declareString(ConstructingObjectParser.constructorArg(), DatafeedConfig.ID);

        PARSER.declareString(Builder::setJobId, DatafeedConfig.JOB_ID);
        PARSER.declareStringArray(Builder::setIndices, DatafeedConfig.INDEXES);
        PARSER.declareStringArray(Builder::setIndices, DatafeedConfig.INDICES);
        PARSER.declareStringArray(Builder::setTypes, DatafeedConfig.TYPES);
        PARSER.declareString((builder, val) -> builder.setQueryDelay(
            TimeValue.parseTimeValue(val, DatafeedConfig.QUERY_DELAY.getPreferredName())), DatafeedConfig.QUERY_DELAY);
        PARSER.declareString((builder, val) -> builder.setFrequency(
            TimeValue.parseTimeValue(val, DatafeedConfig.FREQUENCY.getPreferredName())), DatafeedConfig.FREQUENCY);
        PARSER.declareObject(Builder::setQuery, (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), DatafeedConfig.QUERY);
        PARSER.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p),
            DatafeedConfig.AGGREGATIONS);
        PARSER.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p),
            DatafeedConfig.AGGS);
        PARSER.declareObject(Builder::setScriptFields, (p, c) -> {
            List<SearchSourceBuilder.ScriptField> parsedScriptFields = new ArrayList<>();
            while (p.nextToken() != XContentParser.Token.END_OBJECT) {
                parsedScriptFields.add(new SearchSourceBuilder.ScriptField(p));
            }
            return parsedScriptFields;
        }, DatafeedConfig.SCRIPT_FIELDS);
        PARSER.declareInt(Builder::setScrollSize, DatafeedConfig.SCROLL_SIZE);
        PARSER.declareObject(Builder::setChunkingConfig, ChunkingConfig.PARSER, DatafeedConfig.CHUNKING_CONFIG);
    }

    private final String id;
    private final String jobId;
    private final TimeValue queryDelay;
    private final TimeValue frequency;
    private final List<String> indices;
    private final List<String> types;
    private final QueryBuilder query;
    private final AggregatorFactories.Builder aggregations;
    private final List<SearchSourceBuilder.ScriptField> scriptFields;
    private final Integer scrollSize;
    private final ChunkingConfig chunkingConfig;

    private DatafeedUpdate(String id, String jobId, TimeValue queryDelay, TimeValue frequency, List<String> indices, List<String> types,
                           QueryBuilder query, AggregatorFactories.Builder aggregations, List<SearchSourceBuilder.ScriptField> scriptFields,
                           Integer scrollSize, ChunkingConfig chunkingConfig) {
        this.id = id;
        this.jobId = jobId;
        this.queryDelay = queryDelay;
        this.frequency = frequency;
        this.indices = indices;
        this.types = types;
        this.query = query;
        this.aggregations = aggregations;
        this.scriptFields = scriptFields;
        this.scrollSize = scrollSize;
        this.chunkingConfig = chunkingConfig;
    }

    /**
     * Get the id of the datafeed to update
     */
    public String getId() {
        return id;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field(DatafeedConfig.ID.getPreferredName(), id);
        addOptionalField(builder, DatafeedConfig.JOB_ID, jobId);
        if (queryDelay != null) {
            builder.field(DatafeedConfig.QUERY_DELAY.getPreferredName(), queryDelay.getStringRep());
        }
        if (frequency != null) {
            builder.field(DatafeedConfig.FREQUENCY.getPreferredName(), frequency.getStringRep());
        }
        addOptionalField(builder, DatafeedConfig.INDICES, indices);
        addOptionalField(builder, DatafeedConfig.TYPES, types);
        addOptionalField(builder, DatafeedConfig.QUERY, query);
        addOptionalField(builder, DatafeedConfig.AGGREGATIONS, aggregations);
        if (scriptFields != null) {
            builder.startObject(DatafeedConfig.SCRIPT_FIELDS.getPreferredName());
            for (SearchSourceBuilder.ScriptField scriptField : scriptFields) {
                scriptField.toXContent(builder, params);
            }
            builder.endObject();
        }
        addOptionalField(builder, DatafeedConfig.SCROLL_SIZE, scrollSize);
        addOptionalField(builder, DatafeedConfig.CHUNKING_CONFIG, chunkingConfig);
        builder.endObject();
        return builder;
    }

    private void addOptionalField(XContentBuilder builder, ParseField field, Object value) throws IOException {
        if (value != null) {
            builder.field(field.getPreferredName(), value);
        }
    }

    public String getJobId() {
        return jobId;
    }

    public TimeValue getQueryDelay() {
        return queryDelay;
    }

    public TimeValue getFrequency() {
        return frequency;
    }

    public List<String> getIndices() {
        return indices;
    }

    public List<String> getTypes() {
        return types;
    }

    public Integer getScrollSize() {
        return scrollSize;
    }

    public QueryBuilder getQuery() {
        return query;
    }

    public AggregatorFactories.Builder getAggregations() {
        return aggregations;
    }

    public List<SearchSourceBuilder.ScriptField> getScriptFields() {
        return scriptFields == null ? Collections.emptyList() : scriptFields;
    }

    public ChunkingConfig getChunkingConfig() {
        return chunkingConfig;
    }

    /**
     * The lists of indices and types are compared for equality but they are not
     * sorted first so this test could fail simply because the indices and types
     * lists are in different orders.
     */
    @Override
    public boolean equals(Object other) {
        if (this == other) {
            return true;
        }

        if (other == null || getClass() != other.getClass()) {
            return false;
        }

        DatafeedUpdate that = (DatafeedUpdate) other;

        return Objects.equals(this.id, that.id)
            && Objects.equals(this.jobId, that.jobId)
            && Objects.equals(this.frequency, that.frequency)
            && Objects.equals(this.queryDelay, that.queryDelay)
            && Objects.equals(this.indices, that.indices)
            && Objects.equals(this.types, that.types)
            && Objects.equals(this.query, that.query)
            && Objects.equals(this.scrollSize, that.scrollSize)
            && Objects.equals(this.aggregations, that.aggregations)
            && Objects.equals(this.scriptFields, that.scriptFields)
            && Objects.equals(this.chunkingConfig, that.chunkingConfig);
    }

    @Override
    public int hashCode() {
        return Objects.hash(id, jobId, frequency, queryDelay, indices, types, query, scrollSize, aggregations, scriptFields,
            chunkingConfig);
    }

    public static class Builder {

        private String id;
        private String jobId;
        private TimeValue queryDelay;
        private TimeValue frequency;
        private List<String> indices;
        private List<String> types;
        private QueryBuilder query;
        private AggregatorFactories.Builder aggregations;
        private List<SearchSourceBuilder.ScriptField> scriptFields;
        private Integer scrollSize;
        private ChunkingConfig chunkingConfig;

        public Builder(String id) {
            this.id = Objects.requireNonNull(id, DatafeedConfig.ID.getPreferredName());
        }

        public Builder(DatafeedUpdate config) {
            this.id = config.id;
            this.jobId = config.jobId;
            this.queryDelay = config.queryDelay;
            this.frequency = config.frequency;
            this.indices = config.indices;
            this.types = config.types;
            this.query = config.query;
            this.aggregations = config.aggregations;
            this.scriptFields = config.scriptFields;
            this.scrollSize = config.scrollSize;
            this.chunkingConfig = config.chunkingConfig;
        }

        public Builder setJobId(String jobId) {
            this.jobId = jobId;
            return this;
        }

        public Builder setIndices(List<String> indices) {
            this.indices = indices;
            return this;
        }

        public Builder setTypes(List<String> types) {
            this.types = types;
            return this;
        }

        public Builder setQueryDelay(TimeValue queryDelay) {
            this.queryDelay = queryDelay;
            return this;
        }

        public Builder setFrequency(TimeValue frequency) {
            this.frequency = frequency;
            return this;
        }

        public Builder setQuery(QueryBuilder query) {
            this.query = query;
            return this;
        }

        public Builder setAggregations(AggregatorFactories.Builder aggregations) {
            this.aggregations = aggregations;
            return this;
        }

        public Builder setScriptFields(List<SearchSourceBuilder.ScriptField> scriptFields) {
            List<SearchSourceBuilder.ScriptField> sorted = new ArrayList<>(scriptFields);
            sorted.sort(Comparator.comparing(SearchSourceBuilder.ScriptField::fieldName));
            this.scriptFields = sorted;
            return this;
        }

        public Builder setScrollSize(int scrollSize) {
            this.scrollSize = scrollSize;
            return this;
        }

        public Builder setChunkingConfig(ChunkingConfig chunkingConfig) {
            this.chunkingConfig = chunkingConfig;
            return this;
        }

        public DatafeedUpdate build() {
            return new DatafeedUpdate(id, jobId, queryDelay, frequency, indices, types, query, aggregations, scriptFields, scrollSize,
                chunkingConfig);
        }
    }
}
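As a rough illustration of the "all fields are nullable" point in the class javadoc, a partial update might set only the fields that change; the datafeed id and values below are hypothetical:

// Hedged example: a partial DatafeedUpdate touching only frequency and scroll size.
// Unset fields stay null and are omitted from the serialized update.
DatafeedUpdate update = new DatafeedUpdate.Builder("my-datafeed")
        .setFrequency(TimeValue.timeValueMinutes(10))
        .setScrollSize(500)
        .build();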
@ -0,0 +1,59 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.protocol.xpack.ml.datafeed;

import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;

import java.util.Arrays;
import java.util.concurrent.TimeUnit;

public class ChunkingConfigTests extends AbstractXContentTestCase<ChunkingConfig> {

    @Override
    protected ChunkingConfig createTestInstance() {
        return createRandomizedChunk();
    }

    @Override
    protected ChunkingConfig doParseInstance(XContentParser parser) {
        return ChunkingConfig.PARSER.apply(parser, null);
    }

    @Override
    protected boolean supportsUnknownFields() {
        return true;
    }

    public static ChunkingConfig createRandomizedChunk() {
        ChunkingConfig.Mode mode = randomFrom(ChunkingConfig.Mode.values());
        TimeValue timeSpan = null;
        if (mode == ChunkingConfig.Mode.MANUAL) {
            // the time span is required to be at least 1 millisecond, so we use a custom method to generate a time value here
            timeSpan = randomPositiveSecondsMinutesHours();
        }
        return new ChunkingConfig(mode, timeSpan);
    }

    private static TimeValue randomPositiveSecondsMinutesHours() {
        return new TimeValue(randomIntBetween(1, 1000), randomFrom(Arrays.asList(TimeUnit.SECONDS, TimeUnit.MINUTES, TimeUnit.HOURS)));
    }

}
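For orientation, a manual chunking configuration like the ones this test randomizes could look as follows; the one-hour span is an arbitrary example value, and the constructor is used directly here just as in createRandomizedChunk():

// Hedged example: manual chunking with an explicit, hypothetical time span.
ChunkingConfig manualChunking = new ChunkingConfig(ChunkingConfig.Mode.MANUAL, TimeValue.timeValueHours(1));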
@ -0,0 +1,177 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.protocol.xpack.ml.datafeed;

import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder;
import org.elasticsearch.search.builder.SearchSourceBuilder.ScriptField;
import org.elasticsearch.test.AbstractXContentTestCase;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class DatafeedConfigTests extends AbstractXContentTestCase<DatafeedConfig> {

    @Override
    protected DatafeedConfig createTestInstance() {
        long bucketSpanMillis = 3600000;
        DatafeedConfig.Builder builder = constructBuilder();
        builder.setIndices(randomStringList(1, 10));
        builder.setTypes(randomStringList(0, 10));
        if (randomBoolean()) {
            builder.setQuery(QueryBuilders.termQuery(randomAlphaOfLength(10), randomAlphaOfLength(10)));
        }
        boolean addScriptFields = randomBoolean();
        if (addScriptFields) {
            int scriptsSize = randomInt(3);
            List<ScriptField> scriptFields = new ArrayList<>(scriptsSize);
            for (int scriptIndex = 0; scriptIndex < scriptsSize; scriptIndex++) {
                scriptFields.add(new ScriptField(randomAlphaOfLength(10), mockScript(randomAlphaOfLength(10)),
                    randomBoolean()));
            }
            builder.setScriptFields(scriptFields);
        }
        Long aggHistogramInterval = null;
        if (randomBoolean()) {
            // can only test with a single agg as the xcontent order gets randomized by the test base class and then
            // the actual xcontent isn't the same and the test fails.
            // Testing with a single agg is ok as we don't have special list xcontent logic
            AggregatorFactories.Builder aggs = new AggregatorFactories.Builder();
            aggHistogramInterval = randomNonNegativeLong();
            aggHistogramInterval = aggHistogramInterval > bucketSpanMillis ? bucketSpanMillis : aggHistogramInterval;
            aggHistogramInterval = aggHistogramInterval <= 0 ? 1 : aggHistogramInterval;
            MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time");
            aggs.addAggregator(AggregationBuilders.dateHistogram("buckets")
                .interval(aggHistogramInterval).subAggregation(maxTime).field("time"));
            builder.setAggregations(aggs);
        }
        if (randomBoolean()) {
            builder.setScrollSize(randomIntBetween(0, Integer.MAX_VALUE));
        }
        if (randomBoolean()) {
            if (aggHistogramInterval == null) {
                builder.setFrequency(TimeValue.timeValueSeconds(randomIntBetween(1, 1_000_000)));
            } else {
                builder.setFrequency(TimeValue.timeValueMillis(randomIntBetween(1, 5) * aggHistogramInterval));
            }
        }
        if (randomBoolean()) {
            builder.setQueryDelay(TimeValue.timeValueMillis(randomIntBetween(1, 1_000_000)));
        }
        if (randomBoolean()) {
            builder.setChunkingConfig(ChunkingConfigTests.createRandomizedChunk());
        }
        return builder.build();
    }

    @Override
    protected NamedXContentRegistry xContentRegistry() {
        SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList());
        return new NamedXContentRegistry(searchModule.getNamedXContents());
    }

    public static List<String> randomStringList(int min, int max) {
        int size = scaledRandomIntBetween(min, max);
        List<String> list = new ArrayList<>();
        for (int i = 0; i < size; i++) {
            list.add(randomAlphaOfLength(10));
        }
        return list;
    }

    @Override
    protected DatafeedConfig doParseInstance(XContentParser parser) {
        return DatafeedConfig.PARSER.apply(parser, null).build();
    }

    @Override
    protected boolean supportsUnknownFields() {
        return false;
    }

    private static final String FUTURE_DATAFEED = "{\n" +
        "    \"datafeed_id\": \"farequote-datafeed\",\n" +
        "    \"job_id\": \"farequote\",\n" +
        "    \"frequency\": \"1h\",\n" +
        "    \"indices\": [\"farequote1\", \"farequote2\"],\n" +
        "    \"tomorrows_technology_today\": \"amazing\",\n" +
        "    \"scroll_size\": 1234\n" +
        "}";

    public void testFutureMetadataParse() throws IOException {
        XContentParser parser = XContentFactory.xContent(XContentType.JSON)
            .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, FUTURE_DATAFEED);
        // Unlike the config version of this test, the metadata parser should tolerate the unknown future field
        assertNotNull(DatafeedConfig.PARSER.apply(parser, null).build());
    }

    public void testCopyConstructor() {
        for (int i = 0; i < NUMBER_OF_TEST_RUNS; i++) {
            DatafeedConfig datafeedConfig = createTestInstance();
            DatafeedConfig copy = new DatafeedConfig.Builder(datafeedConfig).build();
            assertEquals(datafeedConfig, copy);
        }
    }

    public void testCheckValid_GivenNullIdInConstruction() {
        expectThrows(NullPointerException.class, () -> new DatafeedConfig.Builder(null, null));
    }

    public void testCheckValid_GivenNullJobId() {
        expectThrows(NullPointerException.class, () -> new DatafeedConfig.Builder(randomValidDatafeedId(), null));
    }

    public void testCheckValid_GivenNullIndices() {
        DatafeedConfig.Builder conf = constructBuilder();
        expectThrows(NullPointerException.class, () -> conf.setIndices(null));
    }

    public void testCheckValid_GivenNullType() {
        DatafeedConfig.Builder conf = constructBuilder();
        expectThrows(NullPointerException.class, () -> conf.setTypes(null));
    }

    public void testCheckValid_GivenNullQuery() {
        DatafeedConfig.Builder conf = constructBuilder();
        expectThrows(NullPointerException.class, () -> conf.setQuery(null));
    }

    public static String randomValidDatafeedId() {
        CodepointSetGenerator generator = new CodepointSetGenerator("abcdefghijklmnopqrstuvwxyz".toCharArray());
        return generator.ofCodePointsLength(random(), 10, 10);
    }

    private static DatafeedConfig.Builder constructBuilder() {
        return new DatafeedConfig.Builder(randomValidDatafeedId(), randomAlphaOfLength(10));
    }

}
@ -0,0 +1,101 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.protocol.xpack.ml.datafeed;

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.test.AbstractXContentTestCase;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class DatafeedUpdateTests extends AbstractXContentTestCase<DatafeedUpdate> {

    @Override
    protected DatafeedUpdate createTestInstance() {
        DatafeedUpdate.Builder builder = new DatafeedUpdate.Builder(DatafeedConfigTests.randomValidDatafeedId());
        if (randomBoolean()) {
            builder.setJobId(randomAlphaOfLength(10));
        }
        if (randomBoolean()) {
            builder.setQueryDelay(TimeValue.timeValueMillis(randomIntBetween(1, Integer.MAX_VALUE)));
        }
        if (randomBoolean()) {
            builder.setFrequency(TimeValue.timeValueSeconds(randomIntBetween(1, Integer.MAX_VALUE)));
        }
        if (randomBoolean()) {
            builder.setIndices(DatafeedConfigTests.randomStringList(1, 10));
        }
        if (randomBoolean()) {
            builder.setTypes(DatafeedConfigTests.randomStringList(1, 10));
        }
        if (randomBoolean()) {
            builder.setQuery(QueryBuilders.termQuery(randomAlphaOfLength(10), randomAlphaOfLength(10)));
        }
        if (randomBoolean()) {
            int scriptsSize = randomInt(3);
            List<SearchSourceBuilder.ScriptField> scriptFields = new ArrayList<>(scriptsSize);
            for (int scriptIndex = 0; scriptIndex < scriptsSize; scriptIndex++) {
                scriptFields.add(new SearchSourceBuilder.ScriptField(randomAlphaOfLength(10), mockScript(randomAlphaOfLength(10)),
                    randomBoolean()));
            }
            builder.setScriptFields(scriptFields);
        }
        if (randomBoolean()) {
            // can only test with a single agg as the xcontent order gets randomized by the test base class and then
            // the actual xcontent isn't the same and the test fails.
            // Testing with a single agg is ok as we don't have special list xcontent logic
            AggregatorFactories.Builder aggs = new AggregatorFactories.Builder();
            aggs.addAggregator(AggregationBuilders.avg(randomAlphaOfLength(10)).field(randomAlphaOfLength(10)));
            builder.setAggregations(aggs);
        }
        if (randomBoolean()) {
            builder.setScrollSize(randomIntBetween(0, Integer.MAX_VALUE));
        }
        if (randomBoolean()) {
            builder.setChunkingConfig(ChunkingConfigTests.createRandomizedChunk());
        }
        return builder.build();
    }

    @Override
    protected DatafeedUpdate doParseInstance(XContentParser parser) {
        return DatafeedUpdate.PARSER.apply(parser, null).build();
    }

    @Override
    protected boolean supportsUnknownFields() {
        return false;
    }

    @Override
    protected NamedXContentRegistry xContentRegistry() {
        SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList());
        return new NamedXContentRegistry(searchModule.getNamedXContents());
    }

}
@ -41,7 +41,7 @@ Object httpPrincipal = new Object() {
    @Override
    String toString() {
        InetAddress resolvedAddress = InetAddress.getByName('127.0.0.1')
        return "HTTP/" + resolvedAddress.getHostName()
        return "HTTP/" + resolvedAddress.getCanonicalHostName()
    }
}

@ -112,7 +112,7 @@ public class KerberosAuthenticationIT extends ESRestTestCase {
    protected HttpHost buildHttpHost(String host, int port) {
        try {
            InetAddress inetAddress = InetAddress.getByName(host);
            return super.buildHttpHost(inetAddress.getHostName(), port);
            return super.buildHttpHost(inetAddress.getCanonicalHostName(), port);
        } catch (UnknownHostException e) {
            assumeNoException("failed to resolve host [" + host + "]", e);
        }

@ -1,4 +1,7 @@
grant {
    permission javax.security.auth.AuthPermission "doAsPrivileged";
    permission javax.security.auth.kerberos.DelegationPermission "\"HTTP/localhost@BUILD.ELASTIC.CO\" \"krbtgt/BUILD.ELASTIC.CO@BUILD.ELASTIC.CO\"";
    permission javax.security.auth.kerberos.DelegationPermission "\"HTTP/localhost.localdomain@BUILD.ELASTIC.CO\" \"krbtgt/BUILD.ELASTIC.CO@BUILD.ELASTIC.CO\"";
    permission javax.security.auth.kerberos.DelegationPermission "\"HTTP/localhost4@BUILD.ELASTIC.CO\" \"krbtgt/BUILD.ELASTIC.CO@BUILD.ELASTIC.CO\"";
    permission javax.security.auth.kerberos.DelegationPermission "\"HTTP/localhost4.localdomain4@BUILD.ELASTIC.CO\" \"krbtgt/BUILD.ELASTIC.CO@BUILD.ELASTIC.CO\"";
};