From 03241037373a727444cd7fda2ab23825a6eb84d6 Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Thu, 14 Jun 2018 18:30:37 -0700 Subject: [PATCH 01/15] Painless: Fix bug for static method calls on interfaces (#31348) Static method calls on interfaces were not being called correctly which was causing JVM crashes. This change fixes the issue. --- .../java/org/elasticsearch/painless/Def.java | 3 ++- .../org/elasticsearch/painless/Definition.java | 17 +++++++++++++++-- .../org/elasticsearch/painless/FunctionRef.java | 7 +++++++ .../elasticsearch/painless/LambdaBootstrap.java | 12 +++++++++--- .../elasticsearch/painless/WriterConstants.java | 4 ++-- .../painless/node/ECapturingFunctionRef.java | 3 ++- .../painless/node/EFunctionRef.java | 3 ++- .../elasticsearch/painless/node/ELambda.java | 3 ++- .../painless/BasicExpressionTests.java | 5 +++++ .../painless/FunctionRefTests.java | 5 +++++ 10 files changed, 51 insertions(+), 11 deletions(-) diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java index 661af1b6c91..988a31a24ee 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java @@ -376,7 +376,8 @@ public final class Def { ref.delegateClassName, ref.delegateInvokeType, ref.delegateMethodName, - ref.delegateMethodType + ref.delegateMethodType, + ref.isDelegateInterface ? 
1 : 0 ); return callSite.dynamicInvoker().asType(MethodType.methodType(clazz.clazz, captures)); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java index f97df128f15..75575d6f125 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java @@ -20,6 +20,7 @@ package org.elasticsearch.painless; import org.elasticsearch.painless.spi.Whitelist; +import org.objectweb.asm.Opcodes; import java.lang.invoke.MethodHandle; import java.lang.invoke.MethodHandles; @@ -202,16 +203,28 @@ public final class Definition { public void write(MethodWriter writer) { final org.objectweb.asm.Type type; + final Class clazz; if (augmentation != null) { assert java.lang.reflect.Modifier.isStatic(modifiers); + clazz = augmentation; type = org.objectweb.asm.Type.getType(augmentation); } else { + clazz = owner.clazz; type = owner.type; } if (java.lang.reflect.Modifier.isStatic(modifiers)) { - writer.invokeStatic(type, method); - } else if (java.lang.reflect.Modifier.isInterface(owner.clazz.getModifiers())) { + // invokeStatic assumes that the owner class is not an interface, so this is a + // special case for interfaces where the interface method boolean needs to be set to + // true to reference the appropriate class constant when calling a static interface + // method since java 8 did not check, but java 9 and 10 do + if (java.lang.reflect.Modifier.isInterface(clazz.getModifiers())) { + writer.visitMethodInsn(Opcodes.INVOKESTATIC, + type.getInternalName(), name, getMethodType().toMethodDescriptorString(), true); + } else { + writer.invokeStatic(type, method); + } + } else if (java.lang.reflect.Modifier.isInterface(clazz.getModifiers())) { writer.invokeInterface(type, method); } else { writer.invokeVirtual(type, method); diff --git 
a/modules/lang-painless/src/main/java/org/elasticsearch/painless/FunctionRef.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/FunctionRef.java index 66cf78e8572..0b698dd2441 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/FunctionRef.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/FunctionRef.java @@ -66,6 +66,9 @@ public class FunctionRef { /** delegate method type method as type */ public final Type delegateType; + /** whether a call is made on a delegate interface */ + public final boolean isDelegateInterface; + /** * Creates a new FunctionRef, which will resolve {@code type::call} from the whitelist. * @param definition the whitelist against which this script is being compiled @@ -97,10 +100,13 @@ public class FunctionRef { // the Painless$Script class can be inferred if owner is null if (delegateMethod.owner == null) { delegateClassName = CLASS_NAME; + isDelegateInterface = false; } else if (delegateMethod.augmentation != null) { delegateClassName = delegateMethod.augmentation.getName(); + isDelegateInterface = delegateMethod.augmentation.isInterface(); } else { delegateClassName = delegateMethod.owner.clazz.getName(); + isDelegateInterface = delegateMethod.owner.clazz.isInterface(); } if ("".equals(delegateMethod.name)) { @@ -139,6 +145,7 @@ public class FunctionRef { delegateInvokeType = H_INVOKESTATIC; this.delegateMethodName = delegateMethodName; this.delegateMethodType = delegateMethodType.dropParameterTypes(0, numCaptures); + isDelegateInterface = false; this.interfaceMethod = null; delegateMethod = null; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/LambdaBootstrap.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/LambdaBootstrap.java index 7a2ec9da34e..3fc8554b271 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/LambdaBootstrap.java +++ 
b/modules/lang-painless/src/main/java/org/elasticsearch/painless/LambdaBootstrap.java @@ -188,6 +188,10 @@ public final class LambdaBootstrap { * @param delegateMethodName The name of the method to be called in the Painless script class * @param delegateMethodType The type of method call in the Painless script class without * the captured types + * @param isDelegateInterface If the method to be called is owned by an interface where + * if the value is '1' if the delegate is an interface and '0' + * otherwise; note this is an int because the bootstrap method + * cannot convert constants to boolean * @return A {@link CallSite} linked to a factory method for creating a lambda class * that implements the expected functional interface * @throws LambdaConversionException Thrown when an illegal type conversion occurs at link time @@ -200,7 +204,8 @@ public final class LambdaBootstrap { String delegateClassName, int delegateInvokeType, String delegateMethodName, - MethodType delegateMethodType) + MethodType delegateMethodType, + int isDelegateInterface) throws LambdaConversionException { Loader loader = (Loader)lookup.lookupClass().getClassLoader(); String lambdaClassName = Type.getInternalName(lookup.lookupClass()) + "$$Lambda" + loader.newLambdaIdentifier(); @@ -225,7 +230,7 @@ public final class LambdaBootstrap { generateInterfaceMethod(cw, factoryMethodType, lambdaClassType, interfaceMethodName, interfaceMethodType, delegateClassType, delegateInvokeType, - delegateMethodName, delegateMethodType, captures); + delegateMethodName, delegateMethodType, isDelegateInterface == 1, captures); endLambdaClass(cw); @@ -369,6 +374,7 @@ public final class LambdaBootstrap { int delegateInvokeType, String delegateMethodName, MethodType delegateMethodType, + boolean isDelegateInterface, Capture[] captures) throws LambdaConversionException { @@ -434,7 +440,7 @@ public final class LambdaBootstrap { Handle delegateHandle = new Handle(delegateInvokeType, 
delegateClassType.getInternalName(), delegateMethodName, delegateMethodType.toMethodDescriptorString(), - delegateInvokeType == H_INVOKEINTERFACE); + isDelegateInterface); iface.invokeDynamic(delegateMethodName, Type.getMethodType(interfaceMethodType .toMethodDescriptorString()).getDescriptor(), DELEGATE_BOOTSTRAP_HANDLE, delegateHandle); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java index 9150e2609b7..18d7d94492e 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java @@ -141,8 +141,8 @@ public final class WriterConstants { /** invokedynamic bootstrap for lambda expression/method references */ public static final MethodType LAMBDA_BOOTSTRAP_TYPE = - MethodType.methodType(CallSite.class, MethodHandles.Lookup.class, String.class, - MethodType.class, MethodType.class, String.class, int.class, String.class, MethodType.class); + MethodType.methodType(CallSite.class, MethodHandles.Lookup.class, String.class, MethodType.class, + MethodType.class, String.class, int.class, String.class, MethodType.class, int.class); public static final Handle LAMBDA_BOOTSTRAP_HANDLE = new Handle(Opcodes.H_INVOKESTATIC, Type.getInternalName(LambdaBootstrap.class), "lambdaBootstrap", LAMBDA_BOOTSTRAP_TYPE.toMethodDescriptorString(), false); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECapturingFunctionRef.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECapturingFunctionRef.java index 724679d3f85..e6f2f7ebf91 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECapturingFunctionRef.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECapturingFunctionRef.java @@ -121,7 +121,8 @@ public final class 
ECapturingFunctionRef extends AExpression implements ILambda ref.delegateClassName, ref.delegateInvokeType, ref.delegateMethodName, - ref.delegateType + ref.delegateType, + ref.isDelegateInterface ? 1 : 0 ); } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EFunctionRef.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EFunctionRef.java index 636623004c9..c82b1003a55 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EFunctionRef.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EFunctionRef.java @@ -112,7 +112,8 @@ public final class EFunctionRef extends AExpression implements ILambda { ref.delegateClassName, ref.delegateInvokeType, ref.delegateMethodName, - ref.delegateType + ref.delegateType, + ref.isDelegateInterface ? 1 : 0 ); } else { // TODO: don't do this: its just to cutover :) diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ELambda.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ELambda.java index c37ff435f56..a7213e75ca4 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ELambda.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ELambda.java @@ -222,7 +222,8 @@ public final class ELambda extends AExpression implements ILambda { ref.delegateClassName, ref.delegateInvokeType, ref.delegateMethodName, - ref.delegateType + ref.delegateType, + ref.isDelegateInterface ? 
1 : 0 ); } else { // placeholder diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicExpressionTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicExpressionTests.java index 97e1f01fdfc..6ff727d987c 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicExpressionTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicExpressionTests.java @@ -264,6 +264,11 @@ public class BasicExpressionTests extends ScriptTestCase { // assertEquals(null, exec("def a = ['thing': 'bar']; a.other?.cat?.dog = 'wombat'; return a.other?.cat?.dog")); } + // test to ensure static interface methods are called correctly + public void testStaticInterfaceMethod() { + assertEquals(4, exec("def values = [1, 4, 3, 2]; values.sort(Comparator.comparing(p -> p)); return values[3]")); + } + private void assertMustBeNullable(String script) { Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> exec(script)); assertEquals("Result of null safe operator must be nullable", e.getMessage()); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/FunctionRefTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/FunctionRefTests.java index 7c49d042108..fd47db6b83d 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/FunctionRefTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/FunctionRefTests.java @@ -184,6 +184,11 @@ public class FunctionRefTests extends ScriptTestCase { "def map = new HashMap(); f(map::getOrDefault)")); } + public void testInterfaceStaticMethod() { + assertEquals(-1, exec("Supplier get(Supplier supplier) { return supplier }" + + "Supplier s = get(Comparator::naturalOrder); s.get().compare(1, 2)")); + } + public void testMethodMissing() { Exception e = expectScriptThrows(IllegalArgumentException.class, () -> { exec("List l = [2, 1]; l.sort(Integer::bogus); 
return l.get(0);"); From e5b7137508247b6490e42bbde6fa76f51c5ef343 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 14 Jun 2018 21:32:07 -0400 Subject: [PATCH 02/15] TEST: getCapturedRequestsAndClear should be atomic (#31312) We might lose messages between getCapturedRequestsAndClear calls. This commit makes sure that both getCapturedRequestsAndClear and getCapturedRequestsByTargetNodeAndClear are atomic. --- .../test/transport/CapturingTransport.java | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java index 81fc934ca6d..318c70c2933 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java @@ -46,6 +46,7 @@ import java.io.IOException; import java.io.UncheckedIOException; import java.net.UnknownHostException; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -94,9 +95,17 @@ public class CapturingTransport implements Transport { * @return the captured requests */ public CapturedRequest[] getCapturedRequestsAndClear() { - CapturedRequest[] capturedRequests = capturedRequests(); - clear(); - return capturedRequests; + List requests = new ArrayList<>(capturedRequests.size()); + capturedRequests.drainTo(requests); + return requests.toArray(new CapturedRequest[0]); + } + + private Map> groupRequestsByTargetNode(Collection requests) { + Map> result = new HashMap<>(); + for (CapturedRequest request : requests) { + result.computeIfAbsent(request.node.getId(), node -> new ArrayList<>()).add(request); + } + return result; } /** @@ -104,16 +113,7 @@ public class CapturingTransport implements Transport { * Doesn't clear the captured request list. 
See {@link #clear()} */ public Map> capturedRequestsByTargetNode() { - Map> map = new HashMap<>(); - for (CapturedRequest request : capturedRequests) { - List nodeList = map.get(request.node.getId()); - if (nodeList == null) { - nodeList = new ArrayList<>(); - map.put(request.node.getId(), nodeList); - } - nodeList.add(request); - } - return map; + return groupRequestsByTargetNode(capturedRequests); } /** @@ -125,9 +125,9 @@ public class CapturingTransport implements Transport { * @return the captured requests grouped by target node */ public Map> getCapturedRequestsByTargetNodeAndClear() { - Map> map = capturedRequestsByTargetNode(); - clear(); - return map; + List requests = new ArrayList<>(capturedRequests.size()); + capturedRequests.drainTo(requests); + return groupRequestsByTargetNode(requests); } /** clears captured requests */ From 87a676e4d538a1533964bfe034f88782ca20cbfb Mon Sep 17 00:00:00 2001 From: Ben Abrams Date: Thu, 14 Jun 2018 18:41:02 -0700 Subject: [PATCH 03/15] Do not set vm.max_map_count when unnecessary (#31285) This commit modifies the Sys V init startup scripts to only modify vm.max_map_count if needed. In this case, needed means that the current value is less than our default value of 262144 maps. 
--- .../packages/src/deb/init.d/elasticsearch | 2 +- .../packages/src/rpm/init.d/elasticsearch | 2 +- .../packaging/tests/70_sysv_initd.bats | 28 +++++++++++++++++++ 3 files changed, 30 insertions(+), 2 deletions(-) diff --git a/distribution/packages/src/deb/init.d/elasticsearch b/distribution/packages/src/deb/init.d/elasticsearch index 6d3efd99ca6..21ac80a9c22 100755 --- a/distribution/packages/src/deb/init.d/elasticsearch +++ b/distribution/packages/src/deb/init.d/elasticsearch @@ -122,7 +122,7 @@ case "$1" in ulimit -l $MAX_LOCKED_MEMORY fi - if [ -n "$MAX_MAP_COUNT" -a -f /proc/sys/vm/max_map_count ]; then + if [ -n "$MAX_MAP_COUNT" -a -f /proc/sys/vm/max_map_count -a "$MAX_MAP_COUNT" -ge $(cat /proc/sys/vm/max_map_count) ]; then sysctl -q -w vm.max_map_count=$MAX_MAP_COUNT fi diff --git a/distribution/packages/src/rpm/init.d/elasticsearch b/distribution/packages/src/rpm/init.d/elasticsearch index 01dc4e691c0..d0fb4f759d1 100644 --- a/distribution/packages/src/rpm/init.d/elasticsearch +++ b/distribution/packages/src/rpm/init.d/elasticsearch @@ -90,7 +90,7 @@ start() { if [ -n "$MAX_LOCKED_MEMORY" ]; then ulimit -l $MAX_LOCKED_MEMORY fi - if [ -n "$MAX_MAP_COUNT" -a -f /proc/sys/vm/max_map_count ]; then + if [ -n "$MAX_MAP_COUNT" -a -f /proc/sys/vm/max_map_count -a "$MAX_MAP_COUNT" -ge $(cat /proc/sys/vm/max_map_count) ]; then sysctl -q -w vm.max_map_count=$MAX_MAP_COUNT fi diff --git a/qa/vagrant/src/test/resources/packaging/tests/70_sysv_initd.bats b/qa/vagrant/src/test/resources/packaging/tests/70_sysv_initd.bats index 026b46e21bc..51409140933 100644 --- a/qa/vagrant/src/test/resources/packaging/tests/70_sysv_initd.bats +++ b/qa/vagrant/src/test/resources/packaging/tests/70_sysv_initd.bats @@ -163,3 +163,31 @@ setup() { assert_file_exist /var/log/elasticsearch/gc.log.0.current stop_elasticsearch_service } + +# Ensures that if $MAX_MAP_COUNT is less than the set value on the OS +# it will be updated +@test "[INIT.D] sysctl is run when the value set is too 
small" { + # intentionally a ridiculously low number + sysctl -q -w vm.max_map_count=100 + start_elasticsearch_service + max_map_count=$(sysctl -n vm.max_map_count) + stop_elasticsearch_service + + [ $max_map_count = 262144 ] + +} + +# Ensures that if $MAX_MAP_COUNT is greater than the set vaule on the OS +# we do not attempt to update it this should cover equality as well as I think +# we can trust that equality operators work as intended. +@test "[INIT.D] sysctl is not run when it already has a larger or equal value set" { + # intentionally set to the default +1 + sysctl -q -w vm.max_map_count=262145 + start_elasticsearch_service + max_map_count=$(sysctl -n vm.max_map_count) + stop_elasticsearch_service + + # default value +1 + [ $max_map_count = 262145 ] + +} From 1c5cec0ac7ea6cba5faf25de99de89af46e0954e Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Thu, 14 Jun 2018 20:16:40 -0600 Subject: [PATCH 04/15] Remove http status code maps (#31350) Currently we maintain a compatibility map of http status codes in both the netty4 and nio modules. These maps convert a RestStatus to a netty HttpResponseStatus. However, as these fundamentally represent integers, we can just use the netty valueOf method to convert a RestStatus to a HttpResponseStatus. 
--- .../http/netty4/Netty4HttpResponse.java | 63 +------------------ .../http/nio/NioHttpResponse.java | 62 +----------------- 2 files changed, 2 insertions(+), 123 deletions(-) diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpResponse.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpResponse.java index fde7be335db..68c1d312780 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpResponse.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpResponse.java @@ -27,17 +27,13 @@ import org.elasticsearch.http.HttpResponse; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.transport.netty4.Netty4Utils; -import java.util.Collections; -import java.util.EnumMap; -import java.util.Map; - public class Netty4HttpResponse extends DefaultFullHttpResponse implements HttpResponse, HttpPipelinedMessage { private final int sequence; private final Netty4HttpRequest request; Netty4HttpResponse(Netty4HttpRequest request, RestStatus status, BytesReference content) { - super(request.nettyRequest().protocolVersion(), getStatus(status), Netty4Utils.toByteBuf(content)); + super(request.nettyRequest().protocolVersion(), HttpResponseStatus.valueOf(status.getStatus()), Netty4Utils.toByteBuf(content)); this.sequence = request.sequence(); this.request = request; } @@ -60,62 +56,5 @@ public class Netty4HttpResponse extends DefaultFullHttpResponse implements HttpR public Netty4HttpRequest getRequest() { return request; } - - private static Map MAP; - - static { - EnumMap map = new EnumMap<>(RestStatus.class); - map.put(RestStatus.CONTINUE, HttpResponseStatus.CONTINUE); - map.put(RestStatus.SWITCHING_PROTOCOLS, HttpResponseStatus.SWITCHING_PROTOCOLS); - map.put(RestStatus.OK, HttpResponseStatus.OK); - map.put(RestStatus.CREATED, HttpResponseStatus.CREATED); - map.put(RestStatus.ACCEPTED, HttpResponseStatus.ACCEPTED); - 
map.put(RestStatus.NON_AUTHORITATIVE_INFORMATION, HttpResponseStatus.NON_AUTHORITATIVE_INFORMATION); - map.put(RestStatus.NO_CONTENT, HttpResponseStatus.NO_CONTENT); - map.put(RestStatus.RESET_CONTENT, HttpResponseStatus.RESET_CONTENT); - map.put(RestStatus.PARTIAL_CONTENT, HttpResponseStatus.PARTIAL_CONTENT); - map.put(RestStatus.MULTI_STATUS, HttpResponseStatus.INTERNAL_SERVER_ERROR); // no status for this?? - map.put(RestStatus.MULTIPLE_CHOICES, HttpResponseStatus.MULTIPLE_CHOICES); - map.put(RestStatus.MOVED_PERMANENTLY, HttpResponseStatus.MOVED_PERMANENTLY); - map.put(RestStatus.FOUND, HttpResponseStatus.FOUND); - map.put(RestStatus.SEE_OTHER, HttpResponseStatus.SEE_OTHER); - map.put(RestStatus.NOT_MODIFIED, HttpResponseStatus.NOT_MODIFIED); - map.put(RestStatus.USE_PROXY, HttpResponseStatus.USE_PROXY); - map.put(RestStatus.TEMPORARY_REDIRECT, HttpResponseStatus.TEMPORARY_REDIRECT); - map.put(RestStatus.BAD_REQUEST, HttpResponseStatus.BAD_REQUEST); - map.put(RestStatus.UNAUTHORIZED, HttpResponseStatus.UNAUTHORIZED); - map.put(RestStatus.PAYMENT_REQUIRED, HttpResponseStatus.PAYMENT_REQUIRED); - map.put(RestStatus.FORBIDDEN, HttpResponseStatus.FORBIDDEN); - map.put(RestStatus.NOT_FOUND, HttpResponseStatus.NOT_FOUND); - map.put(RestStatus.METHOD_NOT_ALLOWED, HttpResponseStatus.METHOD_NOT_ALLOWED); - map.put(RestStatus.NOT_ACCEPTABLE, HttpResponseStatus.NOT_ACCEPTABLE); - map.put(RestStatus.PROXY_AUTHENTICATION, HttpResponseStatus.PROXY_AUTHENTICATION_REQUIRED); - map.put(RestStatus.REQUEST_TIMEOUT, HttpResponseStatus.REQUEST_TIMEOUT); - map.put(RestStatus.CONFLICT, HttpResponseStatus.CONFLICT); - map.put(RestStatus.GONE, HttpResponseStatus.GONE); - map.put(RestStatus.LENGTH_REQUIRED, HttpResponseStatus.LENGTH_REQUIRED); - map.put(RestStatus.PRECONDITION_FAILED, HttpResponseStatus.PRECONDITION_FAILED); - map.put(RestStatus.REQUEST_ENTITY_TOO_LARGE, HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE); - map.put(RestStatus.REQUEST_URI_TOO_LONG, 
HttpResponseStatus.REQUEST_URI_TOO_LONG); - map.put(RestStatus.UNSUPPORTED_MEDIA_TYPE, HttpResponseStatus.UNSUPPORTED_MEDIA_TYPE); - map.put(RestStatus.REQUESTED_RANGE_NOT_SATISFIED, HttpResponseStatus.REQUESTED_RANGE_NOT_SATISFIABLE); - map.put(RestStatus.EXPECTATION_FAILED, HttpResponseStatus.EXPECTATION_FAILED); - map.put(RestStatus.UNPROCESSABLE_ENTITY, HttpResponseStatus.BAD_REQUEST); - map.put(RestStatus.LOCKED, HttpResponseStatus.BAD_REQUEST); - map.put(RestStatus.FAILED_DEPENDENCY, HttpResponseStatus.BAD_REQUEST); - map.put(RestStatus.TOO_MANY_REQUESTS, HttpResponseStatus.TOO_MANY_REQUESTS); - map.put(RestStatus.INTERNAL_SERVER_ERROR, HttpResponseStatus.INTERNAL_SERVER_ERROR); - map.put(RestStatus.NOT_IMPLEMENTED, HttpResponseStatus.NOT_IMPLEMENTED); - map.put(RestStatus.BAD_GATEWAY, HttpResponseStatus.BAD_GATEWAY); - map.put(RestStatus.SERVICE_UNAVAILABLE, HttpResponseStatus.SERVICE_UNAVAILABLE); - map.put(RestStatus.GATEWAY_TIMEOUT, HttpResponseStatus.GATEWAY_TIMEOUT); - map.put(RestStatus.HTTP_VERSION_NOT_SUPPORTED, HttpResponseStatus.HTTP_VERSION_NOT_SUPPORTED); - MAP = Collections.unmodifiableMap(map); - } - - private static HttpResponseStatus getStatus(RestStatus status) { - return MAP.getOrDefault(status, HttpResponseStatus.INTERNAL_SERVER_ERROR); - } - } diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpResponse.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpResponse.java index 24de843dcc8..d6749466738 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpResponse.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpResponse.java @@ -26,17 +26,13 @@ import org.elasticsearch.http.HttpPipelinedMessage; import org.elasticsearch.http.HttpResponse; import org.elasticsearch.rest.RestStatus; -import java.util.Collections; -import java.util.EnumMap; -import java.util.Map; - public class NioHttpResponse extends DefaultFullHttpResponse 
implements HttpResponse, HttpPipelinedMessage { private final int sequence; private final NioHttpRequest request; NioHttpResponse(NioHttpRequest request, RestStatus status, BytesReference content) { - super(request.nettyRequest().protocolVersion(), getStatus(status), ByteBufUtils.toByteBuf(content)); + super(request.nettyRequest().protocolVersion(), HttpResponseStatus.valueOf(status.getStatus()), ByteBufUtils.toByteBuf(content)); this.sequence = request.sequence(); this.request = request; } @@ -56,63 +52,7 @@ public class NioHttpResponse extends DefaultFullHttpResponse implements HttpResp return sequence; } - private static Map MAP; - public NioHttpRequest getRequest() { return request; } - - static { - EnumMap map = new EnumMap<>(RestStatus.class); - map.put(RestStatus.CONTINUE, HttpResponseStatus.CONTINUE); - map.put(RestStatus.SWITCHING_PROTOCOLS, HttpResponseStatus.SWITCHING_PROTOCOLS); - map.put(RestStatus.OK, HttpResponseStatus.OK); - map.put(RestStatus.CREATED, HttpResponseStatus.CREATED); - map.put(RestStatus.ACCEPTED, HttpResponseStatus.ACCEPTED); - map.put(RestStatus.NON_AUTHORITATIVE_INFORMATION, HttpResponseStatus.NON_AUTHORITATIVE_INFORMATION); - map.put(RestStatus.NO_CONTENT, HttpResponseStatus.NO_CONTENT); - map.put(RestStatus.RESET_CONTENT, HttpResponseStatus.RESET_CONTENT); - map.put(RestStatus.PARTIAL_CONTENT, HttpResponseStatus.PARTIAL_CONTENT); - map.put(RestStatus.MULTI_STATUS, HttpResponseStatus.INTERNAL_SERVER_ERROR); // no status for this?? 
- map.put(RestStatus.MULTIPLE_CHOICES, HttpResponseStatus.MULTIPLE_CHOICES); - map.put(RestStatus.MOVED_PERMANENTLY, HttpResponseStatus.MOVED_PERMANENTLY); - map.put(RestStatus.FOUND, HttpResponseStatus.FOUND); - map.put(RestStatus.SEE_OTHER, HttpResponseStatus.SEE_OTHER); - map.put(RestStatus.NOT_MODIFIED, HttpResponseStatus.NOT_MODIFIED); - map.put(RestStatus.USE_PROXY, HttpResponseStatus.USE_PROXY); - map.put(RestStatus.TEMPORARY_REDIRECT, HttpResponseStatus.TEMPORARY_REDIRECT); - map.put(RestStatus.BAD_REQUEST, HttpResponseStatus.BAD_REQUEST); - map.put(RestStatus.UNAUTHORIZED, HttpResponseStatus.UNAUTHORIZED); - map.put(RestStatus.PAYMENT_REQUIRED, HttpResponseStatus.PAYMENT_REQUIRED); - map.put(RestStatus.FORBIDDEN, HttpResponseStatus.FORBIDDEN); - map.put(RestStatus.NOT_FOUND, HttpResponseStatus.NOT_FOUND); - map.put(RestStatus.METHOD_NOT_ALLOWED, HttpResponseStatus.METHOD_NOT_ALLOWED); - map.put(RestStatus.NOT_ACCEPTABLE, HttpResponseStatus.NOT_ACCEPTABLE); - map.put(RestStatus.PROXY_AUTHENTICATION, HttpResponseStatus.PROXY_AUTHENTICATION_REQUIRED); - map.put(RestStatus.REQUEST_TIMEOUT, HttpResponseStatus.REQUEST_TIMEOUT); - map.put(RestStatus.CONFLICT, HttpResponseStatus.CONFLICT); - map.put(RestStatus.GONE, HttpResponseStatus.GONE); - map.put(RestStatus.LENGTH_REQUIRED, HttpResponseStatus.LENGTH_REQUIRED); - map.put(RestStatus.PRECONDITION_FAILED, HttpResponseStatus.PRECONDITION_FAILED); - map.put(RestStatus.REQUEST_ENTITY_TOO_LARGE, HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE); - map.put(RestStatus.REQUEST_URI_TOO_LONG, HttpResponseStatus.REQUEST_URI_TOO_LONG); - map.put(RestStatus.UNSUPPORTED_MEDIA_TYPE, HttpResponseStatus.UNSUPPORTED_MEDIA_TYPE); - map.put(RestStatus.REQUESTED_RANGE_NOT_SATISFIED, HttpResponseStatus.REQUESTED_RANGE_NOT_SATISFIABLE); - map.put(RestStatus.EXPECTATION_FAILED, HttpResponseStatus.EXPECTATION_FAILED); - map.put(RestStatus.UNPROCESSABLE_ENTITY, HttpResponseStatus.BAD_REQUEST); - map.put(RestStatus.LOCKED, 
HttpResponseStatus.BAD_REQUEST); - map.put(RestStatus.FAILED_DEPENDENCY, HttpResponseStatus.BAD_REQUEST); - map.put(RestStatus.TOO_MANY_REQUESTS, HttpResponseStatus.TOO_MANY_REQUESTS); - map.put(RestStatus.INTERNAL_SERVER_ERROR, HttpResponseStatus.INTERNAL_SERVER_ERROR); - map.put(RestStatus.NOT_IMPLEMENTED, HttpResponseStatus.NOT_IMPLEMENTED); - map.put(RestStatus.BAD_GATEWAY, HttpResponseStatus.BAD_GATEWAY); - map.put(RestStatus.SERVICE_UNAVAILABLE, HttpResponseStatus.SERVICE_UNAVAILABLE); - map.put(RestStatus.GATEWAY_TIMEOUT, HttpResponseStatus.GATEWAY_TIMEOUT); - map.put(RestStatus.HTTP_VERSION_NOT_SUPPORTED, HttpResponseStatus.HTTP_VERSION_NOT_SUPPORTED); - MAP = Collections.unmodifiableMap(map); - } - - private static HttpResponseStatus getStatus(RestStatus status) { - return MAP.getOrDefault(status, HttpResponseStatus.INTERNAL_SERVER_ERROR); - } } From 8c6ee7db54fa3156e9eed968d23a9bce55694e0e Mon Sep 17 00:00:00 2001 From: David Pilato Date: Fri, 15 Jun 2018 07:32:49 +0200 Subject: [PATCH 05/15] Describe how to add a plugin in Dockerfile (#31340) When installing a plugin, people need to add the `--batch` option. It's better to document it as it could be a common use case. --- docs/reference/setup/install/docker.asciidoc | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc index 0e62fa207f6..b18f7c57a16 100644 --- a/docs/reference/setup/install/docker.asciidoc +++ b/docs/reference/setup/install/docker.asciidoc @@ -279,6 +279,13 @@ docker build --tag=elasticsearch-custom . docker run -ti -v /usr/share/elasticsearch/data elasticsearch-custom -------------------------------------------- +Some plugins require additional security permissions. 
You have to explicitly accept +them either by attaching a `tty` when you run the Docker image and accepting yes at +the prompts, or inspecting the security permissions separately and if you are +comfortable with them adding the `--batch` flag to the plugin install command. +See {plugins}/_other_command_line_parameters.html[Plugin Management documentation] +for more details. + ===== D. Override the image's default https://docs.docker.com/engine/reference/run/#cmd-default-command-or-options[CMD] Options can be passed as command-line options to the {es} process by From 992c7889ee83fc3514145d0875ac359010f623ae Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Fri, 15 Jun 2018 09:26:47 +0200 Subject: [PATCH 06/15] Uncouple persistent task state and status (#31031) This pull request removes the relationship between the state of persistent task (as stored in the cluster state) and the status of the task (as reported by the Task APIs and used in various places) that have been confusing for some time (#29608). In order to do that, a new PersistentTaskState interface is added. This interface represents the persisted state of a persistent task. The methods used to update the state of persistent tasks are renamed: updatePersistentStatus() becomes updatePersistentTaskState() and now takes a PersistentTaskState as a parameter. The Task.Status type as been changed to PersistentTaskState in all places were it make sense (in persistent task customs in cluster state and all other methods that deal with the state of an allocated persistent task). 
--- .../persistent/AllocatedPersistentTask.java | 8 +- .../NodePersistentTasksExecutor.java | 16 ++- .../persistent/PersistentTaskState.java | 29 +++++ .../PersistentTasksClusterService.java | 32 ++--- .../PersistentTasksCustomMetaData.java | 87 ++++++------- .../persistent/PersistentTasksExecutor.java | 3 +- .../PersistentTasksNodeService.java | 10 +- .../persistent/PersistentTasksService.java | 14 +-- .../UpdatePersistentTaskStatusAction.java | 31 +++-- .../PersistentTasksClusterServiceTests.java | 3 +- .../PersistentTasksCustomMetaDataTests.java | 22 ++-- .../PersistentTasksDecidersTestCase.java | 3 +- .../persistent/PersistentTasksExecutorIT.java | 12 +- .../PersistentTasksNodeServiceTests.java | 26 ++-- .../persistent/TestPersistentTasksPlugin.java | 41 +++---- .../UpdatePersistentTaskRequestTests.java | 7 +- .../xpack/core/XPackClientPlugin.java | 26 ++-- .../xpack/core/ml/MlMetadata.java | 12 +- .../xpack/core/ml/datafeed/DatafeedState.java | 4 +- .../{JobTaskStatus.java => JobTaskState.java} | 18 +-- .../core/rollup/job/RollupJobStatus.java | 5 +- .../ml/action/TransportCloseJobAction.java | 6 +- .../ml/action/TransportOpenJobAction.java | 16 +-- .../action/TransportStartDatafeedAction.java | 9 +- .../action/TransportStopDatafeedAction.java | 8 +- .../xpack/ml/datafeed/DatafeedManager.java | 2 +- .../ml/datafeed/DatafeedNodeSelector.java | 14 +-- .../autodetect/AutodetectProcessManager.java | 10 +- .../xpack/ml/MlMetadataTests.java | 4 +- .../action/TransportCloseJobActionTests.java | 2 +- .../action/TransportOpenJobActionTests.java | 6 +- .../TransportStopDatafeedActionTests.java | 4 +- .../ml/datafeed/DatafeedManagerTests.java | 4 +- .../datafeed/DatafeedNodeSelectorTests.java | 8 +- .../integration/BasicDistributedJobsIT.java | 22 ++-- .../xpack/ml/integration/TooManyJobsIT.java | 4 +- ...tatusTests.java => JobTaskStateTests.java} | 16 +-- .../AutodetectProcessManagerTests.java | 4 +- .../xpack/rollup/job/RollupJobTask.java | 60 ++++----- 
.../elasticsearch/xpack/rollup/RollupIT.java | 65 ++++------ .../xpack/rollup/job/RollupJobTaskTests.java | 114 ++++++++++-------- .../MlNativeAutodetectIntegTestCase.java | 8 +- 42 files changed, 404 insertions(+), 391 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/persistent/PersistentTaskState.java rename x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/{JobTaskStatus.java => JobTaskState.java} (86%) rename x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/config/{JobTaskStatusTests.java => JobTaskStateTests.java} (53%) diff --git a/server/src/main/java/org/elasticsearch/persistent/AllocatedPersistentTask.java b/server/src/main/java/org/elasticsearch/persistent/AllocatedPersistentTask.java index d4d299b7e4a..54dcffab6e3 100644 --- a/server/src/main/java/org/elasticsearch/persistent/AllocatedPersistentTask.java +++ b/server/src/main/java/org/elasticsearch/persistent/AllocatedPersistentTask.java @@ -25,7 +25,6 @@ import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksReque import org.elasticsearch.common.Nullable; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.tasks.CancellableTask; -import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskManager; @@ -77,8 +76,9 @@ public class AllocatedPersistentTask extends CancellableTask { *

* This doesn't affect the status of this allocated task. */ - public void updatePersistentStatus(Task.Status status, ActionListener> listener) { - persistentTasksService.updateStatus(persistentTaskId, allocationId, status, listener); + public void updatePersistentTaskState(final PersistentTaskState state, + final ActionListener> listener) { + persistentTasksService.sendUpdateStateRequest(persistentTaskId, allocationId, state, listener); } public String getPersistentTaskId() { @@ -116,7 +116,7 @@ public class AllocatedPersistentTask extends CancellableTask { } protected final boolean isCompleted() { - return state.get() == State.COMPLETED; + return state.get() == State.COMPLETED; } boolean markAsCancelled() { diff --git a/server/src/main/java/org/elasticsearch/persistent/NodePersistentTasksExecutor.java b/server/src/main/java/org/elasticsearch/persistent/NodePersistentTasksExecutor.java index bf42733ff54..87ea08dc74d 100644 --- a/server/src/main/java/org/elasticsearch/persistent/NodePersistentTasksExecutor.java +++ b/server/src/main/java/org/elasticsearch/persistent/NodePersistentTasksExecutor.java @@ -20,7 +20,6 @@ package org.elasticsearch.persistent; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; /** @@ -29,16 +28,17 @@ import org.elasticsearch.threadpool.ThreadPool; * It abstracts away the execution of tasks and greatly simplifies testing of PersistentTasksNodeService */ public class NodePersistentTasksExecutor { + private final ThreadPool threadPool; - public NodePersistentTasksExecutor(ThreadPool threadPool) { + NodePersistentTasksExecutor(ThreadPool threadPool) { this.threadPool = threadPool; } - public void executeTask(Params params, - @Nullable Task.Status status, - AllocatedPersistentTask task, - PersistentTasksExecutor executor) { + public void executeTask(final Params params, + final @Nullable 
PersistentTaskState state, + final AllocatedPersistentTask task, + final PersistentTasksExecutor executor) { threadPool.executor(executor.getExecutor()).execute(new AbstractRunnable() { @Override public void onFailure(Exception e) { @@ -49,14 +49,12 @@ public class NodePersistentTasksExecutor { @Override protected void doRun() throws Exception { try { - executor.nodeOperation(task, params, status); + executor.nodeOperation(task, params, state); } catch (Exception ex) { task.markAsFailed(ex); } } }); - } - } diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTaskState.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTaskState.java new file mode 100644 index 00000000000..57c913f51bb --- /dev/null +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTaskState.java @@ -0,0 +1,29 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.persistent; + +import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.common.xcontent.ToXContentObject; + +/** + * {@link PersistentTaskState} represents the state of the persistent tasks, as it + * is persisted in the cluster state. 
+ */ +public interface PersistentTaskState extends ToXContentObject, NamedWriteable { +} diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java index 1464279a814..9ed0af010b5 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java @@ -35,7 +35,6 @@ import org.elasticsearch.persistent.PersistentTasksCustomMetaData.Assignment; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; import org.elasticsearch.persistent.decider.AssignmentDecision; import org.elasticsearch.persistent.decider.EnableAssignmentDecider; -import org.elasticsearch.tasks.Task; import java.util.Objects; @@ -178,27 +177,30 @@ public class PersistentTasksClusterService extends AbstractComponent implements } /** - * Update task status + * Update the state of a persistent task * - * @param id the id of a persistent task - * @param allocationId the expected allocation id of the persistent task - * @param status new status - * @param listener the listener that will be called when task is removed + * @param taskId the id of a persistent task + * @param taskAllocationId the expected allocation id of the persistent task + * @param taskState new state + * @param listener the listener that will be called when task is removed */ - public void updatePersistentTaskStatus(String id, long allocationId, Task.Status status, ActionListener> listener) { - clusterService.submitStateUpdateTask("update task status", new ClusterStateUpdateTask() { + public void updatePersistentTaskState(final String taskId, + final long taskAllocationId, + final PersistentTaskState taskState, + final ActionListener> listener) { + clusterService.submitStateUpdateTask("update task state", new ClusterStateUpdateTask() { @Override public ClusterState 
execute(ClusterState currentState) { PersistentTasksCustomMetaData.Builder tasksInProgress = builder(currentState); - if (tasksInProgress.hasTask(id, allocationId)) { - return update(currentState, tasksInProgress.updateTaskStatus(id, status)); + if (tasksInProgress.hasTask(taskId, taskAllocationId)) { + return update(currentState, tasksInProgress.updateTaskState(taskId, taskState)); } else { - if (tasksInProgress.hasTask(id)) { - logger.warn("trying to update status on task {} with unexpected allocation id {}", id, allocationId); + if (tasksInProgress.hasTask(taskId)) { + logger.warn("trying to update state on task {} with unexpected allocation id {}", taskId, taskAllocationId); } else { - logger.warn("trying to update status on non-existing task {}", id); + logger.warn("trying to update state on non-existing task {}", taskId); } - throw new ResourceNotFoundException("the task with id {} and allocation id {} doesn't exist", id, allocationId); + throw new ResourceNotFoundException("the task with id {} and allocation id {} doesn't exist", taskId, taskAllocationId); } } @@ -209,7 +211,7 @@ public class PersistentTasksClusterService extends AbstractComponent implements @Override public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - listener.onResponse(PersistentTasksCustomMetaData.getTaskWithId(newState, id)); + listener.onResponse(PersistentTasksCustomMetaData.getTaskWithId(newState, taskId)); } }); } diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java index 09346704a80..f81b7c770e5 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java @@ -38,8 +38,6 @@ import org.elasticsearch.common.xcontent.ToXContent; import 
org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.tasks.Task.Status; import java.io.IOException; import java.util.Collection; @@ -61,13 +59,12 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constru * A cluster state record that contains a list of all running persistent tasks */ public final class PersistentTasksCustomMetaData extends AbstractNamedDiffable implements MetaData.Custom { - public static final String TYPE = "persistent_tasks"; + public static final String TYPE = "persistent_tasks"; private static final String API_CONTEXT = MetaData.XContentContext.API.toString(); // TODO: Implement custom Diff for tasks private final Map> tasks; - private final long lastAllocationId; public PersistentTasksCustomMetaData(long lastAllocationId, Map> tasks) { @@ -94,8 +91,8 @@ public final class PersistentTasksCustomMetaData extends AbstractNamedDiffable, String> parser = new ObjectParser<>("named"); parser.declareObject(TaskDescriptionBuilder::setParams, (p, c) -> p.namedObject(PersistentTaskParams.class, c, null), new ParseField("params")); - parser.declareObject(TaskDescriptionBuilder::setStatus, - (p, c) -> p.namedObject(Status.class, c, null), new ParseField("status")); + parser.declareObject(TaskDescriptionBuilder::setState, + (p, c) -> p.namedObject(PersistentTaskState.class, c, null), new ParseField("state", "status")); TASK_DESCRIPTION_PARSER = (XContentParser p, Void c, String name) -> parser.parse(p, new TaskDescriptionBuilder<>(name), name); // Assignment parser @@ -115,7 +112,7 @@ public final class PersistentTasksCustomMetaData extends AbstractNamedDiffable builder = objects.get(0); taskBuilder.setTaskName(builder.taskName); taskBuilder.setParams(builder.params); - taskBuilder.setStatus(builder.status); + taskBuilder.setState(builder.state); }, 
TASK_DESCRIPTION_PARSER, new ParseField("task")); PERSISTENT_TASK_PARSER.declareObject(TaskBuilder::setAssignment, ASSIGNMENT_PARSER, new ParseField("assignment")); PERSISTENT_TASK_PARSER.declareLong(TaskBuilder::setAllocationIdOnLastStatusUpdate, @@ -123,12 +120,13 @@ public final class PersistentTasksCustomMetaData extends AbstractNamedDiffable { + private final String taskName; private Params params; - private Status status; + private PersistentTaskState state; private TaskDescriptionBuilder(String taskName) { this.taskName = taskName; @@ -139,8 +137,8 @@ public final class PersistentTasksCustomMetaData extends AbstractNamedDiffable implements Writeable, ToXContentObject { + private final String id; private final long allocationId; private final String taskName; private final P params; - @Nullable - private final Status status; + private final @Nullable PersistentTaskState state; private final Assignment assignment; - @Nullable - private final Long allocationIdOnLastStatusUpdate; + private final @Nullable Long allocationIdOnLastStatusUpdate; - public PersistentTask(String id, String taskName, P params, long allocationId, Assignment assignment) { - this(id, allocationId, taskName, params, null, assignment, null); + public PersistentTask(final String id, final String name, final P params, final long allocationId, final Assignment assignment) { + this(id, allocationId, name, params, null, assignment, null); } - public PersistentTask(PersistentTask

task, long allocationId, Assignment assignment) { - this(task.id, allocationId, task.taskName, task.params, task.status, - assignment, task.allocationId); + public PersistentTask(final PersistentTask

task, final long allocationId, final Assignment assignment) { + this(task.id, allocationId, task.taskName, task.params, task.state, assignment, task.allocationId); } - public PersistentTask(PersistentTask

task, Status status) { - this(task.id, task.allocationId, task.taskName, task.params, status, - task.assignment, task.allocationId); + public PersistentTask(final PersistentTask

task, final PersistentTaskState state) { + this(task.id, task.allocationId, task.taskName, task.params, state, task.assignment, task.allocationId); } - private PersistentTask(String id, long allocationId, String taskName, P params, - Status status, Assignment assignment, Long allocationIdOnLastStatusUpdate) { + private PersistentTask(final String id, final long allocationId, final String name, final P params, + final PersistentTaskState state, final Assignment assignment, final Long allocationIdOnLastStatusUpdate) { this.id = id; this.allocationId = allocationId; - this.taskName = taskName; + this.taskName = name; this.params = params; - this.status = status; + this.state = state; this.assignment = assignment; this.allocationIdOnLastStatusUpdate = allocationIdOnLastStatusUpdate; if (params != null) { @@ -300,10 +295,10 @@ public final class PersistentTasksCustomMetaData extends AbstractNamedDiffable setStatus(Status status) { - this.status = status; + public TaskBuilder setState(PersistentTaskState state) { + this.state = state; return this; } @@ -489,8 +483,7 @@ public final class PersistentTasksCustomMetaData extends AbstractNamedDiffable build() { - return new PersistentTask<>(id, allocationId, taskName, params, status, - assignment, allocationIdOnLastStatusUpdate); + return new PersistentTask<>(id, allocationId, taskName, params, state, assignment, allocationIdOnLastStatusUpdate); } } @@ -608,13 +601,13 @@ public final class PersistentTasksCustomMetaData extends AbstractNamedDiffable taskInProgress = tasks.get(taskId); if (taskInProgress != null) { changed = true; - tasks.put(taskId, new PersistentTask<>(taskInProgress, status)); + tasks.put(taskId, new PersistentTask<>(taskInProgress, taskState)); } else { throw new ResourceNotFoundException("cannot update task with id {" + taskId + "}, the task no longer exists"); } diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksExecutor.java 
b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksExecutor.java index de75b1ff540..758ffbe69a0 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksExecutor.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksExecutor.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.Assignment; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; -import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import java.util.Map; @@ -118,7 +117,7 @@ public abstract class PersistentTasksExecutor runningTasks = new HashMap<>(); private final PersistentTasksService persistentTasksService; private final PersistentTasksExecutorRegistry persistentTasksExecutorRegistry; private final TaskManager taskManager; private final NodePersistentTasksExecutor nodePersistentTasksExecutor; - public PersistentTasksNodeService(Settings settings, PersistentTasksService persistentTasksService, PersistentTasksExecutorRegistry persistentTasksExecutorRegistry, @@ -172,7 +172,7 @@ public class PersistentTasksNodeService extends AbstractComponent implements Clu task.getPersistentTaskId(), task.getAllocationId()); try { runningTasks.put(taskInProgress.getAllocationId(), task); - nodePersistentTasksExecutor.executeTask(taskInProgress.getParams(), taskInProgress.getStatus(), task, executor); + nodePersistentTasksExecutor.executeTask(taskInProgress.getParams(), taskInProgress.getState(), task, executor); } catch (Exception e) { // Submit task failure task.markAsFailed(e); @@ -215,8 +215,8 @@ public class PersistentTasksNodeService extends AbstractComponent implements Clu } } - public static class Status implements Task.Status { + public static final String NAME = "persistent_executor"; private final AllocatedPersistentTask.State state; @@ -252,10 +252,6 @@ 
public class PersistentTasksNodeService extends AbstractComponent implements Clu return Strings.toString(this); } - public AllocatedPersistentTask.State getState() { - return state; - } - @Override public boolean isFragment() { return false; diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java index 01c28dd5cd6..d0c791e3df0 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java @@ -35,7 +35,6 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; -import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; @@ -113,13 +112,14 @@ public class PersistentTasksService extends AbstractComponent { * Notifies the master node that the state of a persistent task has changed. *

* Persistent task implementers shouldn't call this method directly and use - * {@link AllocatedPersistentTask#updatePersistentStatus} instead + * {@link AllocatedPersistentTask#updatePersistentTaskState} instead */ - void updateStatus(final String taskId, - final long taskAllocationID, - final Task.Status status, - final ActionListener> listener) { - UpdatePersistentTaskStatusAction.Request request = new UpdatePersistentTaskStatusAction.Request(taskId, taskAllocationID, status); + void sendUpdateStateRequest(final String taskId, + final long taskAllocationID, + final PersistentTaskState taskState, + final ActionListener> listener) { + UpdatePersistentTaskStatusAction.Request request = + new UpdatePersistentTaskStatusAction.Request(taskId, taskAllocationID, taskState); execute(request, UpdatePersistentTaskStatusAction.INSTANCE, listener); } diff --git a/server/src/main/java/org/elasticsearch/persistent/UpdatePersistentTaskStatusAction.java b/server/src/main/java/org/elasticsearch/persistent/UpdatePersistentTaskStatusAction.java index a898558fc26..a639e4bde53 100644 --- a/server/src/main/java/org/elasticsearch/persistent/UpdatePersistentTaskStatusAction.java +++ b/server/src/main/java/org/elasticsearch/persistent/UpdatePersistentTaskStatusAction.java @@ -39,7 +39,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; -import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -63,16 +62,15 @@ public class UpdatePersistentTaskStatusAction extends Action { @@ -182,9 +178,10 @@ public class UpdatePersistentTaskStatusAction extends Action listener) { - persistentTasksClusterService.updatePersistentTaskStatus(request.taskId, request.allocationId, request.status, + 
persistentTasksClusterService.updatePersistentTaskState(request.taskId, request.allocationId, request.state, new ActionListener>() { @Override public void onResponse(PersistentTask task) { diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksClusterServiceTests.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksClusterServiceTests.java index 916fdee2136..f13a35613d5 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksClusterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksClusterServiceTests.java @@ -37,7 +37,6 @@ import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestParams; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor; import org.elasticsearch.persistent.decider.EnableAssignmentDecider; -import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.threadpool.TestThreadPool; @@ -649,7 +648,7 @@ public class PersistentTasksClusterServiceTests extends ESTestCase { } @Override - protected void nodeOperation(AllocatedPersistentTask task, P params, Task.Status status) { + protected void nodeOperation(AllocatedPersistentTask task, P params, PersistentTaskState state) { throw new UnsupportedOperationException(); } })); diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksCustomMetaDataTests.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksCustomMetaDataTests.java index 72e74359d30..5b1f74d6cdf 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksCustomMetaDataTests.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksCustomMetaDataTests.java @@ -42,10 +42,9 @@ import org.elasticsearch.common.xcontent.XContentType; import 
org.elasticsearch.persistent.PersistentTasksCustomMetaData.Assignment; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.Builder; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; -import org.elasticsearch.persistent.TestPersistentTasksPlugin.Status; +import org.elasticsearch.persistent.TestPersistentTasksPlugin.State; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestParams; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor; -import org.elasticsearch.tasks.Task; import org.elasticsearch.test.AbstractDiffableSerializationTestCase; import java.io.IOException; @@ -79,7 +78,7 @@ public class PersistentTasksCustomMetaDataTests extends AbstractDiffableSerializ randomAssignment()); if (randomBoolean()) { // From time to time update status - tasks.updateTaskStatus(taskId, new Status(randomAlphaOfLength(10))); + tasks.updateTaskState(taskId, new State(randomAlphaOfLength(10))); } } return tasks.build(); @@ -96,7 +95,7 @@ public class PersistentTasksCustomMetaDataTests extends AbstractDiffableSerializ new Entry(MetaData.Custom.class, PersistentTasksCustomMetaData.TYPE, PersistentTasksCustomMetaData::new), new Entry(NamedDiff.class, PersistentTasksCustomMetaData.TYPE, PersistentTasksCustomMetaData::readDiffFrom), new Entry(PersistentTaskParams.class, TestPersistentTasksExecutor.NAME, TestParams::new), - new Entry(Task.Status.class, TestPersistentTasksExecutor.NAME, Status::new) + new Entry(PersistentTaskState.class, TestPersistentTasksExecutor.NAME, State::new) )); } @@ -118,7 +117,7 @@ public class PersistentTasksCustomMetaDataTests extends AbstractDiffableSerializ if (builder.getCurrentTaskIds().isEmpty()) { addRandomTask(builder); } else { - builder.updateTaskStatus(pickRandomTask(builder), randomBoolean() ? new Status(randomAlphaOfLength(10)) : null); + builder.updateTaskState(pickRandomTask(builder), randomBoolean() ? 
new State(randomAlphaOfLength(10)) : null); } break; case 3: @@ -155,9 +154,10 @@ public class PersistentTasksCustomMetaDataTests extends AbstractDiffableSerializ @Override protected NamedXContentRegistry xContentRegistry() { return new NamedXContentRegistry(Arrays.asList( - new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(TestPersistentTasksExecutor.NAME), - TestParams::fromXContent), - new NamedXContentRegistry.Entry(Task.Status.class, new ParseField(TestPersistentTasksExecutor.NAME), Status::fromXContent) + new NamedXContentRegistry.Entry(PersistentTaskParams.class, + new ParseField(TestPersistentTasksExecutor.NAME), TestParams::fromXContent), + new NamedXContentRegistry.Entry(PersistentTaskState.class, + new ParseField(TestPersistentTasksExecutor.NAME), State::fromXContent) )); } @@ -186,7 +186,7 @@ public class PersistentTasksCustomMetaDataTests extends AbstractDiffableSerializ // Things that should be serialized assertEquals(testTask.getTaskName(), newTask.getTaskName()); assertEquals(testTask.getId(), newTask.getId()); - assertEquals(testTask.getStatus(), newTask.getStatus()); + assertEquals(testTask.getState(), newTask.getState()); assertEquals(testTask.getParams(), newTask.getParams()); // Things that shouldn't be serialized @@ -224,10 +224,10 @@ public class PersistentTasksCustomMetaDataTests extends AbstractDiffableSerializ case 2: if (builder.hasTask(lastKnownTask)) { changed = true; - builder.updateTaskStatus(lastKnownTask, randomBoolean() ? new Status(randomAlphaOfLength(10)) : null); + builder.updateTaskState(lastKnownTask, randomBoolean() ? 
new State(randomAlphaOfLength(10)) : null); } else { String fLastKnownTask = lastKnownTask; - expectThrows(ResourceNotFoundException.class, () -> builder.updateTaskStatus(fLastKnownTask, null)); + expectThrows(ResourceNotFoundException.class, () -> builder.updateTaskState(fLastKnownTask, null)); } break; case 3: diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksDecidersTestCase.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksDecidersTestCase.java index 356e518198c..655a21a5f53 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksDecidersTestCase.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksDecidersTestCase.java @@ -27,7 +27,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -64,7 +63,7 @@ public abstract class PersistentTasksDecidersTestCase extends ESTestCase { public PersistentTasksExecutor getPersistentTaskExecutorSafe(String taskName) { return new PersistentTasksExecutor(clusterService.getSettings(), taskName, null) { @Override - protected void nodeOperation(AllocatedPersistentTask task, Params params, Task.Status status) { + protected void nodeOperation(AllocatedPersistentTask task, Params params, PersistentTaskState state) { logger.debug("Executing task {}", task); } }; diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java index 8f37a2412ef..e746ff71627 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java +++ 
b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java @@ -31,7 +31,7 @@ import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; import org.elasticsearch.persistent.PersistentTasksService.WaitForPersistentTaskListener; -import org.elasticsearch.persistent.TestPersistentTasksPlugin.Status; +import org.elasticsearch.persistent.TestPersistentTasksPlugin.State; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestParams; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestTasksRequestBuilder; @@ -190,11 +190,11 @@ public class PersistentTasksExecutorIT extends ESIntegTestCase { PersistentTasksCustomMetaData tasksInProgress = internalCluster().clusterService().state().getMetaData() .custom(PersistentTasksCustomMetaData.TYPE); assertThat(tasksInProgress.tasks().size(), equalTo(1)); - assertThat(tasksInProgress.tasks().iterator().next().getStatus(), nullValue()); + assertThat(tasksInProgress.tasks().iterator().next().getState(), nullValue()); int numberOfUpdates = randomIntBetween(1, 10); for (int i = 0; i < numberOfUpdates; i++) { - logger.info("Updating the task status"); + logger.info("Updating the task states"); // Complete the running task and make sure it finishes properly assertThat(new TestTasksRequestBuilder(client()).setOperation("update_status").setTaskId(firstRunningTask.getTaskId()) .get().getTasks().size(), equalTo(1)); @@ -202,8 +202,8 @@ public class PersistentTasksExecutorIT extends ESIntegTestCase { int finalI = i; WaitForPersistentTaskFuture future1 = new WaitForPersistentTaskFuture<>(); persistentTasksService.waitForPersistentTaskCondition(taskId, - task -> task != null && task.getStatus() != null && task.getStatus().toString() != null && - task.getStatus().toString().equals("{\"phase\":\"phase " 
+ (finalI + 1) + "\"}"), + task -> task != null && task.getState() != null && task.getState().toString() != null && + task.getState().toString().equals("{\"phase\":\"phase " + (finalI + 1) + "\"}"), TimeValue.timeValueSeconds(10), future1); assertThat(future1.get().getId(), equalTo(taskId)); } @@ -215,7 +215,7 @@ public class PersistentTasksExecutorIT extends ESIntegTestCase { assertThrows(future1, IllegalStateException.class, "timed out after 10ms"); PlainActionFuture> failedUpdateFuture = new PlainActionFuture<>(); - persistentTasksService.updateStatus(taskId, -2, new Status("should fail"), failedUpdateFuture); + persistentTasksService.sendUpdateStateRequest(taskId, -2, new State("should fail"), failedUpdateFuture); assertThrows(failedUpdateFuture, ResourceNotFoundException.class, "the task with id " + taskId + " and allocation id -2 doesn't exist"); diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksNodeServiceTests.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksNodeServiceTests.java index 5000f73445b..906ecf23205 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksNodeServiceTests.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksNodeServiceTests.java @@ -210,13 +210,12 @@ public class PersistentTasksNodeServiceTests extends ESTestCase { ClusterState state = createInitialClusterState(1, Settings.EMPTY); - Task.Status status = new TestPersistentTasksPlugin.Status("_test_phase"); + PersistentTaskState taskState = new TestPersistentTasksPlugin.State("_test_phase"); PersistentTasksCustomMetaData.Builder tasks = PersistentTasksCustomMetaData.builder(); String taskId = UUIDs.base64UUID(); TestParams taskParams = new TestParams("other_0"); - tasks.addTask(taskId, TestPersistentTasksExecutor.NAME, taskParams, - new Assignment("this_node", "test assignment on other node")); - tasks.updateTaskStatus(taskId, status); + tasks.addTask(taskId, 
TestPersistentTasksExecutor.NAME, taskParams, new Assignment("this_node", "test assignment on other node")); + tasks.updateTaskState(taskId, taskState); MetaData.Builder metaData = MetaData.builder(state.metaData()); metaData.putCustom(PersistentTasksCustomMetaData.TYPE, tasks.build()); ClusterState newClusterState = ClusterState.builder(state).metaData(metaData).build(); @@ -225,7 +224,7 @@ public class PersistentTasksNodeServiceTests extends ESTestCase { assertThat(executor.size(), equalTo(1)); assertThat(executor.get(0).params, sameInstance(taskParams)); - assertThat(executor.get(0).status, sameInstance(status)); + assertThat(executor.get(0).state, sameInstance(taskState)); assertThat(executor.get(0).task, sameInstance(nodeTask)); } @@ -331,15 +330,16 @@ public class PersistentTasksNodeServiceTests extends ESTestCase { } private class Execution { + private final PersistentTaskParams params; private final AllocatedPersistentTask task; - private final Task.Status status; + private final PersistentTaskState state; private final PersistentTasksExecutor holder; - Execution(PersistentTaskParams params, AllocatedPersistentTask task, Task.Status status, PersistentTasksExecutor holder) { + Execution(PersistentTaskParams params, AllocatedPersistentTask task, PersistentTaskState state, PersistentTasksExecutor holder) { this.params = params; this.task = task; - this.status = status; + this.state = state; this.holder = holder; } } @@ -352,11 +352,11 @@ public class PersistentTasksNodeServiceTests extends ESTestCase { } @Override - public void executeTask(Params params, - Task.Status status, - AllocatedPersistentTask task, - PersistentTasksExecutor executor) { - executions.add(new Execution(params, task, status, executor)); + public void executeTask(final Params params, + final PersistentTaskState state, + final AllocatedPersistentTask task, + final PersistentTasksExecutor executor) { + executions.add(new Execution(params, task, state, executor)); } public Execution get(int 
i) { diff --git a/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java b/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java index 97b34079387..063a861b5c3 100644 --- a/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java +++ b/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java @@ -55,7 +55,6 @@ import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.PersistentTaskPlugin; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; @@ -100,16 +99,17 @@ public class TestPersistentTasksPlugin extends Plugin implements ActionPlugin, P public List getNamedWriteables() { return Arrays.asList( new NamedWriteableRegistry.Entry(PersistentTaskParams.class, TestPersistentTasksExecutor.NAME, TestParams::new), - new NamedWriteableRegistry.Entry(Task.Status.class, TestPersistentTasksExecutor.NAME, Status::new) + new NamedWriteableRegistry.Entry(PersistentTaskState.class, TestPersistentTasksExecutor.NAME, State::new) ); } @Override public List getNamedXContent() { return Arrays.asList( - new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(TestPersistentTasksExecutor.NAME), - TestParams::fromXContent), - new NamedXContentRegistry.Entry(Task.Status.class, new ParseField(TestPersistentTasksExecutor.NAME), Status::fromXContent) + new NamedXContentRegistry.Entry(PersistentTaskParams.class, + new ParseField(TestPersistentTasksExecutor.NAME), TestParams::fromXContent), + new NamedXContentRegistry.Entry(PersistentTaskState.class, + new ParseField(TestPersistentTasksExecutor.NAME), State::fromXContent) ); } @@ -221,22 +221,22 @@ public class TestPersistentTasksPlugin extends Plugin implements 
ActionPlugin, P } } - public static class Status implements Task.Status { + public static class State implements PersistentTaskState { private final String phase; - public static final ConstructingObjectParser STATUS_PARSER = - new ConstructingObjectParser<>(TestPersistentTasksExecutor.NAME, args -> new Status((String) args[0])); + public static final ConstructingObjectParser STATE_PARSER = + new ConstructingObjectParser<>(TestPersistentTasksExecutor.NAME, args -> new State((String) args[0])); static { - STATUS_PARSER.declareString(constructorArg(), new ParseField("phase")); + STATE_PARSER.declareString(constructorArg(), new ParseField("phase")); } - public Status(String phase) { + public State(String phase) { this.phase = requireNonNull(phase, "Phase cannot be null"); } - public Status(StreamInput in) throws IOException { + public State(StreamInput in) throws IOException { phase = in.readString(); } @@ -253,11 +253,10 @@ public class TestPersistentTasksPlugin extends Plugin implements ActionPlugin, P return builder; } - public static Task.Status fromXContent(XContentParser parser) throws IOException { - return STATUS_PARSER.parse(parser, null); + public static PersistentTaskState fromXContent(XContentParser parser) throws IOException { + return STATE_PARSER.parse(parser, null); } - @Override public boolean isFragment() { return false; @@ -276,10 +275,10 @@ public class TestPersistentTasksPlugin extends Plugin implements ActionPlugin, P // Implements equals and hashcode for testing @Override public boolean equals(Object obj) { - if (obj == null || obj.getClass() != Status.class) { + if (obj == null || obj.getClass() != State.class) { return false; } - Status other = (Status) obj; + State other = (State) obj; return phase.equals(other.phase); } @@ -289,7 +288,6 @@ public class TestPersistentTasksPlugin extends Plugin implements ActionPlugin, P } } - public static class TestPersistentTasksExecutor extends PersistentTasksExecutor { public static final String NAME = 
"cluster:admin/persistent/test"; @@ -317,7 +315,7 @@ public class TestPersistentTasksPlugin extends Plugin implements ActionPlugin, P } @Override - protected void nodeOperation(AllocatedPersistentTask task, TestParams params, Task.Status status) { + protected void nodeOperation(AllocatedPersistentTask task, TestParams params, PersistentTaskState state) { logger.info("started node operation for the task {}", task); try { TestTask testTask = (TestTask) task; @@ -340,9 +338,9 @@ public class TestPersistentTasksPlugin extends Plugin implements ActionPlugin, P } else if ("update_status".equals(testTask.getOperation())) { testTask.setOperation(null); CountDownLatch latch = new CountDownLatch(1); - Status newStatus = new Status("phase " + phase.incrementAndGet()); - logger.info("updating the task status to {}", newStatus); - task.updatePersistentStatus(newStatus, new ActionListener>() { + State newState = new State("phase " + phase.incrementAndGet()); + logger.info("updating the task state to {}", newState); + task.updatePersistentTaskState(newState, new ActionListener>() { @Override public void onResponse(PersistentTask persistentTask) { logger.info("updating was successful"); @@ -540,5 +538,4 @@ public class TestPersistentTasksPlugin extends Plugin implements ActionPlugin, P } - } diff --git a/server/src/test/java/org/elasticsearch/persistent/UpdatePersistentTaskRequestTests.java b/server/src/test/java/org/elasticsearch/persistent/UpdatePersistentTaskRequestTests.java index 6e20bb00097..5ae54640f8e 100644 --- a/server/src/test/java/org/elasticsearch/persistent/UpdatePersistentTaskRequestTests.java +++ b/server/src/test/java/org/elasticsearch/persistent/UpdatePersistentTaskRequestTests.java @@ -20,9 +20,8 @@ package org.elasticsearch.persistent; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.tasks.Task; import org.elasticsearch.test.AbstractStreamableTestCase; -import 
org.elasticsearch.persistent.TestPersistentTasksPlugin.Status; +import org.elasticsearch.persistent.TestPersistentTasksPlugin.State; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor; import org.elasticsearch.persistent.UpdatePersistentTaskStatusAction.Request; @@ -32,7 +31,7 @@ public class UpdatePersistentTaskRequestTests extends AbstractStreamableTestCase @Override protected Request createTestInstance() { - return new Request(UUIDs.base64UUID(), randomLong(), new Status(randomAlphaOfLength(10))); + return new Request(UUIDs.base64UUID(), randomLong(), new State(randomAlphaOfLength(10))); } @Override @@ -43,7 +42,7 @@ public class UpdatePersistentTaskRequestTests extends AbstractStreamableTestCase @Override protected NamedWriteableRegistry getNamedWriteableRegistry() { return new NamedWriteableRegistry(Collections.singletonList( - new NamedWriteableRegistry.Entry(Task.Status.class, TestPersistentTasksExecutor.NAME, Status::new) + new NamedWriteableRegistry.Entry(PersistentTaskState.class, TestPersistentTasksExecutor.NAME, State::new) )); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java index a96de96fd4f..049089e62cf 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -28,6 +28,7 @@ import org.elasticsearch.license.PostStartBasicAction; import org.elasticsearch.license.PostStartTrialAction; import org.elasticsearch.license.PutLicenseAction; import org.elasticsearch.persistent.PersistentTaskParams; +import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.NetworkPlugin; import org.elasticsearch.plugins.Plugin; @@ -89,7 +90,7 @@ import 
org.elasticsearch.xpack.core.ml.action.UpdateProcessAction; import org.elasticsearch.xpack.core.ml.action.ValidateDetectorAction; import org.elasticsearch.xpack.core.ml.action.ValidateJobConfigAction; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; -import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus; +import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.core.monitoring.MonitoringFeatureSetUsage; import org.elasticsearch.xpack.core.rollup.RollupFeatureSetUsage; import org.elasticsearch.xpack.core.rollup.RollupField; @@ -325,9 +326,9 @@ public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPl StartDatafeedAction.DatafeedParams::new), new NamedWriteableRegistry.Entry(PersistentTaskParams.class, OpenJobAction.TASK_NAME, OpenJobAction.JobParams::new), - // ML - Task statuses - new NamedWriteableRegistry.Entry(Task.Status.class, JobTaskStatus.NAME, JobTaskStatus::new), - new NamedWriteableRegistry.Entry(Task.Status.class, DatafeedState.NAME, DatafeedState::fromStream), + // ML - Task states + new NamedWriteableRegistry.Entry(PersistentTaskState.class, JobTaskState.NAME, JobTaskState::new), + new NamedWriteableRegistry.Entry(PersistentTaskState.class, DatafeedState.NAME, DatafeedState::fromStream), new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.MACHINE_LEARNING, MachineLearningFeatureSetUsage::new), // monitoring @@ -350,7 +351,8 @@ public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPl // rollup new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.ROLLUP, RollupFeatureSetUsage::new), new NamedWriteableRegistry.Entry(PersistentTaskParams.class, RollupJob.NAME, RollupJob::new), - new NamedWriteableRegistry.Entry(Task.Status.class, RollupJobStatus.NAME, RollupJobStatus::new) + new NamedWriteableRegistry.Entry(Task.Status.class, RollupJobStatus.NAME, RollupJobStatus::new), + new 
NamedWriteableRegistry.Entry(PersistentTaskState.class, RollupJobStatus.NAME, RollupJobStatus::new) ); } @@ -365,9 +367,9 @@ public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPl StartDatafeedAction.DatafeedParams::fromXContent), new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(OpenJobAction.TASK_NAME), OpenJobAction.JobParams::fromXContent), - // ML - Task statuses - new NamedXContentRegistry.Entry(Task.Status.class, new ParseField(DatafeedState.NAME), DatafeedState::fromXContent), - new NamedXContentRegistry.Entry(Task.Status.class, new ParseField(JobTaskStatus.NAME), JobTaskStatus::fromXContent), + // ML - Task states + new NamedXContentRegistry.Entry(PersistentTaskState.class, new ParseField(DatafeedState.NAME), DatafeedState::fromXContent), + new NamedXContentRegistry.Entry(PersistentTaskState.class, new ParseField(JobTaskState.NAME), JobTaskState::fromXContent), // watcher new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(WatcherMetaData.TYPE), WatcherMetaData::fromXContent), @@ -375,8 +377,12 @@ public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPl new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(LicensesMetaData.TYPE), LicensesMetaData::fromXContent), //rollup - new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(RollupField.TASK_NAME), RollupJob::fromXContent), - new NamedXContentRegistry.Entry(Task.Status.class, new ParseField(RollupJobStatus.NAME), RollupJobStatus::fromXContent) + new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(RollupField.TASK_NAME), + RollupJob::fromXContent), + new NamedXContentRegistry.Entry(Task.Status.class, new ParseField(RollupJobStatus.NAME), + RollupJobStatus::fromXContent), + new NamedXContentRegistry.Entry(PersistentTaskState.class, new ParseField(RollupJobStatus.NAME), + RollupJobStatus::fromXContent) ); } diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java index 861f386a909..5e145306f8c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java @@ -34,7 +34,7 @@ import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedUpdate; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus; +import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.core.ml.job.groups.GroupOrJobLookup; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; @@ -402,9 +402,9 @@ public class MlMetadata implements XPackPlugin.XPackMetaDataCustom { if (allowDeleteOpenJob == false) { PersistentTask jobTask = getJobTask(jobId, tasks); if (jobTask != null) { - JobTaskStatus jobTaskStatus = (JobTaskStatus) jobTask.getStatus(); + JobTaskState jobTaskState = (JobTaskState) jobTask.getState(); throw ExceptionsHelper.conflictStatusException("Cannot delete job [" + jobId + "] because the job is " - + ((jobTaskStatus == null) ? JobState.OPENING : jobTaskStatus.getState())); + + ((jobTaskState == null) ? 
JobState.OPENING : jobTaskState.getState())); } } Job.Builder jobBuilder = new Job.Builder(job); @@ -448,7 +448,7 @@ public class MlMetadata implements XPackPlugin.XPackMetaDataCustom { public static JobState getJobState(String jobId, @Nullable PersistentTasksCustomMetaData tasks) { PersistentTask task = getJobTask(jobId, tasks); if (task != null) { - JobTaskStatus jobTaskState = (JobTaskStatus) task.getStatus(); + JobTaskState jobTaskState = (JobTaskState) task.getState(); if (jobTaskState == null) { return JobState.OPENING; } @@ -460,8 +460,8 @@ public class MlMetadata implements XPackPlugin.XPackMetaDataCustom { public static DatafeedState getDatafeedState(String datafeedId, @Nullable PersistentTasksCustomMetaData tasks) { PersistentTask task = getDatafeedTask(datafeedId, tasks); - if (task != null && task.getStatus() != null) { - return (DatafeedState) task.getStatus(); + if (task != null && task.getState() != null) { + return (DatafeedState) task.getState(); } else { // If we haven't started a datafeed then there will be no persistent task, // which is the same as if the datafeed was't started diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedState.java index 7343600a6ee..d894f7b339f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedState.java @@ -12,7 +12,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.tasks.Task; +import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; import java.io.IOException; 
@@ -20,7 +20,7 @@ import java.util.Locale; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; -public enum DatafeedState implements Task.Status { +public enum DatafeedState implements PersistentTaskState { STARTED, STOPPED, STARTING, STOPPING; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobTaskStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobTaskState.java similarity index 86% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobTaskStatus.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobTaskState.java index de102798d1c..d9ab3357319 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobTaskStatus.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobTaskState.java @@ -12,25 +12,25 @@ import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.xpack.core.ml.action.OpenJobAction; +import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; +import org.elasticsearch.xpack.core.ml.action.OpenJobAction; import java.io.IOException; import java.util.Objects; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; -public class JobTaskStatus implements Task.Status { +public class JobTaskState implements PersistentTaskState { public static final String NAME = OpenJobAction.TASK_NAME; private static ParseField STATE = new ParseField("state"); private static ParseField ALLOCATION_ID = new ParseField("allocation_id"); - private static final 
ConstructingObjectParser PARSER = + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, - args -> new JobTaskStatus((JobState) args[0], (Long) args[1])); + args -> new JobTaskState((JobState) args[0], (Long) args[1])); static { PARSER.declareField(constructorArg(), p -> { @@ -42,7 +42,7 @@ public class JobTaskStatus implements Task.Status { PARSER.declareLong(constructorArg(), ALLOCATION_ID); } - public static JobTaskStatus fromXContent(XContentParser parser) { + public static JobTaskState fromXContent(XContentParser parser) { try { return PARSER.parse(parser, null); } catch (IOException e) { @@ -53,12 +53,12 @@ public class JobTaskStatus implements Task.Status { private final JobState state; private final long allocationId; - public JobTaskStatus(JobState state, long allocationId) { + public JobTaskState(JobState state, long allocationId) { this.state = Objects.requireNonNull(state); this.allocationId = allocationId; } - public JobTaskStatus(StreamInput in) throws IOException { + public JobTaskState(StreamInput in) throws IOException { state = JobState.fromStream(in); allocationId = in.readLong(); } @@ -100,7 +100,7 @@ public class JobTaskStatus implements Task.Status { public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - JobTaskStatus that = (JobTaskStatus) o; + JobTaskState that = (JobTaskState) o; return state == that.state && Objects.equals(allocationId, that.allocationId); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatus.java index 86bc95e092c..4cbd5a3b455 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatus.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatus.java @@ -14,6 +14,7 @@ import 
org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.tasks.Task; import java.io.IOException; @@ -30,7 +31,7 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optiona * indexer's current position. When the allocated task updates its status, * it is providing a new version of this. */ -public class RollupJobStatus implements Task.Status { +public class RollupJobStatus implements Task.Status, PersistentTaskState { public static final String NAME = "xpack/rollup/job"; private final IndexerState state; @@ -73,7 +74,7 @@ public class RollupJobStatus implements Task.Status { currentPosition = in.readBoolean() ? new TreeMap<>(in.readMap()) : null; } - public IndexerState getState() { + public IndexerState getIndexerState() { return state; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java index 36bcfe92f00..083d4ce5b15 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java @@ -34,7 +34,7 @@ import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus; +import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import 
org.elasticsearch.persistent.PersistentTasksCustomMetaData; @@ -256,8 +256,8 @@ public class TransportCloseJobAction extends TransportTasksAction listener) { - JobTaskStatus taskStatus = new JobTaskStatus(JobState.CLOSING, jobTask.getAllocationId()); - jobTask.updatePersistentStatus(taskStatus, ActionListener.wrap(task -> { + JobTaskState taskState = new JobTaskState(JobState.CLOSING, jobTask.getAllocationId()); + jobTask.updatePersistentTaskState(taskState, ActionListener.wrap(task -> { // we need to fork because we are now on a network threadpool and closeJob method may take a while to complete: threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME).execute(new AbstractRunnable() { @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java index 5de79621692..e7fb0fe5fb3 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java @@ -39,12 +39,12 @@ import org.elasticsearch.index.Index; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.persistent.AllocatedPersistentTask; +import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.persistent.PersistentTasksExecutor; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -57,7 +57,7 @@ import org.elasticsearch.xpack.core.ml.action.UpdateJobAction; import 
org.elasticsearch.xpack.core.ml.job.config.DetectionRule; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus; +import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.core.ml.job.config.JobUpdate; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; @@ -208,7 +208,7 @@ public class TransportOpenJobAction extends TransportMasterNodeAction node.getId().equals(task.getExecutorNode())); for (PersistentTasksCustomMetaData.PersistentTask assignedTask : assignedTasks) { - JobTaskStatus jobTaskState = (JobTaskStatus) assignedTask.getStatus(); + JobTaskState jobTaskState = (JobTaskState) assignedTask.getState(); JobState jobState; if (jobTaskState == null || // executor node didn't have the chance to set job status to OPENING // previous executor node failed and current executor node didn't have the chance to set job status to OPENING @@ -675,14 +675,14 @@ public class TransportOpenJobAction extends TransportMasterNodeAction persistentTask) { JobState jobState = JobState.CLOSED; if (persistentTask != null) { - JobTaskStatus jobStateStatus = (JobTaskStatus) persistentTask.getStatus(); - jobState = jobStateStatus == null ? JobState.OPENING : jobStateStatus.getState(); + JobTaskState jobTaskState = (JobTaskState) persistentTask.getState(); + jobState = jobTaskState == null ? 
JobState.OPENING : jobTaskState.getState(); PersistentTasksCustomMetaData.Assignment assignment = persistentTask.getAssignment(); // This logic is only appropriate when opening a job, not when reallocating following a failure, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java index 3d261864ab4..b13ed6d6984 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java @@ -23,8 +23,8 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -274,8 +274,9 @@ public class TransportStartDatafeedAction extends TransportMasterNodeAction listener) { - DatafeedState taskStatus = DatafeedState.STOPPING; - datafeedTaskTask.updatePersistentStatus(taskStatus, ActionListener.wrap(task -> { + DatafeedState taskState = DatafeedState.STOPPING; + datafeedTask.updatePersistentTaskState(taskState, ActionListener.wrap(task -> { // we need to fork because we are now on a network threadpool threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME).execute(new AbstractRunnable() { @Override @@ -235,7 +235,7 @@ public class TransportStopDatafeedAction extends TransportTasksAction { Holder holder = new Holder(task, datafeed, datafeedJob, new ProblemTracker(auditor, job.getId()), taskHandler); runningDatafeedsOnThisNode.put(task.getAllocationId(), holder); - 
task.updatePersistentStatus(DatafeedState.STARTED, new ActionListener>() { + task.updatePersistentTaskState(DatafeedState.STARTED, new ActionListener>() { @Override public void onResponse(PersistentTask persistentTask) { taskRunner.runWhenJobIsOpened(task); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java index 0eb57ab79be..bebf0f3935d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java @@ -12,12 +12,12 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus; +import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import java.util.List; import java.util.Objects; @@ -64,11 +64,11 @@ public class DatafeedNodeSelector { PriorityFailureCollector priorityFailureCollector = new PriorityFailureCollector(); priorityFailureCollector.add(verifyIndicesActive(datafeed)); - JobTaskStatus taskStatus = null; + JobTaskState jobTaskState = null; JobState jobState = JobState.CLOSED; if (jobTask != null) { - taskStatus = (JobTaskStatus) jobTask.getStatus(); - jobState = taskStatus == null ? 
JobState.OPENING : taskStatus.getState(); + jobTaskState = (JobTaskState) jobTask.getState(); + jobState = jobTaskState == null ? JobState.OPENING : jobTaskState.getState(); } if (jobState.isAnyOf(JobState.OPENING, JobState.OPENED) == false) { @@ -78,8 +78,8 @@ public class DatafeedNodeSelector { priorityFailureCollector.add(new AssignmentFailure(reason, true)); } - if (taskStatus != null && taskStatus.isStatusStale(jobTask)) { - String reason = "cannot start datafeed [" + datafeed.getId() + "], job [" + datafeed.getJobId() + "] status is stale"; + if (jobTaskState != null && jobTaskState.isStatusStale(jobTask)) { + String reason = "cannot start datafeed [" + datafeed.getId() + "], job [" + datafeed.getJobId() + "] state is stale"; priorityFailureCollector.add(new AssignmentFailure(reason, true)); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java index d3a848ef382..b6efb688c17 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java @@ -31,7 +31,7 @@ import org.elasticsearch.xpack.core.ml.action.util.QueryPage; import org.elasticsearch.xpack.core.ml.calendars.ScheduledEvent; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus; +import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.ml.job.persistence.ScheduledEventsQueryBuilder; import org.elasticsearch.xpack.core.ml.job.process.autodetect.output.FlushAcknowledgement; import org.elasticsearch.xpack.ml.job.process.autodetect.params.AutodetectParams; @@ -623,8 +623,8 @@ public class 
AutodetectProcessManager extends AbstractComponent { } void setJobState(JobTask jobTask, JobState state) { - JobTaskStatus taskStatus = new JobTaskStatus(state, jobTask.getAllocationId()); - jobTask.updatePersistentStatus(taskStatus, new ActionListener>() { + JobTaskState jobTaskState = new JobTaskState(state, jobTask.getAllocationId()); + jobTask.updatePersistentTaskState(jobTaskState, new ActionListener>() { @Override public void onResponse(PersistentTask persistentTask) { logger.info("Successfully set job state to [{}] for job [{}]", state, jobTask.getJobId()); @@ -638,8 +638,8 @@ public class AutodetectProcessManager extends AbstractComponent { } void setJobState(JobTask jobTask, JobState state, CheckedConsumer handler) { - JobTaskStatus taskStatus = new JobTaskStatus(state, jobTask.getAllocationId()); - jobTask.updatePersistentStatus(taskStatus, new ActionListener>() { + JobTaskState jobTaskState = new JobTaskState(state, jobTask.getAllocationId()); + jobTask.updatePersistentTaskState(jobTaskState, new ActionListener>() { @Override public void onResponse(PersistentTask persistentTask) { try { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlMetadataTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlMetadataTests.java index 8049b5655d6..f6fb2db3c9b 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlMetadataTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlMetadataTests.java @@ -27,7 +27,7 @@ import org.elasticsearch.xpack.core.ml.datafeed.DatafeedUpdate; import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus; +import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.core.ml.job.config.JobTests; import 
org.elasticsearch.persistent.PersistentTasksCustomMetaData; @@ -363,7 +363,7 @@ public class MlMetadataTests extends AbstractSerializingTestCase { new PersistentTasksCustomMetaData.Assignment("bar", "test assignment")); assertEquals(JobState.OPENING, MlMetadata.getJobState("foo", tasksBuilder.build())); - tasksBuilder.updateTaskStatus(MlMetadata.jobTaskId("foo"), new JobTaskStatus(JobState.OPENED, tasksBuilder.getLastAllocationId())); + tasksBuilder.updateTaskState(MlMetadata.jobTaskId("foo"), new JobTaskState(JobState.OPENED, tasksBuilder.getLastAllocationId())); assertEquals(JobState.OPENED, MlMetadata.getJobState("foo", tasksBuilder.build())); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportCloseJobActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportCloseJobActionTests.java index f1679b8b0b9..d65fc1476e7 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportCloseJobActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportCloseJobActionTests.java @@ -314,7 +314,7 @@ public class TransportCloseJobActionTests extends ESTestCase { PersistentTasksCustomMetaData.Builder tasks) { tasks.addTask(MLMetadataField.datafeedTaskId(datafeedId), StartDatafeedAction.TASK_NAME, new StartDatafeedAction.DatafeedParams(datafeedId, startTime), new Assignment(nodeId, "test assignment")); - tasks.updateTaskStatus(MLMetadataField.datafeedTaskId(datafeedId), state); + tasks.updateTaskState(MLMetadataField.datafeedTaskId(datafeedId), state); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java index 6ef2d92d9c7..b5a315d9687 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java +++ 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java @@ -42,7 +42,7 @@ import org.elasticsearch.xpack.core.ml.job.config.DetectionRule; import org.elasticsearch.xpack.core.ml.job.config.Detector; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus; +import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.core.ml.job.config.Operator; import org.elasticsearch.xpack.core.ml.job.config.RuleCondition; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; @@ -329,7 +329,7 @@ public class TransportOpenJobActionTests extends ESTestCase { assertTrue(result.getExplanation().contains("because node exceeds [2] the maximum number of jobs [2] in opening state")); tasksBuilder = PersistentTasksCustomMetaData.builder(tasks); - tasksBuilder.updateTaskStatus(MlMetadata.jobTaskId("job_id6"), null); + tasksBuilder.updateTaskState(MlMetadata.jobTaskId("job_id6"), null); tasks = tasksBuilder.build(); csBuilder = ClusterState.builder(cs); @@ -630,7 +630,7 @@ public class TransportOpenJobActionTests extends ESTestCase { builder.addTask(MlMetadata.jobTaskId(jobId), OpenJobAction.TASK_NAME, new OpenJobAction.JobParams(jobId), new Assignment(nodeId, "test assignment")); if (jobState != null) { - builder.updateTaskStatus(MlMetadata.jobTaskId(jobId), new JobTaskStatus(jobState, builder.getLastAllocationId())); + builder.updateTaskState(MlMetadata.jobTaskId(jobId), new JobTaskState(jobState, builder.getLastAllocationId())); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedActionTests.java index a61709be424..55a0f4006bc 100644 --- 
a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedActionTests.java @@ -31,7 +31,7 @@ public class TransportStopDatafeedActionTests extends ESTestCase { PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); tasksBuilder.addTask(MLMetadataField.datafeedTaskId("foo"), StartDatafeedAction.TASK_NAME, new StartDatafeedAction.DatafeedParams("foo", 0L), new PersistentTasksCustomMetaData.Assignment("node_id", "")); - tasksBuilder.updateTaskStatus(MLMetadataField.datafeedTaskId("foo"), DatafeedState.STARTED); + tasksBuilder.updateTaskState(MLMetadataField.datafeedTaskId("foo"), DatafeedState.STARTED); tasksBuilder.build(); Job job = createDatafeedJob().build(new Date()); @@ -121,6 +121,6 @@ public class TransportStopDatafeedActionTests extends ESTestCase { taskBuilder.addTask(MLMetadataField.datafeedTaskId(datafeedId), StartDatafeedAction.TASK_NAME, new StartDatafeedAction.DatafeedParams(datafeedId, startTime), new PersistentTasksCustomMetaData.Assignment(nodeId, "test assignment")); - taskBuilder.updateTaskStatus(MLMetadataField.datafeedTaskId(datafeedId), state); + taskBuilder.updateTaskState(MLMetadataField.datafeedTaskId(datafeedId), state); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java index bd722ebf8ef..f609f0c8c5e 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java @@ -378,7 +378,7 @@ public class DatafeedManagerTests extends ESTestCase { ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; listener.onResponse(mock(PersistentTask.class)); return null; - 
}).when(task).updatePersistentStatus(any(), any()); + }).when(task).updatePersistentTaskState(any(), any()); return task; } @@ -394,7 +394,7 @@ public class DatafeedManagerTests extends ESTestCase { ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; listener.onResponse(mock(PersistentTask.class)); return null; - }).when(task).updatePersistentStatus(any(), any()); + }).when(task).updatePersistentTaskState(any(), any()); return task; } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java index 0fee78611a7..96ae3b5ef38 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java @@ -31,7 +31,7 @@ import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus; +import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.junit.Before; @@ -255,20 +255,20 @@ public class DatafeedNodeSelectorTests extends ESTestCase { PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask(job.getId(), nodeId, JobState.OPENED, tasksBuilder); // Set to lower allocationId, so job task is stale: - tasksBuilder.updateTaskStatus(MlMetadata.jobTaskId(job.getId()), new JobTaskStatus(JobState.OPENED, 0)); + tasksBuilder.updateTaskState(MlMetadata.jobTaskId(job.getId()), new JobTaskState(JobState.OPENED, 0)); tasks = tasksBuilder.build(); givenClusterState("foo", 1, 0); PersistentTasksCustomMetaData.Assignment 
result = new DatafeedNodeSelector(clusterState, resolver, "datafeed_id").selectNode(); assertNull(result.getExecutorNode()); - assertEquals("cannot start datafeed [datafeed_id], job [job_id] status is stale", + assertEquals("cannot start datafeed [datafeed_id], job [job_id] state is stale", result.getExplanation()); ElasticsearchException e = expectThrows(ElasticsearchException.class, () -> new DatafeedNodeSelector(clusterState, resolver, "datafeed_id").checkDatafeedTaskCanBeCreated()); assertThat(e.getMessage(), containsString("No node found to start datafeed [datafeed_id], allocation explanation " - + "[cannot start datafeed [datafeed_id], job [job_id] status is stale]")); + + "[cannot start datafeed [datafeed_id], job [job_id] state is stale]")); tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask(job.getId(), "node_id1", JobState.OPENED, tasksBuilder); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java index ce47fb0adf8..e3d67bb0bdb 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java @@ -39,7 +39,7 @@ import org.elasticsearch.xpack.core.ml.job.config.DataDescription; import org.elasticsearch.xpack.core.ml.job.config.Detector; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus; +import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase; @@ -211,9 +211,9 @@ public class BasicDistributedJobsIT extends BaseMlIntegTestCase { DiscoveryNode node = 
clusterState.nodes().resolveNode(task.getExecutorNode()); assertThat(node.getAttributes(), hasEntry(MachineLearning.ML_ENABLED_NODE_ATTR, "true")); assertThat(node.getAttributes(), hasEntry(MachineLearning.MAX_OPEN_JOBS_NODE_ATTR, "20")); - JobTaskStatus jobTaskStatus = (JobTaskStatus) task.getStatus(); - assertNotNull(jobTaskStatus); - assertEquals(JobState.OPENED, jobTaskStatus.getState()); + JobTaskState jobTaskState = (JobTaskState) task.getState(); + assertNotNull(jobTaskState); + assertEquals(JobState.OPENED, jobTaskState.getState()); }); logger.info("stop the only running ml node"); @@ -264,7 +264,7 @@ public class BasicDistributedJobsIT extends BaseMlIntegTestCase { for (DiscoveryNode node : event.state().nodes()) { Collection> foundTasks = tasks.findTasks(OpenJobAction.TASK_NAME, task -> { - JobTaskStatus jobTaskState = (JobTaskStatus) task.getStatus(); + JobTaskState jobTaskState = (JobTaskState) task.getState(); return node.getId().equals(task.getExecutorNode()) && (jobTaskState == null || jobTaskState.isStatusStale(task)); }); @@ -396,9 +396,9 @@ public class BasicDistributedJobsIT extends BaseMlIntegTestCase { assertThat(node.getAttributes(), hasEntry(MachineLearning.ML_ENABLED_NODE_ATTR, "true")); assertThat(node.getAttributes(), hasEntry(MachineLearning.MAX_OPEN_JOBS_NODE_ATTR, "20")); - JobTaskStatus jobTaskStatus = (JobTaskStatus) task.getStatus(); - assertNotNull(jobTaskStatus); - assertEquals(expectedState, jobTaskStatus.getState()); + JobTaskState jobTaskState = (JobTaskState) task.getState(); + assertNotNull(jobTaskState); + assertEquals(expectedState, jobTaskState.getState()); } else { assertNull(task.getExecutorNode()); } @@ -411,9 +411,9 @@ public class BasicDistributedJobsIT extends BaseMlIntegTestCase { assertEquals(numJobs, tasks.taskMap().size()); for (PersistentTask task : tasks.taskMap().values()) { assertNotNull(task.getExecutorNode()); - JobTaskStatus jobTaskStatus = (JobTaskStatus) task.getStatus(); - assertNotNull(jobTaskStatus); - 
assertEquals(JobState.OPENED, jobTaskStatus.getState()); + JobTaskState jobTaskState = (JobTaskState) task.getState(); + assertNotNull(jobTaskState); + assertEquals(JobState.OPENED, jobTaskState.getState()); } }; } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/TooManyJobsIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/TooManyJobsIT.java index 17e7b89978e..f06b73fcd40 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/TooManyJobsIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/TooManyJobsIT.java @@ -20,7 +20,7 @@ import org.elasticsearch.xpack.core.ml.action.OpenJobAction; import org.elasticsearch.xpack.core.ml.action.PutJobAction; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus; +import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager; @@ -58,7 +58,7 @@ public class TooManyJobsIT extends BaseMlIntegTestCase { assertEquals(1, tasks.taskMap().size()); // now just double check that the first job is still opened: PersistentTasksCustomMetaData.PersistentTask task = tasks.getTask(MlMetadata.jobTaskId("close-failed-job-1")); - assertEquals(JobState.OPENED, ((JobTaskStatus) task.getStatus()).getState()); + assertEquals(JobState.OPENED, ((JobTaskState) task.getState()).getState()); } public void testSingleNode() throws Exception { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/config/JobTaskStatusTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/config/JobTaskStateTests.java similarity index 53% rename from 
x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/config/JobTaskStatusTests.java rename to x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/config/JobTaskStateTests.java index 7183235b6ff..4dfd1965804 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/config/JobTaskStatusTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/config/JobTaskStateTests.java @@ -9,22 +9,22 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus; +import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; -public class JobTaskStatusTests extends AbstractSerializingTestCase { +public class JobTaskStateTests extends AbstractSerializingTestCase { @Override - protected JobTaskStatus createTestInstance() { - return new JobTaskStatus(randomFrom(JobState.values()), randomLong()); + protected JobTaskState createTestInstance() { + return new JobTaskState(randomFrom(JobState.values()), randomLong()); } @Override - protected Writeable.Reader instanceReader() { - return JobTaskStatus::new; + protected Writeable.Reader instanceReader() { + return JobTaskState::new; } @Override - protected JobTaskStatus doParseInstance(XContentParser parser) { - return JobTaskStatus.fromXContent(parser); + protected JobTaskState doParseInstance(XContentParser parser) { + return JobTaskState.fromXContent(parser); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java index c3e830553a2..fa41cf0918f 100644 --- 
a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java @@ -27,7 +27,7 @@ import org.elasticsearch.xpack.core.ml.job.config.DetectionRule; import org.elasticsearch.xpack.core.ml.job.config.Detector; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus; +import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.core.ml.job.config.JobUpdate; import org.elasticsearch.xpack.core.ml.job.config.MlFilter; import org.elasticsearch.xpack.core.ml.job.config.ModelPlotConfig; @@ -199,7 +199,7 @@ public class AutodetectProcessManagerTests extends ESTestCase { manager.openJob(jobTask, e -> {}); assertEquals(1, manager.numberOfOpenJobs()); assertTrue(manager.jobHasActiveAutodetectProcess(jobTask)); - verify(jobTask).updatePersistentStatus(eq(new JobTaskStatus(JobState.OPENED, 1L)), any()); + verify(jobTask).updatePersistentTaskState(eq(new JobTaskState(JobState.OPENED, 1L)), any()); } public void testOpenJob_exceedMaxNumJobs() { diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java index 425629c248c..50b3f21800d 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java @@ -19,6 +19,7 @@ import org.elasticsearch.client.ParentTaskAssigningClient; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.persistent.AllocatedPersistentTask; +import org.elasticsearch.persistent.PersistentTaskState; import 
org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.persistent.PersistentTasksExecutor; import org.elasticsearch.tasks.TaskId; @@ -62,7 +63,7 @@ public class RollupJobTask extends AllocatedPersistentTask implements SchedulerE } @Override - protected void nodeOperation(AllocatedPersistentTask task, @Nullable RollupJob params, Status status) { + protected void nodeOperation(AllocatedPersistentTask task, @Nullable RollupJob params, PersistentTaskState state) { RollupJobTask rollupJobTask = (RollupJobTask) task; SchedulerEngine.Job schedulerJob = new SchedulerEngine.Job(SCHEDULE_NAME + "_" + params.getConfig().getId(), new CronSchedule(params.getConfig().getCron())); @@ -80,7 +81,7 @@ public class RollupJobTask extends AllocatedPersistentTask implements SchedulerE PersistentTasksCustomMetaData.PersistentTask persistentTask, Map headers) { return new RollupJobTask(id, type, action, parentTaskId, persistentTask.getParams(), - (RollupJobStatus) persistentTask.getStatus(), client, schedulerEngine, threadPool, headers); + (RollupJobStatus) persistentTask.getState(), client, schedulerEngine, threadPool, headers); } } @@ -115,15 +116,15 @@ public class RollupJobTask extends AllocatedPersistentTask implements SchedulerE } @Override - protected void doSaveState(IndexerState state, Map position, Runnable next) { - if (state.equals(IndexerState.ABORTING)) { + protected void doSaveState(IndexerState indexerState, Map position, Runnable next) { + if (indexerState.equals(IndexerState.ABORTING)) { // If we're aborting, just invoke `next` (which is likely an onFailure handler) next.run(); } else { // Otherwise, attempt to persist our state - final RollupJobStatus status = new RollupJobStatus(state, getPosition()); - logger.debug("Updating persistent status of job [" + job.getConfig().getId() + "] to [" + state.toString() + "]"); - updatePersistentStatus(status, ActionListener.wrap(task -> next.run(), exc -> next.run())); + final RollupJobStatus state = 
new RollupJobStatus(indexerState, getPosition()); + logger.debug("Updating persistent state of job [" + job.getConfig().getId() + "] to [" + indexerState.toString() + "]"); + updatePersistentTaskState(state, ActionListener.wrap(task -> next.run(), exc -> next.run())); } } @@ -148,7 +149,7 @@ public class RollupJobTask extends AllocatedPersistentTask implements SchedulerE private final ThreadPool threadPool; private final RollupIndexer indexer; - RollupJobTask(long id, String type, String action, TaskId parentTask, RollupJob job, RollupJobStatus status, + RollupJobTask(long id, String type, String action, TaskId parentTask, RollupJob job, RollupJobStatus state, Client client, SchedulerEngine schedulerEngine, ThreadPool threadPool, Map headers) { super(id, type, action, RollupField.NAME + "_" + job.getConfig().getId(), parentTask, headers); this.job = job; @@ -158,16 +159,17 @@ public class RollupJobTask extends AllocatedPersistentTask implements SchedulerE // If status is not null, we are resuming rather than starting fresh. Map initialPosition = null; IndexerState initialState = IndexerState.STOPPED; - if (status != null) { - logger.debug("We have existing status, setting state to [" + status.getState() + "] " + - "and current position to [" + status.getPosition() + "] for job [" + job.getConfig().getId() + "]"); - if (status.getState().equals(IndexerState.INDEXING)) { + if (state != null) { + final IndexerState existingState = state.getIndexerState(); + logger.debug("We have existing state, setting state to [" + existingState + "] " + + "and current position to [" + state.getPosition() + "] for job [" + job.getConfig().getId() + "]"); + if (existingState.equals(IndexerState.INDEXING)) { /* * If we were indexing, we have to reset back to STARTED otherwise the indexer will be "stuck" thinking * it is indexing but without the actual indexing thread running. 
*/ initialState = IndexerState.STARTED; - } else if (status.getState().equals(IndexerState.ABORTING) || status.getState().equals(IndexerState.STOPPING)) { + } else if (existingState.equals(IndexerState.ABORTING) || existingState.equals(IndexerState.STOPPING)) { // It shouldn't be possible to persist ABORTING, but if for some reason it does, // play it safe and restore the job as STOPPED. An admin will have to clean it up, // but it won't be running, and won't delete itself either. Safest option. @@ -175,9 +177,9 @@ public class RollupJobTask extends AllocatedPersistentTask implements SchedulerE // to restore as STOPEPD initialState = IndexerState.STOPPED; } else { - initialState = status.getState(); + initialState = existingState; } - initialPosition = status.getPosition(); + initialPosition = state.getPosition(); } this.indexer = new ClientRollupPageManager(job, initialState, initialPosition, new ParentTaskAssigningClient(client, new TaskId(getPersistentTaskId()))); @@ -227,20 +229,20 @@ public class RollupJobTask extends AllocatedPersistentTask implements SchedulerE + " state was [" + newState + "]")); return; } - final RollupJobStatus status = new RollupJobStatus(IndexerState.STARTED, indexer.getPosition()); - logger.debug("Updating status for rollup job [" + job.getConfig().getId() + "] to [" + status.getState() + "][" + - status.getPosition() + "]"); - updatePersistentStatus(status, + final RollupJobStatus state = new RollupJobStatus(IndexerState.STARTED, indexer.getPosition()); + logger.debug("Updating state for rollup job [" + job.getConfig().getId() + "] to [" + state.getIndexerState() + "][" + + state.getPosition() + "]"); + updatePersistentTaskState(state, ActionListener.wrap( (task) -> { - logger.debug("Succesfully updated status for rollup job [" + job.getConfig().getId() + "] to [" - + status.getState() + "][" + status.getPosition() + "]"); + logger.debug("Succesfully updated state for rollup job [" + job.getConfig().getId() + "] to [" + + 
state.getIndexerState() + "][" + state.getPosition() + "]"); listener.onResponse(new StartRollupJobAction.Response(true)); }, (exc) -> { listener.onFailure( - new ElasticsearchException("Error while updating status for rollup job [" + job.getConfig().getId() - + "] to [" + status.getState() + "].", exc) + new ElasticsearchException("Error while updating state for rollup job [" + job.getConfig().getId() + + "] to [" + state.getIndexerState() + "].", exc) ); } ) @@ -268,17 +270,17 @@ public class RollupJobTask extends AllocatedPersistentTask implements SchedulerE case STOPPING: // update the persistent state only if there is no background job running, // otherwise the state is updated by the indexer when the background job detects the STOPPING state. - RollupJobStatus status = new RollupJobStatus(IndexerState.STOPPED, indexer.getPosition()); - updatePersistentStatus(status, + RollupJobStatus state = new RollupJobStatus(IndexerState.STOPPED, indexer.getPosition()); + updatePersistentTaskState(state, ActionListener.wrap( (task) -> { - logger.debug("Succesfully updated status for rollup job [" + job.getConfig().getId() - + "] to [" + status.getState() + "]"); + logger.debug("Succesfully updated state for rollup job [" + job.getConfig().getId() + + "] to [" + state.getIndexerState() + "]"); listener.onResponse(new StopRollupJobAction.Response(true)); }, (exc) -> { - listener.onFailure(new ElasticsearchException("Error while updating status for rollup job [" - + job.getConfig().getId() + "] to [" + status.getState() + "].", exc)); + listener.onFailure(new ElasticsearchException("Error while updating state for rollup job [" + + job.getConfig().getId() + "] to [" + state.getIndexerState() + "].", exc)); }) ); break; diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupIT.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupIT.java index ce8bf936d97..3f930cb4298 100644 --- 
a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupIT.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupIT.java @@ -7,10 +7,8 @@ package org.elasticsearch.xpack.rollup; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.indices.get.GetIndexResponse; import org.elasticsearch.action.bulk.BulkRequestBuilder; -import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; @@ -27,7 +25,6 @@ import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.InternalDateHistogram; import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.Netty4Plugin; @@ -104,7 +101,7 @@ public class RollupIT extends ESIntegTestCase { } @Before - public void createIndex() throws Exception { + public void createIndex() { client().admin().indices().prepareCreate("test-1").addMapping("doc", "{\"doc\": {\"properties\": {" + "\"date_histo\": {\"type\": \"date\"}, " + "\"histo\": {\"type\": \"integer\"}, " + @@ -125,7 +122,7 @@ public class RollupIT extends ESIntegTestCase { } } } - BulkResponse response = bulk.get(); + bulk.get(); client().admin().indices().prepareRefresh("test-1").get(); } @@ -195,27 +192,23 @@ public class RollupIT extends ESIntegTestCase { // Make sure it started ESTestCase.assertBusy(() -> { - ListTasksResponse tasksResponse = 
client().admin().cluster().prepareListTasks().setDetailed(true).get(); - - RollupJobStatus rollupJobStatus = getRollupJobStatus(tasksResponse, "testIndexPattern"); - if (rollupJobStatus == null) {; + RollupJobStatus rollupJobStatus = getRollupJobStatus("testIndexPattern"); + if (rollupJobStatus == null) { fail("null"); } - IndexerState state = rollupJobStatus.getState(); + IndexerState state = rollupJobStatus.getIndexerState(); assertTrue(state.equals(IndexerState.STARTED) || state.equals(IndexerState.INDEXING)); }, 60, TimeUnit.SECONDS); // And wait for it to finish ESTestCase.assertBusy(() -> { - ListTasksResponse tasksResponse = client().admin().cluster().prepareListTasks().setDetailed(true).get(); - - RollupJobStatus rollupJobStatus = getRollupJobStatus(tasksResponse, "testIndexPattern"); + RollupJobStatus rollupJobStatus = getRollupJobStatus("testIndexPattern"); if (rollupJobStatus == null) { fail("null"); } - IndexerState state = rollupJobStatus.getState(); + IndexerState state = rollupJobStatus.getIndexerState(); assertTrue(state.equals(IndexerState.STARTED) && rollupJobStatus.getPosition() != null); }, 60, TimeUnit.SECONDS); @@ -274,23 +267,20 @@ public class RollupIT extends ESIntegTestCase { // Make sure it started ESTestCase.assertBusy(() -> { - ListTasksResponse tasksResponse = client().admin().cluster().prepareListTasks().setDetailed(true).get(); - - RollupJobStatus rollupJobStatus = getRollupJobStatus(tasksResponse, "job1"); + RollupJobStatus rollupJobStatus = getRollupJobStatus("job1"); if (rollupJobStatus == null) { fail("null"); } - IndexerState state = rollupJobStatus.getState(); + IndexerState state = rollupJobStatus.getIndexerState(); assertTrue(state.equals(IndexerState.STARTED) || state.equals(IndexerState.INDEXING)); }, 60, TimeUnit.SECONDS); //but not the other task ESTestCase.assertBusy(() -> { - ListTasksResponse tasksResponse = client().admin().cluster().prepareListTasks().setDetailed(true).get(); - RollupJobStatus rollupJobStatus = 
getRollupJobStatus(tasksResponse, "job2"); + RollupJobStatus rollupJobStatus = getRollupJobStatus("job2"); - IndexerState state = rollupJobStatus.getState(); + IndexerState state = rollupJobStatus.getIndexerState(); assertTrue(state.equals(IndexerState.STOPPED)); }, 60, TimeUnit.SECONDS); @@ -301,9 +291,7 @@ public class RollupIT extends ESIntegTestCase { // Make sure the first job's task is gone ESTestCase.assertBusy(() -> { - ListTasksResponse tasksResponse = client().admin().cluster().prepareListTasks().setDetailed(true).get(); - - RollupJobStatus rollupJobStatus = getRollupJobStatus(tasksResponse, "job1"); + RollupJobStatus rollupJobStatus = getRollupJobStatus("job1"); assertTrue(rollupJobStatus == null); }, 60, TimeUnit.SECONDS); @@ -320,10 +308,9 @@ public class RollupIT extends ESIntegTestCase { // and still STOPPED ESTestCase.assertBusy(() -> { - ListTasksResponse tasksResponse = client().admin().cluster().prepareListTasks().setDetailed(true).get(); - RollupJobStatus rollupJobStatus = getRollupJobStatus(tasksResponse, "job2"); + RollupJobStatus rollupJobStatus = getRollupJobStatus("job2"); - IndexerState state = rollupJobStatus.getState(); + IndexerState state = rollupJobStatus.getIndexerState(); assertTrue(state.equals(IndexerState.STOPPED)); }, 60, TimeUnit.SECONDS); } @@ -404,19 +391,17 @@ public class RollupIT extends ESIntegTestCase { Assert.assertThat(response.isStarted(), equalTo(true)); ESTestCase.assertBusy(() -> { - ListTasksResponse tasksResponse = client().admin().cluster().prepareListTasks().setDetailed(true).get(); - RollupJobStatus rollupJobStatus = getRollupJobStatus(tasksResponse, taskId); + RollupJobStatus rollupJobStatus = getRollupJobStatus(taskId); if (rollupJobStatus == null) { fail("null"); } - IndexerState state = rollupJobStatus.getState(); + IndexerState state = rollupJobStatus.getIndexerState(); logger.error("state: [" + state + "]"); assertTrue(state.equals(IndexerState.STARTED) && rollupJobStatus.getPosition() != null); }, 60, 
TimeUnit.SECONDS); - ListTasksResponse tasksResponse = client().admin().cluster().prepareListTasks().setDetailed(true).get(); - RollupJobStatus rollupJobStatus = getRollupJobStatus(tasksResponse, taskId); + RollupJobStatus rollupJobStatus = getRollupJobStatus(taskId); if (rollupJobStatus == null) { Assert.fail("rollup job status should not be null"); } @@ -481,11 +466,13 @@ public class RollupIT extends ESIntegTestCase { } } - private RollupJobStatus getRollupJobStatus(ListTasksResponse tasksResponse, String taskId) { - for (TaskInfo task : tasksResponse.getTasks()) { - if (task.getDescription().equals("rollup_" + taskId)) { - return ((RollupJobStatus) task.getStatus()); - } + private RollupJobStatus getRollupJobStatus(final String taskId) { + final GetRollupJobsAction.Request request = new GetRollupJobsAction.Request(taskId); + final GetRollupJobsAction.Response response = client().execute(GetRollupJobsAction.INSTANCE, request).actionGet(); + + if (response.getJobs() != null && response.getJobs().isEmpty() == false) { + assertThat("Expect 1 rollup job with id " + taskId, response.getJobs().size(), equalTo(1)); + return response.getJobs().iterator().next().getStatus(); } return null; } @@ -498,13 +485,13 @@ public class RollupIT extends ESIntegTestCase { for (GetRollupJobsAction.JobWrapper job : response.getJobs()) { StopRollupJobAction.Request stopRequest = new StopRollupJobAction.Request(job.getJob().getId()); try { - StopRollupJobAction.Response stopResponse = client().execute(StopRollupJobAction.INSTANCE, stopRequest).get(); + client().execute(StopRollupJobAction.INSTANCE, stopRequest).get(); } catch (ElasticsearchException e) { // } DeleteRollupJobAction.Request deleteRequest = new DeleteRollupJobAction.Request(job.getJob().getId()); - DeleteRollupJobAction.Response deleteResponse = client().execute(DeleteRollupJobAction.INSTANCE, deleteRequest).get(); + client().execute(DeleteRollupJobAction.INSTANCE, deleteRequest).get(); } } } diff --git 
a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java index d12be5e6fc1..ffcae267340 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.client.Client; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregation; @@ -64,7 +65,7 @@ public class RollupJobTaskTests extends ESTestCase { SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STOPPED)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); assertThat(((RollupJobStatus)task.getStatus()).getPosition().size(), equalTo(1)); assertTrue(((RollupJobStatus)task.getStatus()).getPosition().containsKey("foo")); } @@ -77,7 +78,7 @@ public class RollupJobTaskTests extends ESTestCase { SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STOPPED)); + 
assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); assertThat(((RollupJobStatus)task.getStatus()).getPosition().size(), equalTo(1)); assertTrue(((RollupJobStatus)task.getStatus()).getPosition().containsKey("foo")); } @@ -90,7 +91,7 @@ public class RollupJobTaskTests extends ESTestCase { SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STOPPED)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); assertThat(((RollupJobStatus)task.getStatus()).getPosition().size(), equalTo(1)); assertTrue(((RollupJobStatus)task.getStatus()).getPosition().containsKey("foo")); } @@ -103,7 +104,7 @@ public class RollupJobTaskTests extends ESTestCase { SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STARTED)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED)); assertThat(((RollupJobStatus)task.getStatus()).getPosition().size(), equalTo(1)); assertTrue(((RollupJobStatus)task.getStatus()).getPosition().containsKey("foo")); } @@ -116,7 +117,7 @@ public class RollupJobTaskTests extends ESTestCase { SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STARTED)); + 
assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED)); assertThat(((RollupJobStatus)task.getStatus()).getPosition().size(), equalTo(1)); assertTrue(((RollupJobStatus)task.getStatus()).getPosition().containsKey("foo")); } @@ -128,7 +129,7 @@ public class RollupJobTaskTests extends ESTestCase { SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, null, client, schedulerEngine, pool, Collections.emptyMap()); - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STOPPED)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); assertNull(((RollupJobStatus)task.getStatus()).getPosition()); } @@ -140,7 +141,7 @@ public class RollupJobTaskTests extends ESTestCase { SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STARTED)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED)); assertThat(((RollupJobStatus)task.getStatus()).getPosition().size(), equalTo(1)); assertTrue(((RollupJobStatus)task.getStatus()).getPosition().containsKey("foo")); @@ -172,13 +173,14 @@ public class RollupJobTaskTests extends ESTestCase { RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, null, client, schedulerEngine, pool, Collections.emptyMap()) { @Override - public void updatePersistentStatus(Status status, ActionListener> listener) { - assertThat(status, instanceOf(RollupJobStatus.class)); + public void updatePersistentTaskState(PersistentTaskState taskState, + ActionListener> listener) { + assertThat(taskState, 
instanceOf(RollupJobStatus.class)); int c = counter.get(); if (c == 0) { - assertThat(((RollupJobStatus) status).getState(), equalTo(IndexerState.STARTED)); + assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STARTED)); } else if (c == 1) { - assertThat(((RollupJobStatus) status).getState(), equalTo(IndexerState.STOPPED)); + assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STOPPED)); } else { fail("Should not have updated persistent statuse > 2 times"); } @@ -187,7 +189,7 @@ public class RollupJobTaskTests extends ESTestCase { counter.incrementAndGet(); } }; - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STOPPED)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); assertNull(((RollupJobStatus)task.getStatus()).getPosition()); CountDownLatch latch = new CountDownLatch(1); @@ -195,7 +197,7 @@ public class RollupJobTaskTests extends ESTestCase { @Override public void onResponse(StartRollupJobAction.Response response) { assertTrue(response.isStarted()); - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STARTED)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED)); latch.countDown(); } @@ -207,7 +209,7 @@ public class RollupJobTaskTests extends ESTestCase { latch.await(3, TimeUnit.SECONDS); task.triggered(new SchedulerEngine.Event(RollupJobTask.SCHEDULE_NAME + "_" + job.getConfig().getId(), 123, 123)); - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.INDEXING)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.INDEXING)); assertThat(task.getStats().getNumInvocations(), equalTo(1L)); task.stop(new ActionListener() { @@ -248,14 +250,15 @@ public class RollupJobTaskTests extends ESTestCase { RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), 
job, status, client, schedulerEngine, pool, Collections.emptyMap()) { @Override - public void updatePersistentStatus(Status status, ActionListener> listener) { - assertThat(status, instanceOf(RollupJobStatus.class)); - assertThat(((RollupJobStatus)status).getState(), equalTo(IndexerState.STARTED)); + public void updatePersistentTaskState(PersistentTaskState taskState, + ActionListener> listener) { + assertThat(taskState, instanceOf(RollupJobStatus.class)); + assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STARTED)); listener.onResponse(new PersistentTasksCustomMetaData.PersistentTask<>("foo", RollupField.TASK_NAME, job, 1, new PersistentTasksCustomMetaData.Assignment("foo", "foo"))); } }; - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STOPPED)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); assertThat(((RollupJobStatus)task.getStatus()).getPosition().size(), equalTo(1)); assertTrue(((RollupJobStatus)task.getStatus()).getPosition().containsKey("foo")); @@ -264,7 +267,7 @@ public class RollupJobTaskTests extends ESTestCase { @Override public void onResponse(StartRollupJobAction.Response response) { assertTrue(response.isStarted()); - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STARTED)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED)); latch.countDown(); } @@ -285,14 +288,15 @@ public class RollupJobTaskTests extends ESTestCase { RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()) { @Override - public void updatePersistentStatus(Status status, ActionListener> listener) { - assertThat(status, instanceOf(RollupJobStatus.class)); - assertThat(((RollupJobStatus)status).getState(), equalTo(IndexerState.STARTED)); + public void 
updatePersistentTaskState(PersistentTaskState taskState, + ActionListener> listener) { + assertThat(taskState, instanceOf(RollupJobStatus.class)); + assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STARTED)); listener.onResponse(new PersistentTasksCustomMetaData.PersistentTask<>("foo", RollupField.TASK_NAME, job, 1, new PersistentTasksCustomMetaData.Assignment("foo", "foo"))); } }; - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STOPPED)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); assertThat(((RollupJobStatus)task.getStatus()).getPosition().size(), equalTo(1)); assertTrue(((RollupJobStatus)task.getStatus()).getPosition().containsKey("foo")); @@ -301,7 +305,7 @@ public class RollupJobTaskTests extends ESTestCase { @Override public void onResponse(StartRollupJobAction.Response response) { assertTrue(response.isStarted()); - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STARTED)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED)); latch.countDown(); } @@ -313,7 +317,7 @@ public class RollupJobTaskTests extends ESTestCase { latch.await(3, TimeUnit.SECONDS); task.triggered(new SchedulerEngine.Event("unrelated", 123, 123)); - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STARTED)); // Should still be started, not INDEXING + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED)); } public void testTrigger() throws InterruptedException { @@ -325,14 +329,15 @@ public class RollupJobTaskTests extends ESTestCase { RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, null, client, schedulerEngine, pool, Collections.emptyMap()) { @Override - public void updatePersistentStatus(Status status, ActionListener> listener) { - assertThat(status, 
instanceOf(RollupJobStatus.class)); - assertThat(((RollupJobStatus)status).getState(), equalTo(IndexerState.STARTED)); + public void updatePersistentTaskState(PersistentTaskState taskState, + ActionListener> listener) { + assertThat(taskState, instanceOf(RollupJobStatus.class)); + assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STARTED)); listener.onResponse(new PersistentTasksCustomMetaData.PersistentTask<>("foo", RollupField.TASK_NAME, job, 1, new PersistentTasksCustomMetaData.Assignment("foo", "foo"))); } }; - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STOPPED)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); assertNull(((RollupJobStatus)task.getStatus()).getPosition()); CountDownLatch latch = new CountDownLatch(1); @@ -340,7 +345,7 @@ public class RollupJobTaskTests extends ESTestCase { @Override public void onResponse(StartRollupJobAction.Response response) { assertTrue(response.isStarted()); - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STARTED)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED)); latch.countDown(); } @@ -352,7 +357,7 @@ public class RollupJobTaskTests extends ESTestCase { latch.await(3, TimeUnit.SECONDS); task.triggered(new SchedulerEngine.Event(RollupJobTask.SCHEDULE_NAME + "_" + job.getConfig().getId(), 123, 123)); - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.INDEXING)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.INDEXING)); assertThat(task.getStats().getNumInvocations(), equalTo(1L)); } @@ -392,11 +397,12 @@ public class RollupJobTaskTests extends ESTestCase { RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, null, client, schedulerEngine, pool, Collections.emptyMap()) { @Override - public void 
updatePersistentStatus(Status status, ActionListener> listener) { + public void updatePersistentTaskState(PersistentTaskState taskState, + ActionListener> listener) { Integer counterValue = counter.getAndIncrement(); if (counterValue == 0) { - assertThat(status, instanceOf(RollupJobStatus.class)); - assertThat(((RollupJobStatus) status).getState(), equalTo(IndexerState.STARTED)); + assertThat(taskState, instanceOf(RollupJobStatus.class)); + assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STARTED)); listener.onResponse(new PersistentTasksCustomMetaData.PersistentTask<>("foo", RollupField.TASK_NAME, job, 1, new PersistentTasksCustomMetaData.Assignment("foo", "foo"))); } else if (counterValue == 1) { @@ -405,14 +411,14 @@ public class RollupJobTaskTests extends ESTestCase { } }; - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STOPPED)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); assertNull(((RollupJobStatus)task.getStatus()).getPosition()); task.start(new ActionListener() { @Override public void onResponse(StartRollupJobAction.Response response) { assertTrue(response.isStarted()); - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STARTED)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED)); started.set(true); } @@ -424,7 +430,7 @@ public class RollupJobTaskTests extends ESTestCase { ESTestCase.awaitBusy(started::get); task.triggered(new SchedulerEngine.Event(RollupJobTask.SCHEDULE_NAME + "_" + job.getConfig().getId(), 123, 123)); - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.INDEXING)); // Should still be started, not INDEXING + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.INDEXING)); assertThat(task.getStats().getNumInvocations(), equalTo(1L)); // Allow search response to return now 
latch.countDown(); @@ -475,11 +481,12 @@ public class RollupJobTaskTests extends ESTestCase { RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, null, client, schedulerEngine, pool, Collections.emptyMap()) { @Override - public void updatePersistentStatus(Status status, ActionListener> listener) { + public void updatePersistentTaskState(PersistentTaskState taskState, + ActionListener> listener) { Integer counterValue = counter.getAndIncrement(); if (counterValue == 0) { - assertThat(status, instanceOf(RollupJobStatus.class)); - assertThat(((RollupJobStatus) status).getState(), equalTo(IndexerState.STARTED)); + assertThat(taskState, instanceOf(RollupJobStatus.class)); + assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STARTED)); listener.onResponse(new PersistentTasksCustomMetaData.PersistentTask<>("foo", RollupField.TASK_NAME, job, 1, new PersistentTasksCustomMetaData.Assignment("foo", "foo"))); } else if (counterValue == 1) { @@ -488,14 +495,14 @@ public class RollupJobTaskTests extends ESTestCase { } }; - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STOPPED)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); assertNull(((RollupJobStatus)task.getStatus()).getPosition()); task.start(new ActionListener() { @Override public void onResponse(StartRollupJobAction.Response response) { assertTrue(response.isStarted()); - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STARTED)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED)); started.set(true); } @@ -507,7 +514,7 @@ public class RollupJobTaskTests extends ESTestCase { ESTestCase.awaitBusy(started::get); task.triggered(new SchedulerEngine.Event(RollupJobTask.SCHEDULE_NAME + "_" + job.getConfig().getId(), 123, 123)); - assertThat(((RollupJobStatus)task.getStatus()).getState(), 
equalTo(IndexerState.INDEXING)); // Should still be started, not INDEXING + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.INDEXING)); assertThat(task.getStats().getNumInvocations(), equalTo(1L)); // Allow search response to return now latch.countDown(); @@ -524,7 +531,7 @@ public class RollupJobTaskTests extends ESTestCase { SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STOPPED)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); CountDownLatch latch = new CountDownLatch(1); task.stop(new ActionListener() { @@ -553,15 +560,16 @@ public class RollupJobTaskTests extends ESTestCase { RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, null, client, schedulerEngine, pool, Collections.emptyMap()) { @Override - public void updatePersistentStatus(Status status, ActionListener> listener) { - assertThat(status, instanceOf(RollupJobStatus.class)); + public void updatePersistentTaskState(PersistentTaskState taskState, + ActionListener> listener) { + assertThat(taskState, instanceOf(RollupJobStatus.class)); int c = counter.get(); if (c == 0) { - assertThat(((RollupJobStatus) status).getState(), equalTo(IndexerState.STARTED)); + assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STARTED)); } else if (c == 1) { - assertThat(((RollupJobStatus) status).getState(), equalTo(IndexerState.STOPPED)); + assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STOPPED)); } else if (c == 2) { - assertThat(((RollupJobStatus) status).getState(), equalTo(IndexerState.STOPPED)); + assertThat(((RollupJobStatus) taskState).getIndexerState(), 
equalTo(IndexerState.STOPPED)); } else { fail("Should not have updated persistent statuse > 3 times"); } @@ -571,7 +579,7 @@ public class RollupJobTaskTests extends ESTestCase { } }; - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STOPPED)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); assertNull(((RollupJobStatus)task.getStatus()).getPosition()); CountDownLatch latch = new CountDownLatch(1); @@ -579,7 +587,7 @@ public class RollupJobTaskTests extends ESTestCase { @Override public void onResponse(StartRollupJobAction.Response response) { assertTrue(response.isStarted()); - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STARTED)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED)); latch.countDown(); } @@ -591,7 +599,7 @@ public class RollupJobTaskTests extends ESTestCase { latch.await(3, TimeUnit.SECONDS); task.triggered(new SchedulerEngine.Event(RollupJobTask.SCHEDULE_NAME + "_" + job.getConfig().getId(), 123, 123)); - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.INDEXING)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.INDEXING)); assertThat(task.getStats().getNumInvocations(), equalTo(1L)); task.stop(new ActionListener() { @@ -642,7 +650,7 @@ public class RollupJobTaskTests extends ESTestCase { latch.countDown(); } }; - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STOPPED)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); task.onCancelled(); task.stop(new ActionListener() { diff --git a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeAutodetectIntegTestCase.java b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeAutodetectIntegTestCase.java index 
f70efc72506..9057db476ad 100644 --- a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeAutodetectIntegTestCase.java +++ b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeAutodetectIntegTestCase.java @@ -26,13 +26,13 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.persistent.PersistentTaskParams; +import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.search.sort.SortOrder; -import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.SecuritySettingsSourceField; import org.elasticsearch.transport.Netty4Plugin; @@ -70,7 +70,7 @@ import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus; +import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.core.ml.job.config.JobUpdate; import org.elasticsearch.xpack.core.ml.job.config.MlFilter; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; @@ -449,8 +449,8 @@ abstract class MlNativeAutodetectIntegTestCase extends ESIntegTestCase { StartDatafeedAction.DatafeedParams::new)); entries.add(new NamedWriteableRegistry.Entry(PersistentTaskParams.class, OpenJobAction.TASK_NAME, OpenJobAction.JobParams::new)); - entries.add(new NamedWriteableRegistry.Entry(Task.Status.class, JobTaskStatus.NAME, 
JobTaskStatus::new)); - entries.add(new NamedWriteableRegistry.Entry(Task.Status.class, DatafeedState.NAME, DatafeedState::fromStream)); + entries.add(new NamedWriteableRegistry.Entry(PersistentTaskState.class, JobTaskState.NAME, JobTaskState::new)); + entries.add(new NamedWriteableRegistry.Entry(PersistentTaskState.class, DatafeedState.NAME, DatafeedState::fromStream)); entries.add(new NamedWriteableRegistry.Entry(ClusterState.Custom.class, TokenMetaData.TYPE, TokenMetaData::new)); final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(entries); ClusterState masterClusterState = client().admin().cluster().prepareState().all().get().getState(); From ca00deb8addf6cfc5f18ec0e7abd18236f9dc6c3 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Fri, 15 Jun 2018 10:54:13 +0100 Subject: [PATCH 07/15] [ML] Re-enable tests muted in #30982 --- .../rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml | 3 --- .../rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml | 6 ------ 2 files changed, 9 deletions(-) diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml index c1b238422e9..ba0f4d5091e 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml @@ -1,8 +1,5 @@ --- "Test get old cluster job": - - skip: - version: "all" - reason: "@AwaitsFix: https://github.com/elastic/elasticsearch/issues/30982" - do: xpack.ml.get_jobs: job_id: old-cluster-job diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml index 6634722fac9..bb47524b41d 100644 --- 
a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml @@ -8,9 +8,6 @@ setup: --- "Test open old jobs": - - skip: - version: "all" - reason: "@AwaitsFix: https://github.com/elastic/elasticsearch/issues/30982" - do: xpack.ml.open_job: job_id: old-cluster-job @@ -77,9 +74,6 @@ setup: --- "Test job with no model memory limit has established model memory after reopening": - - skip: - version: "all" - reason: "@AwaitsFix: https://github.com/elastic/elasticsearch/issues/30982" - do: xpack.ml.open_job: job_id: no-model-memory-limit-job From a0d6c19e7568077ad97a047c9b985c468ed6a465 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Fri, 15 Jun 2018 11:56:16 +0200 Subject: [PATCH 08/15] Add details section for dcg ranking metric (#31177) While the other two ranking evaluation metrics (precicion and reciprocal rank) already provide a more detailed output for how their score is calculated, the discounted cumulative gain metric (dcg) and its normalized variant are lacking this until now. Its not really clear which level of detail might be useful for debugging and understanding the final metric calculation, but this change adds a `metric_details` section to REST output that contains some information about the evaluation details. 
--- .../client/RestHighLevelClientTests.java | 6 +- .../rankeval/DiscountedCumulativeGain.java | 142 ++++++++++++++++-- .../RankEvalNamedXContentProvider.java | 2 + .../index/rankeval/RankEvalPlugin.java | 5 +- .../DiscountedCumulativeGainTests.java | 26 +++- .../index/rankeval/EvalQueryQualityTests.java | 14 +- 6 files changed, 174 insertions(+), 21 deletions(-) diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java index 49a84146dc8..2925062e0e7 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.client; import com.fasterxml.jackson.core.JsonParseException; + import org.apache.http.Header; import org.apache.http.HttpEntity; import org.apache.http.HttpHost; @@ -607,7 +608,7 @@ public class RestHighLevelClientTests extends ESTestCase { public void testProvidedNamedXContents() { List namedXContents = RestHighLevelClient.getProvidedNamedXContents(); - assertEquals(7, namedXContents.size()); + assertEquals(8, namedXContents.size()); Map, Integer> categories = new HashMap<>(); List names = new ArrayList<>(); for (NamedXContentRegistry.Entry namedXContent : namedXContents) { @@ -625,9 +626,10 @@ public class RestHighLevelClientTests extends ESTestCase { assertTrue(names.contains(PrecisionAtK.NAME)); assertTrue(names.contains(DiscountedCumulativeGain.NAME)); assertTrue(names.contains(MeanReciprocalRank.NAME)); - assertEquals(Integer.valueOf(2), categories.get(MetricDetail.class)); + assertEquals(Integer.valueOf(3), categories.get(MetricDetail.class)); assertTrue(names.contains(PrecisionAtK.NAME)); assertTrue(names.contains(MeanReciprocalRank.NAME)); + assertTrue(names.contains(DiscountedCumulativeGain.NAME)); } private static class 
TrackingActionListener implements ActionListener { diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGain.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGain.java index 13926d7d362..01a6e35299b 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGain.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGain.java @@ -36,6 +36,7 @@ import java.util.Objects; import java.util.Optional; import java.util.stream.Collectors; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; import static org.elasticsearch.index.rankeval.EvaluationMetric.joinHitsWithRatings; @@ -129,26 +130,31 @@ public class DiscountedCumulativeGain implements EvaluationMetric { .collect(Collectors.toList()); List ratedHits = joinHitsWithRatings(hits, ratedDocs); List ratingsInSearchHits = new ArrayList<>(ratedHits.size()); + int unratedResults = 0; for (RatedSearchHit hit : ratedHits) { - // unknownDocRating might be null, which means it will be unrated docs are - // ignored in the dcg calculation - // we still need to add them as a placeholder so the rank of the subsequent - // ratings is correct + // unknownDocRating might be null, in which case unrated docs will be ignored in the dcg calculation. 
+ // we still need to add them as a placeholder so the rank of the subsequent ratings is correct ratingsInSearchHits.add(hit.getRating().orElse(unknownDocRating)); + if (hit.getRating().isPresent() == false) { + unratedResults++; + } } - double dcg = computeDCG(ratingsInSearchHits); + final double dcg = computeDCG(ratingsInSearchHits); + double result = dcg; + double idcg = 0; if (normalize) { Collections.sort(allRatings, Comparator.nullsLast(Collections.reverseOrder())); - double idcg = computeDCG(allRatings.subList(0, Math.min(ratingsInSearchHits.size(), allRatings.size()))); - if (idcg > 0) { - dcg = dcg / idcg; + idcg = computeDCG(allRatings.subList(0, Math.min(ratingsInSearchHits.size(), allRatings.size()))); + if (idcg != 0) { + result = dcg / idcg; } else { - dcg = 0; + result = 0; } } - EvalQueryQuality evalQueryQuality = new EvalQueryQuality(taskId, dcg); + EvalQueryQuality evalQueryQuality = new EvalQueryQuality(taskId, result); evalQueryQuality.addHitsAndRatings(ratedHits); + evalQueryQuality.setMetricDetails(new Detail(dcg, idcg, unratedResults)); return evalQueryQuality; } @@ -167,7 +173,7 @@ public class DiscountedCumulativeGain implements EvaluationMetric { private static final ParseField K_FIELD = new ParseField("k"); private static final ParseField NORMALIZE_FIELD = new ParseField("normalize"); private static final ParseField UNKNOWN_DOC_RATING_FIELD = new ParseField("unknown_doc_rating"); - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("dcg_at", false, + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("dcg", false, args -> { Boolean normalized = (Boolean) args[0]; Integer optK = (Integer) args[2]; @@ -217,4 +223,118 @@ public class DiscountedCumulativeGain implements EvaluationMetric { public final int hashCode() { return Objects.hash(normalize, unknownDocRating, k); } + + public static final class Detail implements MetricDetail { + + private static ParseField DCG_FIELD 
= new ParseField("dcg"); + private static ParseField IDCG_FIELD = new ParseField("ideal_dcg"); + private static ParseField NDCG_FIELD = new ParseField("normalized_dcg"); + private static ParseField UNRATED_FIELD = new ParseField("unrated_docs"); + private final double dcg; + private final double idcg; + private final int unratedDocs; + + Detail(double dcg, double idcg, int unratedDocs) { + this.dcg = dcg; + this.idcg = idcg; + this.unratedDocs = unratedDocs; + } + + Detail(StreamInput in) throws IOException { + this.dcg = in.readDouble(); + this.idcg = in.readDouble(); + this.unratedDocs = in.readVInt(); + } + + @Override + public + String getMetricName() { + return NAME; + } + + @Override + public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(DCG_FIELD.getPreferredName(), this.dcg); + if (this.idcg != 0) { + builder.field(IDCG_FIELD.getPreferredName(), this.idcg); + builder.field(NDCG_FIELD.getPreferredName(), this.dcg / this.idcg); + } + builder.field(UNRATED_FIELD.getPreferredName(), this.unratedDocs); + return builder; + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, true, args -> { + return new Detail((Double) args[0], (Double) args[1] != null ? 
(Double) args[1] : 0.0d, (Integer) args[2]); + }); + + static { + PARSER.declareDouble(constructorArg(), DCG_FIELD); + PARSER.declareDouble(optionalConstructorArg(), IDCG_FIELD); + PARSER.declareInt(constructorArg(), UNRATED_FIELD); + } + + public static Detail fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeDouble(this.dcg); + out.writeDouble(this.idcg); + out.writeVInt(this.unratedDocs); + } + + @Override + public String getWriteableName() { + return NAME; + } + + /** + * @return the discounted cumulative gain + */ + public double getDCG() { + return this.dcg; + } + + /** + * @return the ideal discounted cumulative gain, can be 0 if nothing was computed, e.g. because no normalization was required + */ + public double getIDCG() { + return this.idcg; + } + + /** + * @return the normalized discounted cumulative gain, can be 0 if nothing was computed, e.g. because no normalization was required + */ + public double getNDCG() { + return (this.idcg != 0) ? 
this.dcg / this.idcg : 0; + } + + /** + * @return the number of unrated documents in the search results + */ + public Object getUnratedDocs() { + return this.unratedDocs; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + DiscountedCumulativeGain.Detail other = (DiscountedCumulativeGain.Detail) obj; + return (this.dcg == other.dcg && + this.idcg == other.idcg && + this.unratedDocs == other.unratedDocs); + } + + @Override + public int hashCode() { + return Objects.hash(this.dcg, this.idcg, this.unratedDocs); + } + } } + diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalNamedXContentProvider.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalNamedXContentProvider.java index c5785ca3847..f2176113cdf 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalNamedXContentProvider.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalNamedXContentProvider.java @@ -41,6 +41,8 @@ public class RankEvalNamedXContentProvider implements NamedXContentProvider { PrecisionAtK.Detail::fromXContent)); namedXContent.add(new NamedXContentRegistry.Entry(MetricDetail.class, new ParseField(MeanReciprocalRank.NAME), MeanReciprocalRank.Detail::fromXContent)); + namedXContent.add(new NamedXContentRegistry.Entry(MetricDetail.class, new ParseField(DiscountedCumulativeGain.NAME), + DiscountedCumulativeGain.Detail::fromXContent)); return namedXContent; } } diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalPlugin.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalPlugin.java index 884cf3bafdc..8ac2b7fbee5 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalPlugin.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalPlugin.java 
@@ -61,8 +61,9 @@ public class RankEvalPlugin extends Plugin implements ActionPlugin { namedWriteables.add( new NamedWriteableRegistry.Entry(EvaluationMetric.class, DiscountedCumulativeGain.NAME, DiscountedCumulativeGain::new)); namedWriteables.add(new NamedWriteableRegistry.Entry(MetricDetail.class, PrecisionAtK.NAME, PrecisionAtK.Detail::new)); - namedWriteables - .add(new NamedWriteableRegistry.Entry(MetricDetail.class, MeanReciprocalRank.NAME, MeanReciprocalRank.Detail::new)); + namedWriteables.add(new NamedWriteableRegistry.Entry(MetricDetail.class, MeanReciprocalRank.NAME, MeanReciprocalRank.Detail::new)); + namedWriteables.add( + new NamedWriteableRegistry.Entry(MetricDetail.class, DiscountedCumulativeGain.NAME, DiscountedCumulativeGain.Detail::new)); return namedWriteables; } diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java index 64337786b1e..24ac600a113 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.rankeval; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.text.Text; @@ -254,9 +255,8 @@ public class DiscountedCumulativeGainTests extends ESTestCase { public static DiscountedCumulativeGain createTestItem() { boolean normalize = randomBoolean(); - Integer unknownDocRating = Integer.valueOf(randomIntBetween(0, 1000)); - - return new DiscountedCumulativeGain(normalize, unknownDocRating, 10); + Integer unknownDocRating = frequently() ? 
Integer.valueOf(randomIntBetween(0, 1000)) : null; + return new DiscountedCumulativeGain(normalize, unknownDocRating, randomIntBetween(1, 10)); } public void testXContentRoundtrip() throws IOException { @@ -283,7 +283,25 @@ public class DiscountedCumulativeGainTests extends ESTestCase { parser.nextToken(); XContentParseException exception = expectThrows(XContentParseException.class, () -> DiscountedCumulativeGain.fromXContent(parser)); - assertThat(exception.getMessage(), containsString("[dcg_at] unknown field")); + assertThat(exception.getMessage(), containsString("[dcg] unknown field")); + } + } + + public void testMetricDetails() { + double dcg = randomDoubleBetween(0, 1, true); + double idcg = randomBoolean() ? 0.0 : randomDoubleBetween(0, 1, true); + double expectedNdcg = idcg != 0 ? dcg / idcg : 0.0; + int unratedDocs = randomIntBetween(0, 100); + DiscountedCumulativeGain.Detail detail = new DiscountedCumulativeGain.Detail(dcg, idcg, unratedDocs); + assertEquals(dcg, detail.getDCG(), 0.0); + assertEquals(idcg, detail.getIDCG(), 0.0); + assertEquals(expectedNdcg, detail.getNDCG(), 0.0); + assertEquals(unratedDocs, detail.getUnratedDocs()); + if (idcg != 0) { + assertEquals("{\"dcg\":{\"dcg\":" + dcg + ",\"ideal_dcg\":" + idcg + ",\"normalized_dcg\":" + expectedNdcg + + ",\"unrated_docs\":" + unratedDocs + "}}", Strings.toString(detail)); + } else { + assertEquals("{\"dcg\":{\"dcg\":" + dcg + ",\"unrated_docs\":" + unratedDocs + "}}", Strings.toString(detail)); } } diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/EvalQueryQualityTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/EvalQueryQualityTests.java index 112cf4eaaf7..e9fae6b5c63 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/EvalQueryQualityTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/EvalQueryQualityTests.java @@ -68,10 +68,20 @@ public class EvalQueryQualityTests extends 
ESTestCase { EvalQueryQuality evalQueryQuality = new EvalQueryQuality(randomAlphaOfLength(10), randomDoubleBetween(0.0, 1.0, true)); if (randomBoolean()) { - if (randomBoolean()) { + int metricDetail = randomIntBetween(0, 2); + switch (metricDetail) { + case 0: evalQueryQuality.setMetricDetails(new PrecisionAtK.Detail(randomIntBetween(0, 1000), randomIntBetween(0, 1000))); - } else { + break; + case 1: evalQueryQuality.setMetricDetails(new MeanReciprocalRank.Detail(randomIntBetween(0, 1000))); + break; + case 2: + evalQueryQuality.setMetricDetails(new DiscountedCumulativeGain.Detail(randomDoubleBetween(0, 1, true), + randomBoolean() ? randomDoubleBetween(0, 1, true) : 0, randomInt())); + break; + default: + throw new IllegalArgumentException("illegal randomized value in test"); } } evalQueryQuality.addHitsAndRatings(ratedHits); From 045f76d67f37f315fc42a3960ce88500e105f0d6 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 15 Jun 2018 08:02:47 -0400 Subject: [PATCH 09/15] LLClient: Fix assertion on windows In windows the exception message is ever so slightly differant than in Linux and OSX. That is fine. We'll just catch either. 
--- .../client/RestClientMultipleHostsIntegTests.java | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java index 92a960090ad..d09741ea25b 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java @@ -42,7 +42,9 @@ import java.util.concurrent.TimeUnit; import static org.elasticsearch.client.RestClientTestUtil.getAllStatusCodes; import static org.elasticsearch.client.RestClientTestUtil.randomErrorNoRetryStatusCode; import static org.elasticsearch.client.RestClientTestUtil.randomOkStatusCode; +import static org.hamcrest.Matchers.startsWith; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -214,7 +216,8 @@ public class RestClientMultipleHostsIntegTests extends RestClientTestCase { restClient.performRequest(request); fail("expected to fail to connect"); } catch (ConnectException e) { - assertEquals("Connection refused", e.getMessage()); + // This is different in windows and linux but this matches both. + assertThat(e.getMessage(), startsWith("Connection refused")); } } else { Response response = restClient.performRequest(request); From 856936c2863799f250818b4b7582aa707a4dcf92 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 15 Jun 2018 08:04:54 -0400 Subject: [PATCH 10/15] REST Client: NodeSelector for node attributes (#31296) Add a `NodeSelector` so that users can filter the nodes that receive requests based on node attributes. I believe we'll need this to backport #30523 and we want it anyway. I also added a bash script to help with rebuilding the sniffer parsing test documents. 
--- .../client/HasAttributeNodeSelector.java | 56 +++ .../java/org/elasticsearch/client/Node.java | 27 +- .../client/HasAttributeNodeSelectorTests.java | 59 +++ .../client/NodeSelectorTests.java | 5 +- .../org/elasticsearch/client/NodeTests.java | 50 ++- .../client/RestClientMultipleHostsTests.java | 2 +- .../elasticsearch/client/RestClientTests.java | 6 +- .../RestClientDocumentation.java | 20 +- .../sniff/ElasticsearchNodesSniffer.java | 126 ++++-- .../ElasticsearchNodesSnifferParseTests.java | 34 +- .../sniff/ElasticsearchNodesSnifferTests.java | 33 +- .../src/test/resources/2.0.0_nodes_http.json | 290 ++++++++------ .../src/test/resources/5.0.0_nodes_http.json | 328 +++++++++------- .../src/test/resources/6.0.0_nodes_http.json | 362 ++++++++++-------- .../resources/create_test_nodes_info.bash | 107 ++++++ client/sniffer/src/test/resources/readme.txt | 2 + docs/java-rest/low-level/usage.asciidoc | 10 +- .../rest-api-spec/test/README.asciidoc | 17 +- .../test/rest/yaml/section/DoSection.java | 87 +++-- .../rest/yaml/section/DoSectionTests.java | 89 ++++- 20 files changed, 1172 insertions(+), 538 deletions(-) create mode 100644 client/rest/src/main/java/org/elasticsearch/client/HasAttributeNodeSelector.java create mode 100644 client/rest/src/test/java/org/elasticsearch/client/HasAttributeNodeSelectorTests.java create mode 100644 client/sniffer/src/test/resources/create_test_nodes_info.bash diff --git a/client/rest/src/main/java/org/elasticsearch/client/HasAttributeNodeSelector.java b/client/rest/src/main/java/org/elasticsearch/client/HasAttributeNodeSelector.java new file mode 100644 index 00000000000..e4bb4345864 --- /dev/null +++ b/client/rest/src/main/java/org/elasticsearch/client/HasAttributeNodeSelector.java @@ -0,0 +1,56 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +/** + * A {@link NodeSelector} that selects nodes that have a particular value + * for an attribute. + */ +public final class HasAttributeNodeSelector implements NodeSelector { + private final String key; + private final String value; + + public HasAttributeNodeSelector(String key, String value) { + this.key = key; + this.value = value; + } + + @Override + public void select(Iterable nodes) { + Iterator itr = nodes.iterator(); + while (itr.hasNext()) { + Map> allAttributes = itr.next().getAttributes(); + if (allAttributes == null) continue; + List values = allAttributes.get(key); + if (values == null || false == values.contains(value)) { + itr.remove(); + } + } + } + + @Override + public String toString() { + return key + "=" + value; + } +} diff --git a/client/rest/src/main/java/org/elasticsearch/client/Node.java b/client/rest/src/main/java/org/elasticsearch/client/Node.java index d66d0773016..f180b529275 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/Node.java +++ b/client/rest/src/main/java/org/elasticsearch/client/Node.java @@ -19,6 +19,8 @@ package org.elasticsearch.client; +import java.util.List; +import java.util.Map; import java.util.Objects; import java.util.Set; @@ -52,13 +54,18 @@ public class Node { * if we don't know what roles 
the node has. */ private final Roles roles; + /** + * Attributes declared on the node. + */ + private final Map> attributes; /** * Create a {@linkplain Node} with metadata. All parameters except * {@code host} are nullable and implementations of {@link NodeSelector} * need to decide what to do in their absence. */ - public Node(HttpHost host, Set boundHosts, String name, String version, Roles roles) { + public Node(HttpHost host, Set boundHosts, String name, String version, + Roles roles, Map> attributes) { if (host == null) { throw new IllegalArgumentException("host cannot be null"); } @@ -67,13 +74,14 @@ public class Node { this.name = name; this.version = version; this.roles = roles; + this.attributes = attributes; } /** * Create a {@linkplain Node} without any metadata. */ public Node(HttpHost host) { - this(host, null, null, null, null); + this(host, null, null, null, null, null); } /** @@ -115,6 +123,13 @@ public class Node { return roles; } + /** + * Attributes declared on the node. 
+ */ + public Map> getAttributes() { + return attributes; + } + @Override public String toString() { StringBuilder b = new StringBuilder(); @@ -131,6 +146,9 @@ public class Node { if (roles != null) { b.append(", roles=").append(roles); } + if (attributes != null) { + b.append(", attributes=").append(attributes); + } return b.append(']').toString(); } @@ -144,12 +162,13 @@ public class Node { && Objects.equals(boundHosts, other.boundHosts) && Objects.equals(name, other.name) && Objects.equals(version, other.version) - && Objects.equals(roles, other.roles); + && Objects.equals(roles, other.roles) + && Objects.equals(attributes, other.attributes); } @Override public int hashCode() { - return Objects.hash(host, boundHosts, name, version, roles); + return Objects.hash(host, boundHosts, name, version, roles, attributes); } /** diff --git a/client/rest/src/test/java/org/elasticsearch/client/HasAttributeNodeSelectorTests.java b/client/rest/src/test/java/org/elasticsearch/client/HasAttributeNodeSelectorTests.java new file mode 100644 index 00000000000..8a7c12e8c62 --- /dev/null +++ b/client/rest/src/test/java/org/elasticsearch/client/HasAttributeNodeSelectorTests.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import org.apache.http.HttpHost; +import org.elasticsearch.client.Node.Roles; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static org.junit.Assert.assertEquals; + +public class HasAttributeNodeSelectorTests extends RestClientTestCase { + public void testHasAttribute() { + Node hasAttributeValue = dummyNode(singletonMap("attr", singletonList("val"))); + Node hasAttributeButNotValue = dummyNode(singletonMap("attr", singletonList("notval"))); + Node hasAttributeValueInList = dummyNode(singletonMap("attr", Arrays.asList("val", "notval"))); + Node notHasAttribute = dummyNode(singletonMap("notattr", singletonList("val"))); + List nodes = new ArrayList<>(); + nodes.add(hasAttributeValue); + nodes.add(hasAttributeButNotValue); + nodes.add(hasAttributeValueInList); + nodes.add(notHasAttribute); + List expected = new ArrayList<>(); + expected.add(hasAttributeValue); + expected.add(hasAttributeValueInList); + new HasAttributeNodeSelector("attr", "val").select(nodes); + assertEquals(expected, nodes); + } + + private static Node dummyNode(Map> attributes) { + return new Node(new HttpHost("dummy"), Collections.emptySet(), + randomAsciiAlphanumOfLength(5), randomAsciiAlphanumOfLength(5), + new Roles(randomBoolean(), randomBoolean(), randomBoolean()), + attributes); + } +} diff --git a/client/rest/src/test/java/org/elasticsearch/client/NodeSelectorTests.java b/client/rest/src/test/java/org/elasticsearch/client/NodeSelectorTests.java index d9df001ad43..868ccdcab75 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/NodeSelectorTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/NodeSelectorTests.java @@ -63,9 +63,10 @@ public class NodeSelectorTests extends RestClientTestCase { assertEquals(expected, nodes); } 
- private Node dummyNode(boolean master, boolean data, boolean ingest) { + private static Node dummyNode(boolean master, boolean data, boolean ingest) { return new Node(new HttpHost("dummy"), Collections.emptySet(), randomAsciiAlphanumOfLength(5), randomAsciiAlphanumOfLength(5), - new Roles(master, data, ingest)); + new Roles(master, data, ingest), + Collections.>emptyMap()); } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/NodeTests.java b/client/rest/src/test/java/org/elasticsearch/client/NodeTests.java index c6d60415b88..9eeeb1144f4 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/NodeTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/NodeTests.java @@ -23,49 +23,67 @@ import org.apache.http.HttpHost; import org.elasticsearch.client.Node.Roles; import java.util.Arrays; +import java.util.HashMap; import java.util.HashSet; +import java.util.List; +import java.util.Map; import static java.util.Collections.singleton; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; public class NodeTests extends RestClientTestCase { public void testToString() { + Map> attributes = new HashMap<>(); + attributes.put("foo", singletonList("bar")); + attributes.put("baz", Arrays.asList("bort", "zoom")); assertEquals("[host=http://1]", new Node(new HttpHost("1")).toString()); + assertEquals("[host=http://1, attributes={foo=[bar], baz=[bort, zoom]}]", + new Node(new HttpHost("1"), null, null, null, null, attributes).toString()); assertEquals("[host=http://1, roles=mdi]", new Node(new HttpHost("1"), - null, null, null, new Roles(true, true, true)).toString()); + null, null, null, new Roles(true, true, true), null).toString()); assertEquals("[host=http://1, version=ver]", new Node(new HttpHost("1"), - null, null, "ver", null).toString()); + null, null, "ver", 
null, null).toString()); assertEquals("[host=http://1, name=nam]", new Node(new HttpHost("1"), - null, "nam", null, null).toString()); + null, "nam", null, null, null).toString()); assertEquals("[host=http://1, bound=[http://1, http://2]]", new Node(new HttpHost("1"), - new HashSet<>(Arrays.asList(new HttpHost("1"), new HttpHost("2"))), null, null, null).toString()); - assertEquals("[host=http://1, bound=[http://1, http://2], name=nam, version=ver, roles=m]", + new HashSet<>(Arrays.asList(new HttpHost("1"), new HttpHost("2"))), null, null, null, null).toString()); + assertEquals( + "[host=http://1, bound=[http://1, http://2], name=nam, version=ver, roles=m, attributes={foo=[bar], baz=[bort, zoom]}]", new Node(new HttpHost("1"), new HashSet<>(Arrays.asList(new HttpHost("1"), new HttpHost("2"))), - "nam", "ver", new Roles(true, false, false)).toString()); + "nam", "ver", new Roles(true, false, false), attributes).toString()); } public void testEqualsAndHashCode() { HttpHost host = new HttpHost(randomAsciiAlphanumOfLength(5)); Node node = new Node(host, - randomBoolean() ? null : singleton(host), - randomBoolean() ? null : randomAsciiAlphanumOfLength(5), - randomBoolean() ? null : randomAsciiAlphanumOfLength(5), - randomBoolean() ? null : new Roles(true, true, true)); + randomBoolean() ? null : singleton(host), + randomBoolean() ? null : randomAsciiAlphanumOfLength(5), + randomBoolean() ? null : randomAsciiAlphanumOfLength(5), + randomBoolean() ? null : new Roles(true, true, true), + randomBoolean() ? 
null : singletonMap("foo", singletonList("bar"))); assertFalse(node.equals(null)); assertTrue(node.equals(node)); assertEquals(node.hashCode(), node.hashCode()); - Node copy = new Node(host, node.getBoundHosts(), node.getName(), node.getVersion(), node.getRoles()); + Node copy = new Node(host, node.getBoundHosts(), node.getName(), node.getVersion(), + node.getRoles(), node.getAttributes()); assertTrue(node.equals(copy)); assertEquals(node.hashCode(), copy.hashCode()); assertFalse(node.equals(new Node(new HttpHost(host.toHostString() + "changed"), node.getBoundHosts(), - node.getName(), node.getVersion(), node.getRoles()))); + node.getName(), node.getVersion(), node.getRoles(), node.getAttributes()))); assertFalse(node.equals(new Node(host, new HashSet<>(Arrays.asList(host, new HttpHost(host.toHostString() + "changed"))), - node.getName(), node.getVersion(), node.getRoles()))); - assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName() + "changed", node.getVersion(), node.getRoles()))); - assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName(), node.getVersion() + "changed", node.getRoles()))); - assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName(), node.getVersion(), new Roles(false, false, false)))); + node.getName(), node.getVersion(), node.getRoles(), node.getAttributes()))); + assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName() + "changed", + node.getVersion(), node.getRoles(), node.getAttributes()))); + assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName(), + node.getVersion() + "changed", node.getRoles(), node.getAttributes()))); + assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName(), + node.getVersion(), new Roles(false, false, false), node.getAttributes()))); + assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName(), + node.getVersion(), node.getRoles(), singletonMap("bort", singletonList("bing"))))); } } 
diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java index eb591f4ccff..d04b3cbb755 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java @@ -342,7 +342,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase { List newNodes = new ArrayList<>(nodes.size()); for (int i = 0; i < nodes.size(); i++) { Roles roles = i == 0 ? new Roles(false, true, true) : new Roles(true, false, false); - newNodes.add(new Node(nodes.get(i).getHost(), null, null, null, roles)); + newNodes.add(new Node(nodes.get(i).getHost(), null, null, null, roles, null)); } restClient.setNodes(newNodes); int rounds = between(1, 10); diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java index 01f6f308f62..04742ccab4f 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java @@ -341,9 +341,9 @@ public class RestClientTests extends RestClientTestCase { } public void testSelectHosts() throws IOException { - Node n1 = new Node(new HttpHost("1"), null, null, "1", null); - Node n2 = new Node(new HttpHost("2"), null, null, "2", null); - Node n3 = new Node(new HttpHost("3"), null, null, "3", null); + Node n1 = new Node(new HttpHost("1"), null, null, "1", null, null); + Node n2 = new Node(new HttpHost("2"), null, null, "2", null, null); + Node n3 = new Node(new HttpHost("3"), null, null, "3", null, null); NodeSelector not1 = new NodeSelector() { @Override diff --git a/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java 
b/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java index 0cc41b078b8..d3a0202747d 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java +++ b/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java @@ -36,6 +36,7 @@ import org.apache.http.nio.entity.NStringEntity; import org.apache.http.ssl.SSLContextBuilder; import org.apache.http.ssl.SSLContexts; import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.HasAttributeNodeSelector; import org.elasticsearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory; import org.elasticsearch.client.Node; import org.elasticsearch.client.NodeSelector; @@ -190,11 +191,20 @@ public class RestClientDocumentation { //tag::rest-client-options-set-singleton request.setOptions(COMMON_OPTIONS); //end::rest-client-options-set-singleton - //tag::rest-client-options-customize - RequestOptions.Builder options = COMMON_OPTIONS.toBuilder(); - options.addHeader("cats", "knock things off of other things"); - request.setOptions(options); - //end::rest-client-options-customize + { + //tag::rest-client-options-customize-header + RequestOptions.Builder options = COMMON_OPTIONS.toBuilder(); + options.addHeader("cats", "knock things off of other things"); + request.setOptions(options); + //end::rest-client-options-customize-header + } + { + //tag::rest-client-options-customize-attribute + RequestOptions.Builder options = COMMON_OPTIONS.toBuilder(); + options.setNodeSelector(new HasAttributeNodeSelector("rack", "c12")); // <1> + request.setOptions(options); + //end::rest-client-options-customize-attribute + } } { HttpEntity[] documents = new HttpEntity[10]; diff --git a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/ElasticsearchNodesSniffer.java b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/ElasticsearchNodesSniffer.java index 
da7ef4700fd..5c947f5625b 100644 --- a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/ElasticsearchNodesSniffer.java +++ b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/ElasticsearchNodesSniffer.java @@ -36,12 +36,18 @@ import java.io.IOException; import java.io.InputStream; import java.net.URI; import java.util.ArrayList; +import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.concurrent.TimeUnit; +import static java.util.Collections.singletonList; +import static java.util.Collections.unmodifiableList; +import static java.util.Collections.unmodifiableMap; + /** * Class responsible for sniffing the http hosts from elasticsearch through the nodes info api and returning them back. * Compatible with elasticsearch 2.x+. @@ -138,16 +144,19 @@ public final class ElasticsearchNodesSniffer implements NodesSniffer { Set boundHosts = new HashSet<>(); String name = null; String version = null; - String fieldName = null; - // Used to read roles from 5.0+ + /* + * Multi-valued attributes come with key = `real_key.index` and we + * unflip them after reading them because we can't rely on the order + * that they arive. 
+ */ + final Map<String, String> protoAttributes = new HashMap<String, String>(); + boolean sawRoles = false; boolean master = false; boolean data = false; boolean ingest = false; - // Used to read roles from 2.x - Boolean masterAttribute = null; - Boolean dataAttribute = null; - boolean clientAttribute = false; + + String fieldName = null; while (parser.nextToken() != JsonToken.END_OBJECT) { if (parser.getCurrentToken() == JsonToken.FIELD_NAME) { fieldName = parser.getCurrentName(); @@ -170,13 +179,12 @@ } } else if ("attributes".equals(fieldName)) { while (parser.nextToken() != JsonToken.END_OBJECT) { - if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "master".equals(parser.getCurrentName())) { - masterAttribute = toBoolean(parser.getValueAsString()); - } else if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "data".equals(parser.getCurrentName())) { - dataAttribute = toBoolean(parser.getValueAsString()); - } else if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "client".equals(parser.getCurrentName())) { - clientAttribute = toBoolean(parser.getValueAsString()); - } else if (parser.getCurrentToken() == JsonToken.START_OBJECT) { + if (parser.getCurrentToken() == JsonToken.VALUE_STRING) { + String oldValue = protoAttributes.put(parser.getCurrentName(), parser.getValueAsString()); + if (oldValue != null) { + throw new IOException("repeated attribute key [" + parser.getCurrentName() + "]"); + } + } else { parser.skipChildren(); } } @@ -216,21 +224,74 @@ if (publishedHost == null) { logger.debug("skipping node [" + nodeId + "] with http disabled"); return null; - } else { - logger.trace("adding node [" + nodeId + "]"); - if (version.startsWith("2.")) { - /* - * 2.x doesn't send roles, instead we try to read them from - * attributes. - */ - master = masterAttribute == null ?
false == clientAttribute : masterAttribute; - data = dataAttribute == null ? false == clientAttribute : dataAttribute; - } else { - assert sawRoles : "didn't see roles for [" + nodeId + "]"; + } + + Map<String, List<String>> realAttributes = new HashMap<>(protoAttributes.size()); + List<String> keys = new ArrayList<>(protoAttributes.keySet()); + for (String key : keys) { + if (key.endsWith(".0")) { + String realKey = key.substring(0, key.length() - 2); + List<String> values = new ArrayList<>(); + int i = 0; + while (true) { + String value = protoAttributes.remove(realKey + "." + i); + if (value == null) { + break; + } + values.add(value); + i++; + } + realAttributes.put(realKey, unmodifiableList(values)); } - assert boundHosts.contains(publishedHost) : - "[" + nodeId + "] doesn't make sense! publishedHost should be in boundHosts"; - return new Node(publishedHost, boundHosts, name, version, new Roles(master, data, ingest)); + } + for (Map.Entry<String, String> entry : protoAttributes.entrySet()) { + realAttributes.put(entry.getKey(), singletonList(entry.getValue())); + } + + if (version.startsWith("2.")) { + /* + * 2.x doesn't send roles, instead we try to read them from + * attributes. + */ + boolean clientAttribute = v2RoleAttributeValue(realAttributes, "client", false); + Boolean masterAttribute = v2RoleAttributeValue(realAttributes, "master", null); + Boolean dataAttribute = v2RoleAttributeValue(realAttributes, "data", null); + master = masterAttribute == null ? false == clientAttribute : masterAttribute; + data = dataAttribute == null ? false == clientAttribute : dataAttribute; + } else { + assert sawRoles : "didn't see roles for [" + nodeId + "]"; + } + assert boundHosts.contains(publishedHost) : + "[" + nodeId + "] doesn't make sense!
publishedHost should be in boundHosts"; + logger.trace("adding node [" + nodeId + "]"); + return new Node(publishedHost, boundHosts, name, version, new Roles(master, data, ingest), + unmodifiableMap(realAttributes)); + } + + /** + * Returns {@code defaultValue} if the attribute didn't come back, + * {@code true} or {@code false} if it did come back as + * either of those, or throws an IOException if the attribute + * came back in a strange way. + */ + private static Boolean v2RoleAttributeValue(Map> attributes, + String name, Boolean defaultValue) throws IOException { + List valueList = attributes.remove(name); + if (valueList == null) { + return defaultValue; + } + if (valueList.size() != 1) { + throw new IOException("expected only a single attribute value for [" + name + "] but got " + + valueList); + } + switch (valueList.get(0)) { + case "true": + return true; + case "false": + return false; + default: + throw new IOException("expected [" + name + "] to be either [true] or [false] but was [" + + valueList.get(0) + "]"); } } @@ -248,15 +309,4 @@ public final class ElasticsearchNodesSniffer implements NodesSniffer { return name; } } - - private static boolean toBoolean(String string) { - switch (string) { - case "true": - return true; - case "false": - return false; - default: - throw new IllegalArgumentException("[" + string + "] is not a valid boolean"); - } - } } diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferParseTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferParseTests.java index 712a836a17b..edc7330c130 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferParseTests.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferParseTests.java @@ -30,14 +30,18 @@ import org.elasticsearch.client.sniff.ElasticsearchNodesSniffer.Scheme; import java.io.IOException; import 
java.io.InputStream; +import java.util.Arrays; +import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; import com.fasterxml.jackson.core.JsonFactory; -import static org.hamcrest.Matchers.hasItem; +import static java.util.Collections.singletonList; import static org.hamcrest.Matchers.hasSize; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThat; /** @@ -53,10 +57,14 @@ public class ElasticsearchNodesSnifferParseTests extends RestClientTestCase { try { HttpEntity entity = new InputStreamEntity(in, ContentType.APPLICATION_JSON); List nodes = ElasticsearchNodesSniffer.readHosts(entity, Scheme.HTTP, new JsonFactory()); - // Use these assertions because the error messages are nicer than hasItems. + /* + * Use these assertions because the error messages are nicer + * than hasItems and we know the results are in order because + * that is how we generated the file. + */ assertThat(nodes, hasSize(expected.length)); - for (Node expectedNode : expected) { - assertThat(nodes, hasItem(expectedNode)); + for (int i = 0; i < expected.length; i++) { + assertEquals(expected[i], nodes.get(i)); } } finally { in.close(); @@ -66,13 +74,13 @@ public class ElasticsearchNodesSnifferParseTests extends RestClientTestCase { public void test2x() throws IOException { checkFile("2.0.0_nodes_http.json", node(9200, "m1", "2.0.0", true, false, false), - node(9202, "m2", "2.0.0", true, true, false), - node(9201, "m3", "2.0.0", true, false, false), - node(9205, "d1", "2.0.0", false, true, false), + node(9201, "m2", "2.0.0", true, true, false), + node(9202, "m3", "2.0.0", true, false, false), + node(9203, "d1", "2.0.0", false, true, false), node(9204, "d2", "2.0.0", false, true, false), - node(9203, "d3", "2.0.0", false, true, false), - node(9207, "c1", "2.0.0", false, false, false), - node(9206, "c2", "2.0.0", false, false, false)); + node(9205, "d3", "2.0.0", false, true, false), + node(9206, "c1", 
"2.0.0", false, false, false), + node(9207, "c2", "2.0.0", false, false, false)); } public void test5x() throws IOException { @@ -104,6 +112,10 @@ public class ElasticsearchNodesSnifferParseTests extends RestClientTestCase { Set boundHosts = new HashSet<>(2); boundHosts.add(host); boundHosts.add(new HttpHost("[::1]", port)); - return new Node(host, boundHosts, name, version, new Roles(master, data, ingest)); + Map> attributes = new HashMap<>(); + attributes.put("dummy", singletonList("everyone_has_me")); + attributes.put("number", singletonList(name.substring(1))); + attributes.put("array", Arrays.asList(name.substring(0, 1), name.substring(1))); + return new Node(host, boundHosts, name, version, new Roles(master, data, ingest), attributes); } } diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferTests.java index 260832ca90e..3d2a74685af 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferTests.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferTests.java @@ -200,9 +200,21 @@ public class ElasticsearchNodesSnifferTests extends RestClientTestCase { } } + int numAttributes = between(0, 5); + Map> attributes = new HashMap<>(numAttributes); + for (int j = 0; j < numAttributes; j++) { + int numValues = frequently() ? 
1 : between(2, 5); + List values = new ArrayList<>(); + for (int v = 0; v < numValues; v++) { + values.add(j + "value" + v); + } + attributes.put("attr" + j, values); + } + Node node = new Node(publishHost, boundHosts, randomAsciiAlphanumOfLength(5), randomAsciiAlphanumOfLength(5), - new Node.Roles(randomBoolean(), randomBoolean(), randomBoolean())); + new Node.Roles(randomBoolean(), randomBoolean(), randomBoolean()), + attributes); generator.writeObjectFieldStart(nodeId); if (getRandom().nextBoolean()) { @@ -256,18 +268,17 @@ public class ElasticsearchNodesSnifferTests extends RestClientTestCase { generator.writeFieldName("name"); generator.writeString(node.getName()); - int numAttributes = RandomNumbers.randomIntBetween(getRandom(), 0, 3); - Map attributes = new HashMap<>(numAttributes); - for (int j = 0; j < numAttributes; j++) { - attributes.put("attr" + j, "value" + j); - } if (numAttributes > 0) { generator.writeObjectFieldStart("attributes"); - } - for (Map.Entry entry : attributes.entrySet()) { - generator.writeStringField(entry.getKey(), entry.getValue()); - } - if (numAttributes > 0) { + for (Map.Entry> entry : attributes.entrySet()) { + if (entry.getValue().size() == 1) { + generator.writeStringField(entry.getKey(), entry.getValue().get(0)); + } else { + for (int v = 0; v < entry.getValue().size(); v++) { + generator.writeStringField(entry.getKey() + "." 
+ v, entry.getValue().get(v)); + } + } + } generator.writeEndObject(); } generator.writeEndObject(); diff --git a/client/sniffer/src/test/resources/2.0.0_nodes_http.json b/client/sniffer/src/test/resources/2.0.0_nodes_http.json index b370e78e160..22dc4ec13ed 100644 --- a/client/sniffer/src/test/resources/2.0.0_nodes_http.json +++ b/client/sniffer/src/test/resources/2.0.0_nodes_http.json @@ -1,140 +1,200 @@ { - "cluster_name" : "elasticsearch", - "nodes" : { - "qYUZ_8bTRwODPxukDlFw6Q" : { - "name" : "d2", - "transport_address" : "127.0.0.1:9304", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "2.0.0", - "build" : "de54438", - "http_address" : "127.0.0.1:9204", - "attributes" : { - "master" : "false" + "cluster_name": "elasticsearch", + "nodes": { + "qr-SOrELSaGW8SlU8nflBw": { + "name": "m1", + "transport_address": "127.0.0.1:9300", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "2.0.0", + "build": "de54438", + "http_address": "127.0.0.1:9200", + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "m", + "data": "false", + "array.1": "1", + "master": "true" }, - "http" : { - "bound_address" : [ "127.0.0.1:9204", "[::1]:9204" ], - "publish_address" : "127.0.0.1:9204", - "max_content_length_in_bytes" : 104857600 + "http": { + "bound_address": [ + "127.0.0.1:9200", + "[::1]:9200" + ], + "publish_address": "127.0.0.1:9200", + "max_content_length_in_bytes": 104857600 } }, - "Yej5UVNgR2KgBjUFHOQpCw" : { - "name" : "c1", - "transport_address" : "127.0.0.1:9307", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "2.0.0", - "build" : "de54438", - "http_address" : "127.0.0.1:9207", - "attributes" : { - "data" : "false", - "master" : "false" + "osfiXxUOQzCVIs-eepgSCA": { + "name": "m2", + "transport_address": "127.0.0.1:9301", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "2.0.0", + "build": "de54438", + "http_address": "127.0.0.1:9201", + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + 
"array.0": "m", + "array.1": "2", + "master": "true" }, - "http" : { - "bound_address" : [ "127.0.0.1:9207", "[::1]:9207" ], - "publish_address" : "127.0.0.1:9207", - "max_content_length_in_bytes" : 104857600 + "http": { + "bound_address": [ + "127.0.0.1:9201", + "[::1]:9201" + ], + "publish_address": "127.0.0.1:9201", + "max_content_length_in_bytes": 104857600 } }, - "mHttJwhwReangKEx9EGuAg" : { - "name" : "m3", - "transport_address" : "127.0.0.1:9301", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "2.0.0", - "build" : "de54438", - "http_address" : "127.0.0.1:9201", - "attributes" : { - "data" : "false", - "master" : "true" + "lazeJFiIQ8eHHV4GeIdMPg": { + "name": "m3", + "transport_address": "127.0.0.1:9302", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "2.0.0", + "build": "de54438", + "http_address": "127.0.0.1:9202", + "attributes": { + "dummy": "everyone_has_me", + "number": "3", + "array.0": "m", + "data": "false", + "array.1": "3", + "master": "true" }, - "http" : { - "bound_address" : [ "127.0.0.1:9201", "[::1]:9201" ], - "publish_address" : "127.0.0.1:9201", - "max_content_length_in_bytes" : 104857600 + "http": { + "bound_address": [ + "127.0.0.1:9202", + "[::1]:9202" + ], + "publish_address": "127.0.0.1:9202", + "max_content_length_in_bytes": 104857600 } }, - "6Erdptt_QRGLxMiLi9mTkg" : { - "name" : "c2", - "transport_address" : "127.0.0.1:9306", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "2.0.0", - "build" : "de54438", - "http_address" : "127.0.0.1:9206", - "attributes" : { - "data" : "false", - "client" : "true" + "t9WxK-fNRsqV5G0Mm09KpQ": { + "name": "d1", + "transport_address": "127.0.0.1:9303", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "2.0.0", + "build": "de54438", + "http_address": "127.0.0.1:9203", + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "d", + "array.1": "1", + "master": "false" }, - "http" : { - "bound_address" : [ "127.0.0.1:9206", "[::1]:9206" ], - 
"publish_address" : "127.0.0.1:9206", - "max_content_length_in_bytes" : 104857600 + "http": { + "bound_address": [ + "127.0.0.1:9203", + "[::1]:9203" + ], + "publish_address": "127.0.0.1:9203", + "max_content_length_in_bytes": 104857600 } }, - "mLRCZBypTiys6e8KY5DMnA" : { - "name" : "m1", - "transport_address" : "127.0.0.1:9300", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "2.0.0", - "build" : "de54438", - "http_address" : "127.0.0.1:9200", - "attributes" : { - "data" : "false" + "wgoDzluvTViwUjEsmVesKw": { + "name": "d2", + "transport_address": "127.0.0.1:9304", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "2.0.0", + "build": "de54438", + "http_address": "127.0.0.1:9204", + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "d", + "array.1": "2", + "master": "false" }, - "http" : { - "bound_address" : [ "127.0.0.1:9200", "[::1]:9200" ], - "publish_address" : "127.0.0.1:9200", - "max_content_length_in_bytes" : 104857600 + "http": { + "bound_address": [ + "127.0.0.1:9204", + "[::1]:9204" + ], + "publish_address": "127.0.0.1:9204", + "max_content_length_in_bytes": 104857600 } }, - "pVqOhytXQwetsZVzCBppYw" : { - "name" : "m2", - "transport_address" : "127.0.0.1:9302", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "2.0.0", - "build" : "de54438", - "http_address" : "127.0.0.1:9202", - "http" : { - "bound_address" : [ "127.0.0.1:9202", "[::1]:9202" ], - "publish_address" : "127.0.0.1:9202", - "max_content_length_in_bytes" : 104857600 + "6j_t3pPhSm-oRTyypTzu5g": { + "name": "d3", + "transport_address": "127.0.0.1:9305", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "2.0.0", + "build": "de54438", + "http_address": "127.0.0.1:9205", + "attributes": { + "dummy": "everyone_has_me", + "number": "3", + "array.0": "d", + "array.1": "3", + "master": "false" + }, + "http": { + "bound_address": [ + "127.0.0.1:9205", + "[::1]:9205" + ], + "publish_address": "127.0.0.1:9205", + 
"max_content_length_in_bytes": 104857600 } }, - "ARyzVfpJSw2a9TOIUpbsBA" : { - "name" : "d1", - "transport_address" : "127.0.0.1:9305", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "2.0.0", - "build" : "de54438", - "http_address" : "127.0.0.1:9205", - "attributes" : { - "master" : "false" + "PaEkm0z7Ssiuyfkh3aASag": { + "name": "c1", + "transport_address": "127.0.0.1:9306", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "2.0.0", + "build": "de54438", + "http_address": "127.0.0.1:9206", + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "c", + "data": "false", + "array.1": "1", + "master": "false" }, - "http" : { - "bound_address" : [ "127.0.0.1:9205", "[::1]:9205" ], - "publish_address" : "127.0.0.1:9205", - "max_content_length_in_bytes" : 104857600 + "http": { + "bound_address": [ + "127.0.0.1:9206", + "[::1]:9206" + ], + "publish_address": "127.0.0.1:9206", + "max_content_length_in_bytes": 104857600 } }, - "2Hpid-g5Sc2BKCevhN6VQw" : { - "name" : "d3", - "transport_address" : "127.0.0.1:9303", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "2.0.0", - "build" : "de54438", - "http_address" : "127.0.0.1:9203", - "attributes" : { - "master" : "false" + "LAFKr2K_QmupqnM_atJqkQ": { + "name": "c2", + "transport_address": "127.0.0.1:9307", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "2.0.0", + "build": "de54438", + "http_address": "127.0.0.1:9207", + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "c", + "data": "false", + "array.1": "2", + "master": "false" }, - "http" : { - "bound_address" : [ "127.0.0.1:9203", "[::1]:9203" ], - "publish_address" : "127.0.0.1:9203", - "max_content_length_in_bytes" : 104857600 + "http": { + "bound_address": [ + "127.0.0.1:9207", + "[::1]:9207" + ], + "publish_address": "127.0.0.1:9207", + "max_content_length_in_bytes": 104857600 } } } diff --git a/client/sniffer/src/test/resources/5.0.0_nodes_http.json 
b/client/sniffer/src/test/resources/5.0.0_nodes_http.json index 7a7d143ecaf..1358438237f 100644 --- a/client/sniffer/src/test/resources/5.0.0_nodes_http.json +++ b/client/sniffer/src/test/resources/5.0.0_nodes_http.json @@ -1,168 +1,216 @@ { - "_nodes" : { - "total" : 8, - "successful" : 8, - "failed" : 0 + "_nodes": { + "total": 8, + "successful": 8, + "failed": 0 }, - "cluster_name" : "test", - "nodes" : { - "DXz_rhcdSF2xJ96qyjaLVw" : { - "name" : "m1", - "transport_address" : "127.0.0.1:9300", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "5.0.0", - "build_hash" : "253032b", - "roles" : [ + "cluster_name": "elasticsearch", + "nodes": { + "0S4r3NurTYSFSb8R9SxwWA": { + "name": "m1", + "transport_address": "127.0.0.1:9300", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "5.0.0", + "build_hash": "253032b", + "roles": [ "master", "ingest" ], - "http" : { - "bound_address" : [ + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "m", + "array.1": "1" + }, + "http": { + "bound_address": [ "[::1]:9200", "127.0.0.1:9200" ], - "publish_address" : "127.0.0.1:9200", - "max_content_length_in_bytes" : 104857600 + "publish_address": "127.0.0.1:9200", + "max_content_length_in_bytes": 104857600 } }, - "53Mi6jYdRgeR1cdyuoNfQQ" : { - "name" : "m2", - "transport_address" : "127.0.0.1:9301", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "5.0.0", - "build_hash" : "253032b", - "roles" : [ + "k_CBrMXARkS57Qb5-3Mw5g": { + "name": "m2", + "transport_address": "127.0.0.1:9301", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "5.0.0", + "build_hash": "253032b", + "roles": [ "master", "data", "ingest" ], - "http" : { - "bound_address" : [ + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "m", + "array.1": "2" + }, + "http": { + "bound_address": [ "[::1]:9201", "127.0.0.1:9201" ], - "publish_address" : "127.0.0.1:9201", - "max_content_length_in_bytes" : 104857600 + "publish_address": 
"127.0.0.1:9201", + "max_content_length_in_bytes": 104857600 } }, - "XBIghcHiRlWP9c4vY6rETw" : { - "name" : "c2", - "transport_address" : "127.0.0.1:9307", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "5.0.0", - "build_hash" : "253032b", - "roles" : [ - "ingest" - ], - "http" : { - "bound_address" : [ - "[::1]:9207", - "127.0.0.1:9207" - ], - "publish_address" : "127.0.0.1:9207", - "max_content_length_in_bytes" : 104857600 - } - }, - "cFM30FlyS8K1njH_bovwwQ" : { - "name" : "d1", - "transport_address" : "127.0.0.1:9303", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "5.0.0", - "build_hash" : "253032b", - "roles" : [ - "data", - "ingest" - ], - "http" : { - "bound_address" : [ - "[::1]:9203", - "127.0.0.1:9203" - ], - "publish_address" : "127.0.0.1:9203", - "max_content_length_in_bytes" : 104857600 - } - }, - "eoVUVRGNRDyyOapqIcrsIA" : { - "name" : "d2", - "transport_address" : "127.0.0.1:9304", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "5.0.0", - "build_hash" : "253032b", - "roles" : [ - "data", - "ingest" - ], - "http" : { - "bound_address" : [ - "[::1]:9204", - "127.0.0.1:9204" - ], - "publish_address" : "127.0.0.1:9204", - "max_content_length_in_bytes" : 104857600 - } - }, - "xPN76uDcTP-DyXaRzPg2NQ" : { - "name" : "c1", - "transport_address" : "127.0.0.1:9306", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "5.0.0", - "build_hash" : "253032b", - "roles" : [ - "ingest" - ], - "http" : { - "bound_address" : [ - "[::1]:9206", - "127.0.0.1:9206" - ], - "publish_address" : "127.0.0.1:9206", - "max_content_length_in_bytes" : 104857600 - } - }, - "RY0oW2d7TISEqazk-U4Kcw" : { - "name" : "d3", - "transport_address" : "127.0.0.1:9305", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "5.0.0", - "build_hash" : "253032b", - "roles" : [ - "data", - "ingest" - ], - "http" : { - "bound_address" : [ - "[::1]:9205", - "127.0.0.1:9205" - ], - "publish_address" : "127.0.0.1:9205", - "max_content_length_in_bytes" 
: 104857600 - } - }, - "tU0rXEZmQ9GsWfn2TQ4kow" : { - "name" : "m3", - "transport_address" : "127.0.0.1:9302", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "5.0.0", - "build_hash" : "253032b", - "roles" : [ + "6eynRPQ1RleJTeGDuTR9mw": { + "name": "m3", + "transport_address": "127.0.0.1:9302", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "5.0.0", + "build_hash": "253032b", + "roles": [ "master", "ingest" ], - "http" : { - "bound_address" : [ + "attributes": { + "dummy": "everyone_has_me", + "number": "3", + "array.0": "m", + "array.1": "3" + }, + "http": { + "bound_address": [ "[::1]:9202", "127.0.0.1:9202" ], - "publish_address" : "127.0.0.1:9202", - "max_content_length_in_bytes" : 104857600 + "publish_address": "127.0.0.1:9202", + "max_content_length_in_bytes": 104857600 + } + }, + "cbGC-ay1QNWaESvEh5513w": { + "name": "d1", + "transport_address": "127.0.0.1:9303", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "5.0.0", + "build_hash": "253032b", + "roles": [ + "data", + "ingest" + ], + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "d", + "array.1": "1" + }, + "http": { + "bound_address": [ + "[::1]:9203", + "127.0.0.1:9203" + ], + "publish_address": "127.0.0.1:9203", + "max_content_length_in_bytes": 104857600 + } + }, + "LexndPpXR2ytYsU5fTElnQ": { + "name": "d2", + "transport_address": "127.0.0.1:9304", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "5.0.0", + "build_hash": "253032b", + "roles": [ + "data", + "ingest" + ], + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "d", + "array.1": "2" + }, + "http": { + "bound_address": [ + "[::1]:9204", + "127.0.0.1:9204" + ], + "publish_address": "127.0.0.1:9204", + "max_content_length_in_bytes": 104857600 + } + }, + "SbNG1DKYSBu20zfOz2gDZQ": { + "name": "d3", + "transport_address": "127.0.0.1:9305", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "5.0.0", + "build_hash": "253032b", + "roles": [ + "data", + 
"ingest" + ], + "attributes": { + "dummy": "everyone_has_me", + "number": "3", + "array.0": "d", + "array.1": "3" + }, + "http": { + "bound_address": [ + "[::1]:9205", + "127.0.0.1:9205" + ], + "publish_address": "127.0.0.1:9205", + "max_content_length_in_bytes": 104857600 + } + }, + "fM4H-m2WTDWmsGsL7jIJew": { + "name": "c1", + "transport_address": "127.0.0.1:9306", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "5.0.0", + "build_hash": "253032b", + "roles": [ + "ingest" + ], + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "c", + "array.1": "1" + }, + "http": { + "bound_address": [ + "[::1]:9206", + "127.0.0.1:9206" + ], + "publish_address": "127.0.0.1:9206", + "max_content_length_in_bytes": 104857600 + } + }, + "pFoh7d0BTbqqI3HKd9na5A": { + "name": "c2", + "transport_address": "127.0.0.1:9307", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "5.0.0", + "build_hash": "253032b", + "roles": [ + "ingest" + ], + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "c", + "array.1": "2" + }, + "http": { + "bound_address": [ + "[::1]:9207", + "127.0.0.1:9207" + ], + "publish_address": "127.0.0.1:9207", + "max_content_length_in_bytes": 104857600 } } } diff --git a/client/sniffer/src/test/resources/6.0.0_nodes_http.json b/client/sniffer/src/test/resources/6.0.0_nodes_http.json index 5a8905da64c..f0535dfdfb0 100644 --- a/client/sniffer/src/test/resources/6.0.0_nodes_http.json +++ b/client/sniffer/src/test/resources/6.0.0_nodes_http.json @@ -1,168 +1,216 @@ { - "_nodes" : { - "total" : 8, - "successful" : 8, - "failed" : 0 + "_nodes": { + "total": 8, + "successful": 8, + "failed": 0 }, - "cluster_name" : "test", - "nodes" : { - "FX9npqGQSL2mOGF8Zkf3hw" : { - "name" : "m2", - "transport_address" : "127.0.0.1:9301", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "6.0.0", - "build_hash" : "8f0685b", - "roles" : [ - "master", - "data", - "ingest" - ], - "http" : { - "bound_address" : [ - 
"[::1]:9201", - "127.0.0.1:9201" - ], - "publish_address" : "127.0.0.1:9201", - "max_content_length_in_bytes" : 104857600 - } - }, - "jmUqzYLGTbWCg127kve3Tg" : { - "name" : "d1", - "transport_address" : "127.0.0.1:9303", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "6.0.0", - "build_hash" : "8f0685b", - "roles" : [ - "data", - "ingest" - ], - "http" : { - "bound_address" : [ - "[::1]:9203", - "127.0.0.1:9203" - ], - "publish_address" : "127.0.0.1:9203", - "max_content_length_in_bytes" : 104857600 - } - }, - "soBU6bzvTOqdLxPstSbJ2g" : { - "name" : "d3", - "transport_address" : "127.0.0.1:9305", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "6.0.0", - "build_hash" : "8f0685b", - "roles" : [ - "data", - "ingest" - ], - "http" : { - "bound_address" : [ - "[::1]:9205", - "127.0.0.1:9205" - ], - "publish_address" : "127.0.0.1:9205", - "max_content_length_in_bytes" : 104857600 - } - }, - "mtYDAhURTP6twdmNAkMnOg" : { - "name" : "m3", - "transport_address" : "127.0.0.1:9302", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "6.0.0", - "build_hash" : "8f0685b", - "roles" : [ + "cluster_name": "elasticsearch", + "nodes": { + "ikXK_skVTfWkhONhldnbkw": { + "name": "m1", + "transport_address": "127.0.0.1:9300", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "6.0.0", + "build_hash": "8f0685b", + "roles": [ "master", "ingest" ], - "http" : { - "bound_address" : [ - "[::1]:9202", - "127.0.0.1:9202" - ], - "publish_address" : "127.0.0.1:9202", - "max_content_length_in_bytes" : 104857600 - } - }, - "URxHiUQPROOt1G22Ev6lXw" : { - "name" : "c2", - "transport_address" : "127.0.0.1:9307", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "6.0.0", - "build_hash" : "8f0685b", - "roles" : [ - "ingest" - ], - "http" : { - "bound_address" : [ - "[::1]:9207", - "127.0.0.1:9207" - ], - "publish_address" : "127.0.0.1:9207", - "max_content_length_in_bytes" : 104857600 - } - }, - "_06S_kWoRqqFR8Z8CS3JRw" : { - "name" : "c1", - 
"transport_address" : "127.0.0.1:9306", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "6.0.0", - "build_hash" : "8f0685b", - "roles" : [ - "ingest" - ], - "http" : { - "bound_address" : [ - "[::1]:9206", - "127.0.0.1:9206" - ], - "publish_address" : "127.0.0.1:9206", - "max_content_length_in_bytes" : 104857600 - } - }, - "QZE5Bd6DQJmnfVs2dglOvA" : { - "name" : "d2", - "transport_address" : "127.0.0.1:9304", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "6.0.0", - "build_hash" : "8f0685b", - "roles" : [ - "data", - "ingest" - ], - "http" : { - "bound_address" : [ - "[::1]:9204", - "127.0.0.1:9204" - ], - "publish_address" : "127.0.0.1:9204", - "max_content_length_in_bytes" : 104857600 - } - }, - "_3mTXg6dSweZn5ReB2fQqw" : { - "name" : "m1", - "transport_address" : "127.0.0.1:9300", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "6.0.0", - "build_hash" : "8f0685b", - "roles" : [ - "master", - "ingest" - ], - "http" : { - "bound_address" : [ + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "m", + "array.1": "1" + }, + "http": { + "bound_address": [ "[::1]:9200", "127.0.0.1:9200" ], - "publish_address" : "127.0.0.1:9200", - "max_content_length_in_bytes" : 104857600 + "publish_address": "127.0.0.1:9200", + "max_content_length_in_bytes": 104857600 + } + }, + "TMHa34w4RqeuYoHCfJGXZg": { + "name": "m2", + "transport_address": "127.0.0.1:9301", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "6.0.0", + "build_hash": "8f0685b", + "roles": [ + "master", + "data", + "ingest" + ], + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "m", + "array.1": "2" + }, + "http": { + "bound_address": [ + "[::1]:9201", + "127.0.0.1:9201" + ], + "publish_address": "127.0.0.1:9201", + "max_content_length_in_bytes": 104857600 + } + }, + "lzaMRJTVT166sgVZdQ5thA": { + "name": "m3", + "transport_address": "127.0.0.1:9302", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "6.0.0", + 
"build_hash": "8f0685b", + "roles": [ + "master", + "ingest" + ], + "attributes": { + "dummy": "everyone_has_me", + "number": "3", + "array.0": "m", + "array.1": "3" + }, + "http": { + "bound_address": [ + "[::1]:9202", + "127.0.0.1:9202" + ], + "publish_address": "127.0.0.1:9202", + "max_content_length_in_bytes": 104857600 + } + }, + "tGP5sUecSd6BLTWk1NWF8Q": { + "name": "d1", + "transport_address": "127.0.0.1:9303", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "6.0.0", + "build_hash": "8f0685b", + "roles": [ + "data", + "ingest" + ], + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "d", + "array.1": "1" + }, + "http": { + "bound_address": [ + "[::1]:9203", + "127.0.0.1:9203" + ], + "publish_address": "127.0.0.1:9203", + "max_content_length_in_bytes": 104857600 + } + }, + "c1UgW5ROTkSa2YnM_T56tw": { + "name": "d2", + "transport_address": "127.0.0.1:9304", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "6.0.0", + "build_hash": "8f0685b", + "roles": [ + "data", + "ingest" + ], + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "d", + "array.1": "2" + }, + "http": { + "bound_address": [ + "[::1]:9204", + "127.0.0.1:9204" + ], + "publish_address": "127.0.0.1:9204", + "max_content_length_in_bytes": 104857600 + } + }, + "QM9yjqjmS72MstpNYV_trg": { + "name": "d3", + "transport_address": "127.0.0.1:9305", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "6.0.0", + "build_hash": "8f0685b", + "roles": [ + "data", + "ingest" + ], + "attributes": { + "dummy": "everyone_has_me", + "number": "3", + "array.0": "d", + "array.1": "3" + }, + "http": { + "bound_address": [ + "[::1]:9205", + "127.0.0.1:9205" + ], + "publish_address": "127.0.0.1:9205", + "max_content_length_in_bytes": 104857600 + } + }, + "wLtzAssoQYeX_4TstgCj0Q": { + "name": "c1", + "transport_address": "127.0.0.1:9306", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "6.0.0", + "build_hash": "8f0685b", + "roles": [ + 
"ingest" + ], + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "c", + "array.1": "1" + }, + "http": { + "bound_address": [ + "[::1]:9206", + "127.0.0.1:9206" + ], + "publish_address": "127.0.0.1:9206", + "max_content_length_in_bytes": 104857600 + } + }, + "ONOzpst8TH-ZebG7fxGwaA": { + "name": "c2", + "transport_address": "127.0.0.1:9307", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "6.0.0", + "build_hash": "8f0685b", + "roles": [ + "ingest" + ], + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "c", + "array.1": "2" + }, + "http": { + "bound_address": [ + "[::1]:9207", + "127.0.0.1:9207" + ], + "publish_address": "127.0.0.1:9207", + "max_content_length_in_bytes": 104857600 } } } diff --git a/client/sniffer/src/test/resources/create_test_nodes_info.bash b/client/sniffer/src/test/resources/create_test_nodes_info.bash new file mode 100644 index 00000000000..f4f1c09882e --- /dev/null +++ b/client/sniffer/src/test/resources/create_test_nodes_info.bash @@ -0,0 +1,107 @@ +#!/bin/bash + +# Recreates the v_nodes_http.json files in this directory. This is +# meant to be an "every once in a while" thing that we do only when +# we want to add a new version of Elasticsearch or configure the +# nodes differently. That is why we don't do this in gradle. It also +# allows us to play fast and loose with error handling. If something +# goes wrong you have to manually clean up which is good because it +# leaves around the kinds of things that we need to debug the failure. + +# I built this file so the next time I have to regenerate these +# v_nodes_http.json files I won't have to reconfigure Elasticsearch +# from scratch. While I was at it I took the time to make sure that +# when we do rebuild the files they don't jump around too much. That +# way the diffs are smaller. 
+ +set -e + +script_path="$( cd "$(dirname "$0")" ; pwd -P )" +work=$(mktemp -d) +pushd ${work} >> /dev/null +echo Working in ${work} + +wget https://download.elasticsearch.org/elasticsearch/release/org/elasticsearch/distribution/tar/elasticsearch/2.0.0/elasticsearch-2.0.0.tar.gz +wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-5.0.0.tar.gz +wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.0.0.tar.gz +sha1sum -c - << __SHAs +e369d8579bd3a2e8b5344278d5043f19f14cac88 elasticsearch-2.0.0.tar.gz +d25f6547bccec9f0b5ea7583815f96a6f50849e0 elasticsearch-5.0.0.tar.gz +__SHAs +sha512sum -c - << __SHAs +25bb622d2fc557d8b8eded634a9b333766f7b58e701359e1bcfafee390776eb323cb7ea7a5e02e8803e25d8b1d3aabec0ec1b0cf492d0bab5689686fe440181c elasticsearch-6.0.0.tar.gz +__SHAs + + +function do_version() { + local version=$1 + local nodes='m1 m2 m3 d1 d2 d3 c1 c2' + rm -rf ${version} + mkdir -p ${version} + pushd ${version} >> /dev/null + + tar xf ../elasticsearch-${version}.tar.gz + local http_port=9200 + for node in ${nodes}; do + mkdir ${node} + cp -r elasticsearch-${version}/* ${node} + local master=$([[ "$node" =~ ^m.* ]] && echo true || echo false) + local data=$([[ "$node" =~ ^d.* ]] && echo true || echo false) + # m2 is always master and data for these test just so we have a node like that + data=$([[ "$node" == 'm2' ]] && echo true || echo ${data}) + local attr=$([ ${version} == '2.0.0' ] && echo '' || echo '.attr') + local transport_port=$((http_port+100)) + + cat >> ${node}/config/elasticsearch.yml << __ES_YML +node.name: ${node} +node.master: ${master} +node.data: ${data} +node${attr}.dummy: everyone_has_me +node${attr}.number: ${node:1} +node${attr}.array: [${node:0:1}, ${node:1}] +http.port: ${http_port} +transport.tcp.port: ${transport_port} +discovery.zen.minimum_master_nodes: 3 +discovery.zen.ping.unicast.hosts: ['localhost:9300','localhost:9301','localhost:9302'] +__ES_YML + + if [ ${version} != '2.0.0' ]; then + perl -pi 
-e 's/-Xm([sx]).+/-Xm${1}512m/g' ${node}/config/jvm.options + fi + + echo "starting ${version}/${node}..." + ${node}/bin/elasticsearch -d -p ${node}/pidfile + + ((http_port++)) + done + + echo "waiting for cluster to form" + # got to wait for all the nodes + until curl -s localhost:9200; do + sleep .25 + done + + echo "waiting for all nodes to join" + until [ $(echo ${nodes} | wc -w) -eq $(curl -s localhost:9200/_cat/nodes | wc -l) ]; do + sleep .25 + done + + # jq sorts the nodes by their http host so the file doesn't jump around when we regenerate it + curl -s localhost:9200/_nodes/http?pretty \ + | jq '[to_entries[] | ( select(.key == "nodes").value|to_entries|sort_by(.value.http.publish_address)|from_entries|{"key": "nodes", "value": .} ) // .] | from_entries' \ + > ${script_path}/${version}_nodes_http.json + + for node in ${nodes}; do + echo "stopping ${version}/${node}..." + kill $(cat ${node}/pidfile) + done + + popd >> /dev/null +} + +JAVA_HOME=$JAVA8_HOME do_version 2.0.0 +JAVA_HOME=$JAVA8_HOME do_version 5.0.0 +JAVA_HOME=$JAVA8_HOME do_version 6.0.0 + +popd >> /dev/null +rm -rf ${work} diff --git a/client/sniffer/src/test/resources/readme.txt b/client/sniffer/src/test/resources/readme.txt index ccb9bb15edb..c6dd32a0410 100644 --- a/client/sniffer/src/test/resources/readme.txt +++ b/client/sniffer/src/test/resources/readme.txt @@ -2,3 +2,5 @@ few nodes in different configurations locally at various versions. They are for testing `ElasticsearchNodesSniffer` against different versions of Elasticsearch. + +See create_test_nodes_info.bash for how to create these. 
diff --git a/docs/java-rest/low-level/usage.asciidoc b/docs/java-rest/low-level/usage.asciidoc index 407947000de..1f8b302715f 100644 --- a/docs/java-rest/low-level/usage.asciidoc +++ b/docs/java-rest/low-level/usage.asciidoc @@ -312,9 +312,17 @@ adds an extra header: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-options-customize] +include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-options-customize-header] -------------------------------------------------- +Or you can send requests to nodes with a particular attribute: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-options-customize-attribute] +-------------------------------------------------- +<1> Replace the node selector with one that selects nodes on a particular rack. + ==== Multiple parallel asynchronous actions diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc b/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc index c2259c7b55d..3ee03403874 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc @@ -198,9 +198,7 @@ header. The warnings must match exactly. Using it looks like this: .... If the arguments to `do` include `node_selector` then the request is only -sent to nodes that match the `node_selector`. Currently only the `version` -selector is supported and it has the same logic as the `version` field in -`skip`. It looks like this: +sent to nodes that match the `node_selector`. It looks like this: .... "test id": @@ -216,6 +214,19 @@ selector is supported and it has the same logic as the `version` field in body: { foo: bar } .... 
+If you list multiple selectors then the request will only go to nodes that +match all of those selectors. The following selectors are supported: +* `version`: Only nodes who's version is within the range will receive the +request. The syntax for the pattern is the same as when `version` is within +`skip`. +* `attribute`: Only nodes that have an attribute matching the name and value +of the provided attribute match. Looks like: +.... + node_selector: + attribute: + name: value +.... + === `set` For some tests, it is necessary to extract a value from the previous `response`, in diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java index 8cfbf11bd64..8697b0bedcd 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java @@ -21,6 +21,7 @@ package org.elasticsearch.test.rest.yaml.section; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; +import org.elasticsearch.client.HasAttributeNodeSelector; import org.elasticsearch.client.Node; import org.elasticsearch.client.NodeSelector; import org.elasticsearch.common.ParsingException; @@ -31,6 +32,7 @@ import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentLocation; +import org.elasticsearch.common.xcontent.XContentParseException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext; @@ -131,11 +133,10 @@ public class DoSection implements ExecutableSection { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == 
XContentParser.Token.FIELD_NAME) { selectorName = parser.currentName(); - } else if (token.isValue()) { - NodeSelector newSelector = buildNodeSelector( - parser.getTokenLocation(), selectorName, parser.text()); - nodeSelector = nodeSelector == NodeSelector.ANY ? - newSelector : new ComposeNodeSelector(nodeSelector, newSelector); + } else { + NodeSelector newSelector = buildNodeSelector(selectorName, parser); + nodeSelector = nodeSelector == NodeSelector.ANY ? + newSelector : new ComposeNodeSelector(nodeSelector, newSelector); } } } else if (currentFieldName != null) { // must be part of API call then @@ -368,34 +369,64 @@ public class DoSection implements ExecutableSection { not(equalTo(409))))); } - private static NodeSelector buildNodeSelector(XContentLocation location, String name, String value) { + private static NodeSelector buildNodeSelector(String name, XContentParser parser) throws IOException { switch (name) { + case "attribute": + return parseAttributeValuesSelector(parser); case "version": - Version[] range = SkipSection.parseVersionRange(value); - return new NodeSelector() { - @Override - public void select(Iterable nodes) { - for (Iterator itr = nodes.iterator(); itr.hasNext();) { - Node node = itr.next(); - if (node.getVersion() == null) { - throw new IllegalStateException("expected [version] metadata to be set but got " - + node); - } - Version version = Version.fromString(node.getVersion()); - if (false == (version.onOrAfter(range[0]) && version.onOrBefore(range[1]))) { - itr.remove(); - } + return parseVersionSelector(parser); + default: + throw new XContentParseException(parser.getTokenLocation(), "unknown node_selector [" + name + "]"); + } + } + + private static NodeSelector parseAttributeValuesSelector(XContentParser parser) throws IOException { + if (parser.currentToken() != XContentParser.Token.START_OBJECT) { + throw new XContentParseException(parser.getTokenLocation(), "expected START_OBJECT"); + } + String key = null; + 
XContentParser.Token token; + NodeSelector result = NodeSelector.ANY; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + key = parser.currentName(); + } else if (token.isValue()) { + NodeSelector newSelector = new HasAttributeNodeSelector(key, parser.text()); + result = result == NodeSelector.ANY ? + newSelector : new ComposeNodeSelector(result, newSelector); + } else { + throw new XContentParseException(parser.getTokenLocation(), "expected [" + key + "] to be a value"); + } + } + return result; + } + + private static NodeSelector parseVersionSelector(XContentParser parser) throws IOException { + if (false == parser.currentToken().isValue()) { + throw new XContentParseException(parser.getTokenLocation(), "expected [version] to be a value"); + } + Version[] range = SkipSection.parseVersionRange(parser.text()); + return new NodeSelector() { + @Override + public void select(Iterable nodes) { + for (Iterator itr = nodes.iterator(); itr.hasNext();) { + Node node = itr.next(); + if (node.getVersion() == null) { + throw new IllegalStateException("expected [version] metadata to be set but got " + + node); + } + Version version = Version.fromString(node.getVersion()); + if (false == (version.onOrAfter(range[0]) && version.onOrBefore(range[1]))) { + itr.remove(); } } + } - @Override - public String toString() { - return "version between [" + range[0] + "] and [" + range[1] + "]"; - } - }; - default: - throw new IllegalArgumentException("unknown node_selector [" + name + "]"); - } + @Override + public String toString() { + return "version between [" + range[0] + "] and [" + range[1] + "]"; + } + }; } /** diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java index 719044cfc81..c4c96d9fe2b 100644 --- 
a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java @@ -35,6 +35,7 @@ import org.hamcrest.MatcherAssert; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.HashMap; import java.util.List; import java.util.Map; @@ -511,7 +512,7 @@ public class DoSectionTests extends AbstractClientYamlTestFragmentParserTestCase "just one entry this time"))); } - public void testNodeSelector() throws IOException { + public void testNodeSelectorByVersion() throws IOException { parser = createParser(YamlXContent.yamlXContent, "node_selector:\n" + " version: 5.2.0-6.0.0\n" + @@ -541,8 +542,90 @@ public class DoSectionTests extends AbstractClientYamlTestFragmentParserTestCase emptyList(), emptyMap(), doSection.getApiCallSection().getNodeSelector()); } - private Node nodeWithVersion(String version) { - return new Node(new HttpHost("dummy"), null, null, version, null); + private static Node nodeWithVersion(String version) { + return new Node(new HttpHost("dummy"), null, null, version, null, null); + } + + public void testNodeSelectorByAttribute() throws IOException { + parser = createParser(YamlXContent.yamlXContent, + "node_selector:\n" + + " attribute:\n" + + " attr: val\n" + + "indices.get_field_mapping:\n" + + " index: test_index" + ); + + DoSection doSection = DoSection.parse(parser); + assertNotSame(NodeSelector.ANY, doSection.getApiCallSection().getNodeSelector()); + Node hasAttr = nodeWithAttributes(singletonMap("attr", singletonList("val"))); + Node hasAttrWrongValue = nodeWithAttributes(singletonMap("attr", singletonList("notval"))); + Node notHasAttr = nodeWithAttributes(singletonMap("notattr", singletonList("val"))); + { + List nodes = new ArrayList<>(); + nodes.add(hasAttr); + nodes.add(hasAttrWrongValue); + nodes.add(notHasAttr); + doSection.getApiCallSection().getNodeSelector().select(nodes); + 
assertEquals(Arrays.asList(hasAttr), nodes); + } + + parser = createParser(YamlXContent.yamlXContent, + "node_selector:\n" + + " attribute:\n" + + " attr: val\n" + + " attr2: val2\n" + + "indices.get_field_mapping:\n" + + " index: test_index" + ); + + DoSection doSectionWithTwoAttributes = DoSection.parse(parser); + assertNotSame(NodeSelector.ANY, doSection.getApiCallSection().getNodeSelector()); + Node hasAttr2 = nodeWithAttributes(singletonMap("attr2", singletonList("val2"))); + Map> bothAttributes = new HashMap<>(); + bothAttributes.put("attr", singletonList("val")); + bothAttributes.put("attr2", singletonList("val2")); + Node hasBoth = nodeWithAttributes(bothAttributes); + { + List nodes = new ArrayList<>(); + nodes.add(hasAttr); + nodes.add(hasAttrWrongValue); + nodes.add(notHasAttr); + nodes.add(hasAttr2); + nodes.add(hasBoth); + doSectionWithTwoAttributes.getApiCallSection().getNodeSelector().select(nodes); + assertEquals(Arrays.asList(hasBoth), nodes); + } + } + + private static Node nodeWithAttributes(Map> attributes) { + return new Node(new HttpHost("dummy"), null, null, null, null, attributes); + } + + public void testNodeSelectorByTwoThings() throws IOException { + parser = createParser(YamlXContent.yamlXContent, + "node_selector:\n" + + " version: 5.2.0-6.0.0\n" + + " attribute:\n" + + " attr: val\n" + + "indices.get_field_mapping:\n" + + " index: test_index" + ); + + DoSection doSection = DoSection.parse(parser); + assertNotSame(NodeSelector.ANY, doSection.getApiCallSection().getNodeSelector()); + Node both = nodeWithVersionAndAttributes("5.2.1", singletonMap("attr", singletonList("val"))); + Node badVersion = nodeWithVersionAndAttributes("5.1.1", singletonMap("attr", singletonList("val"))); + Node badAttr = nodeWithVersionAndAttributes("5.2.1", singletonMap("notattr", singletonList("val"))); + List nodes = new ArrayList<>(); + nodes.add(both); + nodes.add(badVersion); + nodes.add(badAttr); + 
doSection.getApiCallSection().getNodeSelector().select(nodes); + assertEquals(Arrays.asList(both), nodes); + } + + private static Node nodeWithVersionAndAttributes(String version, Map> attributes) { + return new Node(new HttpHost("dummy"), null, null, version, null, attributes); } private void assertJsonEquals(Map actual, String expected) throws IOException { From 3274e7fd1a6ca22e3a642f415554674e8030fe85 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Fri, 15 Jun 2018 14:55:14 +0200 Subject: [PATCH 11/15] [Docs] Remove reference to repository-s3 plugin creating an S3 bucket (#31359) Closes #30910 --- docs/plugins/repository-s3.asciidoc | 17 +---------------- 1 file changed, 1 insertion(+), 16 deletions(-) diff --git a/docs/plugins/repository-s3.asciidoc b/docs/plugins/repository-s3.asciidoc index bff64ebdc91..6701d53c240 100644 --- a/docs/plugins/repository-s3.asciidoc +++ b/docs/plugins/repository-s3.asciidoc @@ -284,22 +284,7 @@ You may further restrict the permissions by specifying a prefix within the bucke // NOTCONSOLE The bucket needs to exist to register a repository for snapshots. If you did not create the bucket then the repository -registration will fail. If you want Elasticsearch to create the bucket instead, you can add the permission to create a -specific bucket like this: - -[source,js] ----- -{ - "Action": [ - "s3:CreateBucket" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws:s3:::snaps.example.com" - ] -} ----- -// NOTCONSOLE +registration will fail. [[repository-s3-aws-vpc]] [float] From afc91e92fb697a3241b26a5bb7cdc0a3769f4085 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Fri, 15 Jun 2018 15:49:14 +0200 Subject: [PATCH 12/15] Add QA project and fixture based test for discovery-ec2 plugin (#31107) This commit adds a new QA sub project to the discovery-ec2 plugin. This project uses a fixture to test the plugin using a multi-node cluster. 
Once all nodes are started, the nodes transport addresses are written in a file that is later read by the fixture. --- plugins/discovery-ec2/build.gradle | 5 + .../discovery-ec2/qa/amazon-ec2/build.gradle | 72 +++++ ...azonEC2DiscoveryClientYamlTestSuiteIT.java | 37 +++ .../discovery/ec2/AmazonEC2Fixture.java | 194 ++++++++++++++ .../test/discovery_ec2/10_basic.yml | 15 ++ plugins/discovery-ec2/qa/build.gradle | 0 .../Ec2DiscoveryClusterFormationTests.java | 252 ------------------ 7 files changed, 323 insertions(+), 252 deletions(-) create mode 100644 plugins/discovery-ec2/qa/amazon-ec2/build.gradle create mode 100644 plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2DiscoveryClientYamlTestSuiteIT.java create mode 100644 plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Fixture.java create mode 100644 plugins/discovery-ec2/qa/amazon-ec2/src/test/resources/rest-api-spec/test/discovery_ec2/10_basic.yml create mode 100644 plugins/discovery-ec2/qa/build.gradle delete mode 100644 plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryClusterFormationTests.java diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index 7daf944f818..b1c3b62fd6e 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -53,6 +53,11 @@ test { systemProperty 'tests.artifact', project.name } +check { + // also execute the QA tests when testing the plugin + dependsOn 'qa:amazon-ec2:check' +} + thirdPartyAudit.excludes = [ // classes are missing 'com.amazonaws.jmespath.JmesPathEvaluationVisitor', diff --git a/plugins/discovery-ec2/qa/amazon-ec2/build.gradle b/plugins/discovery-ec2/qa/amazon-ec2/build.gradle new file mode 100644 index 00000000000..90fac9e80cd --- /dev/null +++ b/plugins/discovery-ec2/qa/amazon-ec2/build.gradle @@ -0,0 +1,72 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license 
agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + + +import org.elasticsearch.gradle.MavenFilteringHack +import org.elasticsearch.gradle.test.AntFixture + +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: ':plugins:discovery-ec2', configuration: 'runtime') +} + +final int ec2NumberOfNodes = 3 +File ec2DiscoveryFile = new File(project.buildDir, 'generated-resources/nodes.uri') + +/** A task to start the AmazonEC2Fixture which emulates an EC2 service **/ +task ec2Fixture(type: AntFixture) { + dependsOn compileTestJava + env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }" + executable = new File(project.runtimeJavaHome, 'bin/java') + args 'org.elasticsearch.discovery.ec2.AmazonEC2Fixture', baseDir, ec2DiscoveryFile.absolutePath +} + +Map expansions = [ + 'expected_nodes': ec2NumberOfNodes +] + +processTestResources { + inputs.properties(expansions) + MavenFilteringHack.filter(it, expansions) +} + +integTestCluster { + dependsOn ec2Fixture + numNodes = ec2NumberOfNodes + plugin ':plugins:discovery-ec2' + keystoreSetting 'discovery.ec2.access_key', 'ec2_integration_test_access_key' + keystoreSetting 'discovery.ec2.secret_key', 'ec2_integration_test_secret_key' + setting 
'discovery.zen.hosts_provider', 'ec2' + setting 'discovery.ec2.endpoint', "http://${-> ec2Fixture.addressAndPort}" + unicastTransportUri = { seedNode, node, ant -> return null } + + waitCondition = { node, ant -> + ec2DiscoveryFile.parentFile.mkdirs() + ec2DiscoveryFile.setText(integTest.nodes.collect { n -> "${n.transportUri()}" }.join('\n'), 'UTF-8') + + File tmpFile = new File(node.cwd, 'wait.success') + ant.get(src: "http://${node.httpUri()}/", + dest: tmpFile.toString(), + ignoreerrors: true, + retries: 10) + return tmpFile.exists() + } +} diff --git a/plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2DiscoveryClientYamlTestSuiteIT.java b/plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2DiscoveryClientYamlTestSuiteIT.java new file mode 100644 index 00000000000..09d5a8d6fdf --- /dev/null +++ b/plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2DiscoveryClientYamlTestSuiteIT.java @@ -0,0 +1,37 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.discovery.ec2; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; + +public class AmazonEC2DiscoveryClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + + public AmazonEC2DiscoveryClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return ESClientYamlSuiteTestCase.createParameters(); + } +} diff --git a/plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Fixture.java b/plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Fixture.java new file mode 100644 index 00000000000..0cf4cbdeadb --- /dev/null +++ b/plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Fixture.java @@ -0,0 +1,194 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.discovery.ec2; + +import org.apache.http.NameValuePair; +import org.apache.http.client.utils.URLEncodedUtils; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.fixture.AbstractHttpFixture; + +import javax.xml.XMLConstants; +import javax.xml.stream.XMLOutputFactory; +import javax.xml.stream.XMLStreamWriter; +import java.io.IOException; +import java.io.StringWriter; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Objects; +import java.util.UUID; + +import static java.nio.charset.StandardCharsets.UTF_8; + +/** + * {@link AmazonEC2Fixture} is a fixture that emulates an AWS EC2 service. + */ +public class AmazonEC2Fixture extends AbstractHttpFixture { + + private final Path nodes; + + private AmazonEC2Fixture(final String workingDir, final String nodesUriPath) { + super(workingDir); + this.nodes = toPath(Objects.requireNonNull(nodesUriPath)); + } + + public static void main(String[] args) throws Exception { + if (args == null || args.length != 2) { + throw new IllegalArgumentException("AmazonEC2Fixture "); + } + + final AmazonEC2Fixture fixture = new AmazonEC2Fixture(args[0], args[1]); + fixture.listen(); + } + + @Override + protected Response handle(final Request request) throws IOException { + if ("/".equals(request.getPath()) && ("POST".equals(request.getMethod()))) { + final String userAgent = request.getHeader("User-Agent"); + if (userAgent != null && userAgent.startsWith("aws-sdk-java")) { + // Simulate an EC2 DescribeInstancesResponse + byte[] responseBody = EMPTY_BYTE; + for (NameValuePair parse : URLEncodedUtils.parse(new String(request.getBody(), UTF_8), UTF_8)) { + if ("Action".equals(parse.getName())) { + responseBody = generateDescribeInstancesResponse(); + break; + } + } + return new Response(RestStatus.OK.getStatus(), contentType("text/xml; charset=UTF-8"), responseBody); + } + } + return null; 
+ } + + /** + * Generates a XML response that describe the EC2 instances + */ + private byte[] generateDescribeInstancesResponse() { + final XMLOutputFactory xmlOutputFactory = XMLOutputFactory.newFactory(); + xmlOutputFactory.setProperty(XMLOutputFactory.IS_REPAIRING_NAMESPACES, true); + + final StringWriter out = new StringWriter(); + XMLStreamWriter sw; + try { + sw = xmlOutputFactory.createXMLStreamWriter(out); + sw.writeStartDocument(); + + String namespace = "http://ec2.amazonaws.com/doc/2013-02-01/"; + sw.setDefaultNamespace(namespace); + sw.writeStartElement(XMLConstants.DEFAULT_NS_PREFIX, "DescribeInstancesResponse", namespace); + { + sw.writeStartElement("requestId"); + sw.writeCharacters(UUID.randomUUID().toString()); + sw.writeEndElement(); + + sw.writeStartElement("reservationSet"); + { + if (Files.exists(nodes)) { + for (String address : Files.readAllLines(nodes)) { + + sw.writeStartElement("item"); + { + sw.writeStartElement("reservationId"); + sw.writeCharacters(UUID.randomUUID().toString()); + sw.writeEndElement(); + + sw.writeStartElement("instancesSet"); + { + sw.writeStartElement("item"); + { + sw.writeStartElement("instanceId"); + sw.writeCharacters(UUID.randomUUID().toString()); + sw.writeEndElement(); + + sw.writeStartElement("imageId"); + sw.writeCharacters(UUID.randomUUID().toString()); + sw.writeEndElement(); + + sw.writeStartElement("instanceState"); + { + sw.writeStartElement("code"); + sw.writeCharacters("16"); + sw.writeEndElement(); + + sw.writeStartElement("name"); + sw.writeCharacters("running"); + sw.writeEndElement(); + } + sw.writeEndElement(); + + sw.writeStartElement("privateDnsName"); + sw.writeCharacters(address); + sw.writeEndElement(); + + sw.writeStartElement("dnsName"); + sw.writeCharacters(address); + sw.writeEndElement(); + + sw.writeStartElement("instanceType"); + sw.writeCharacters("m1.medium"); + sw.writeEndElement(); + + sw.writeStartElement("placement"); + { + sw.writeStartElement("availabilityZone"); + 
sw.writeCharacters("use-east-1e"); + sw.writeEndElement(); + + sw.writeEmptyElement("groupName"); + + sw.writeStartElement("tenancy"); + sw.writeCharacters("default"); + sw.writeEndElement(); + } + sw.writeEndElement(); + + sw.writeStartElement("privateIpAddress"); + sw.writeCharacters(address); + sw.writeEndElement(); + + sw.writeStartElement("ipAddress"); + sw.writeCharacters(address); + sw.writeEndElement(); + } + sw.writeEndElement(); + } + sw.writeEndElement(); + } + sw.writeEndElement(); + } + } + sw.writeEndElement(); + } + sw.writeEndElement(); + + sw.writeEndDocument(); + sw.flush(); + } + } catch (Exception e) { + throw new RuntimeException(e); + } + return out.toString().getBytes(UTF_8); + } + + @SuppressForbidden(reason = "Paths#get is fine - we don't have environment here") + private static Path toPath(final String dir) { + return Paths.get(dir); + } +} diff --git a/plugins/discovery-ec2/qa/amazon-ec2/src/test/resources/rest-api-spec/test/discovery_ec2/10_basic.yml b/plugins/discovery-ec2/qa/amazon-ec2/src/test/resources/rest-api-spec/test/discovery_ec2/10_basic.yml new file mode 100644 index 00000000000..682327b72dd --- /dev/null +++ b/plugins/discovery-ec2/qa/amazon-ec2/src/test/resources/rest-api-spec/test/discovery_ec2/10_basic.yml @@ -0,0 +1,15 @@ +# Integration tests for discovery-ec2 +setup: + - do: + cluster.health: + wait_for_status: green + wait_for_nodes: ${expected_nodes} + +--- +"All nodes are correctly discovered": + + - do: + nodes.info: + metric: [ transport ] + + - match: { _nodes.total: ${expected_nodes} } diff --git a/plugins/discovery-ec2/qa/build.gradle b/plugins/discovery-ec2/qa/build.gradle new file mode 100644 index 00000000000..e69de29bb2d diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryClusterFormationTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryClusterFormationTests.java deleted file mode 100644 index 49fd9de71ec..00000000000 --- 
a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryClusterFormationTests.java +++ /dev/null @@ -1,252 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.discovery.ec2; - -import com.amazonaws.util.IOUtils; -import com.sun.net.httpserver.Headers; -import com.sun.net.httpserver.HttpServer; -import org.apache.http.NameValuePair; -import org.apache.http.client.utils.URLEncodedUtils; -import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.io.FileSystemUtils; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.settings.MockSecureSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.discovery.DiscoveryModule; -import org.elasticsearch.mocksocket.MockHttpServer; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.ESIntegTestCase; -import org.junit.AfterClass; -import org.junit.BeforeClass; - -import javax.xml.XMLConstants; -import javax.xml.stream.XMLOutputFactory; -import javax.xml.stream.XMLStreamException; -import javax.xml.stream.XMLStreamWriter; -import java.io.IOException; -import java.io.OutputStream; -import java.io.StringWriter; -import 
java.net.InetAddress; -import java.net.InetSocketAddress; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.ExecutionException; - -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; -import static org.hamcrest.Matchers.equalTo; - -@ESIntegTestCase.ClusterScope(supportsDedicatedMasters = false, numDataNodes = 2, numClientNodes = 0) -@SuppressForbidden(reason = "use http server") -// TODO this should be a IT but currently all ITs in this project run against a real cluster -public class Ec2DiscoveryClusterFormationTests extends ESIntegTestCase { - - private static HttpServer httpServer; - private static Path logDir; - - @Override - protected Collection> nodePlugins() { - return Arrays.asList(Ec2DiscoveryPlugin.class); - } - - @Override - protected Settings nodeSettings(int nodeOrdinal) { - Path resolve = logDir.resolve(Integer.toString(nodeOrdinal)); - try { - Files.createDirectory(resolve); - } catch (IOException e) { - throw new RuntimeException(e); - } - MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString(AwsEc2Service.ACCESS_KEY_SETTING.getKey(), "some_access"); - secureSettings.setString(AwsEc2Service.SECRET_KEY_SETTING.getKey(), "some_secret"); - return Settings.builder().put(super.nodeSettings(nodeOrdinal)) - .put(DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey(), "ec2") - .put("path.logs", resolve) - .put("transport.tcp.port", 0) - .put("node.portsfile", "true") - .put(AwsEc2Service.ENDPOINT_SETTING.getKey(), "http://" + httpServer.getAddress().getHostName() + ":" + - httpServer.getAddress().getPort()) - .setSecureSettings(secureSettings) - .build(); - } - - /** - * Creates mock EC2 endpoint providing the list of started nodes to the DescribeInstances API call - */ - 
@BeforeClass - public static void startHttpd() throws Exception { - logDir = createTempDir(); - httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress().getHostAddress(), 0), 0); - - httpServer.createContext("/", (s) -> { - Headers headers = s.getResponseHeaders(); - headers.add("Content-Type", "text/xml; charset=UTF-8"); - String action = null; - for (NameValuePair parse : URLEncodedUtils.parse(IOUtils.toString(s.getRequestBody()), StandardCharsets.UTF_8)) { - if ("Action".equals(parse.getName())) { - action = parse.getValue(); - break; - } - } - assertThat(action, equalTo("DescribeInstances")); - - XMLOutputFactory xmlOutputFactory = XMLOutputFactory.newFactory(); - xmlOutputFactory.setProperty(XMLOutputFactory.IS_REPAIRING_NAMESPACES, true); - StringWriter out = new StringWriter(); - XMLStreamWriter sw; - try { - sw = xmlOutputFactory.createXMLStreamWriter(out); - sw.writeStartDocument(); - - String namespace = "http://ec2.amazonaws.com/doc/2013-02-01/"; - sw.setDefaultNamespace(namespace); - sw.writeStartElement(XMLConstants.DEFAULT_NS_PREFIX, "DescribeInstancesResponse", namespace); - { - sw.writeStartElement("requestId"); - sw.writeCharacters(UUID.randomUUID().toString()); - sw.writeEndElement(); - - sw.writeStartElement("reservationSet"); - { - Path[] files = FileSystemUtils.files(logDir); - for (int i = 0; i < files.length; i++) { - Path resolve = files[i].resolve("transport.ports"); - if (Files.exists(resolve)) { - List addresses = Files.readAllLines(resolve); - Collections.shuffle(addresses, random()); - - sw.writeStartElement("item"); - { - sw.writeStartElement("reservationId"); - sw.writeCharacters(UUID.randomUUID().toString()); - sw.writeEndElement(); - - sw.writeStartElement("instancesSet"); - { - sw.writeStartElement("item"); - { - sw.writeStartElement("instanceId"); - sw.writeCharacters(UUID.randomUUID().toString()); - sw.writeEndElement(); - - sw.writeStartElement("imageId"); - 
sw.writeCharacters(UUID.randomUUID().toString()); - sw.writeEndElement(); - - sw.writeStartElement("instanceState"); - { - sw.writeStartElement("code"); - sw.writeCharacters("16"); - sw.writeEndElement(); - - sw.writeStartElement("name"); - sw.writeCharacters("running"); - sw.writeEndElement(); - } - sw.writeEndElement(); - - sw.writeStartElement("privateDnsName"); - sw.writeCharacters(addresses.get(0)); - sw.writeEndElement(); - - sw.writeStartElement("dnsName"); - sw.writeCharacters(addresses.get(0)); - sw.writeEndElement(); - - sw.writeStartElement("instanceType"); - sw.writeCharacters("m1.medium"); - sw.writeEndElement(); - - sw.writeStartElement("placement"); - { - sw.writeStartElement("availabilityZone"); - sw.writeCharacters("use-east-1e"); - sw.writeEndElement(); - - sw.writeEmptyElement("groupName"); - - sw.writeStartElement("tenancy"); - sw.writeCharacters("default"); - sw.writeEndElement(); - } - sw.writeEndElement(); - - sw.writeStartElement("privateIpAddress"); - sw.writeCharacters(addresses.get(0)); - sw.writeEndElement(); - - sw.writeStartElement("ipAddress"); - sw.writeCharacters(addresses.get(0)); - sw.writeEndElement(); - } - sw.writeEndElement(); - } - sw.writeEndElement(); - } - sw.writeEndElement(); - } - } - } - sw.writeEndElement(); - } - sw.writeEndElement(); - - sw.writeEndDocument(); - sw.flush(); - - final byte[] responseAsBytes = out.toString().getBytes(StandardCharsets.UTF_8); - s.sendResponseHeaders(200, responseAsBytes.length); - OutputStream responseBody = s.getResponseBody(); - responseBody.write(responseAsBytes); - responseBody.close(); - } catch (XMLStreamException e) { - Loggers.getLogger(Ec2DiscoveryClusterFormationTests.class).error("Failed serializing XML", e); - throw new RuntimeException(e); - } - }); - - httpServer.start(); - } - - @AfterClass - public static void stopHttpd() throws IOException { - for (int i = 0; i < internalCluster().size(); i++) { - // shut them all down otherwise we get spammed with connection refused 
exceptions - internalCluster().stopRandomDataNode(); - } - httpServer.stop(0); - httpServer = null; - logDir = null; - } - - public void testJoin() throws ExecutionException, InterruptedException { - // only wait for the cluster to form - assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(2)).get()); - // add one more node and wait for it to join - internalCluster().startDataOnlyNode(); - assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(3)).get()); - } -} From 02346c20a2022a927d88ed3b6819634d7b35140e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Fri, 15 Jun 2018 15:55:39 +0200 Subject: [PATCH 13/15] Rankeval: Fold template test project into main module (#31203) This change moves tests in `smoke-test-rank-eval-with-mustache` into the main ranking evaluation module by declaring that the integration testing cluster requires the `lang-mustache` plugin. This avoids having to maintain the qa project for only one basic test suite. 
--- modules/rank-eval/build.gradle | 9 +++++ .../test/rank_eval/40_rank_eval_templated.yml | 0 .../build.gradle | 27 ------------- ...stRankEvalWithMustacheYAMLTestSuiteIT.java | 39 ------------------- 4 files changed, 9 insertions(+), 66 deletions(-) rename qa/smoke-test-rank-eval-with-mustache/src/test/resources/rest-api-spec/test/rank-eval/10_rank_eval_templated.yml => modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/40_rank_eval_templated.yml (100%) delete mode 100644 qa/smoke-test-rank-eval-with-mustache/build.gradle delete mode 100644 qa/smoke-test-rank-eval-with-mustache/src/test/java/org/elasticsearch/index/rankeval/SmokeTestRankEvalWithMustacheYAMLTestSuiteIT.java diff --git a/modules/rank-eval/build.gradle b/modules/rank-eval/build.gradle index f0479f6e4ab..0692fda5025 100644 --- a/modules/rank-eval/build.gradle +++ b/modules/rank-eval/build.gradle @@ -23,3 +23,12 @@ esplugin { hasClientJar = true } +integTestCluster { + // Modules who's integration is explicitly tested in integration tests + module project(':modules:lang-mustache') +} + +run { + // Modules who's integration is explicitly tested in integration tests + module project(':modules:lang-mustache') +} diff --git a/qa/smoke-test-rank-eval-with-mustache/src/test/resources/rest-api-spec/test/rank-eval/10_rank_eval_templated.yml b/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/40_rank_eval_templated.yml similarity index 100% rename from qa/smoke-test-rank-eval-with-mustache/src/test/resources/rest-api-spec/test/rank-eval/10_rank_eval_templated.yml rename to modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/40_rank_eval_templated.yml diff --git a/qa/smoke-test-rank-eval-with-mustache/build.gradle b/qa/smoke-test-rank-eval-with-mustache/build.gradle deleted file mode 100644 index d88ee719161..00000000000 --- a/qa/smoke-test-rank-eval-with-mustache/build.gradle +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more 
contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -apply plugin: 'elasticsearch.standalone-rest-test' -apply plugin: 'elasticsearch.rest-test' - - -dependencies { - testCompile project(path: ':modules:rank-eval', configuration: 'runtime') - testCompile project(path: ':modules:lang-mustache', configuration: 'runtime') -} diff --git a/qa/smoke-test-rank-eval-with-mustache/src/test/java/org/elasticsearch/index/rankeval/SmokeTestRankEvalWithMustacheYAMLTestSuiteIT.java b/qa/smoke-test-rank-eval-with-mustache/src/test/java/org/elasticsearch/index/rankeval/SmokeTestRankEvalWithMustacheYAMLTestSuiteIT.java deleted file mode 100644 index b8b1607065c..00000000000 --- a/qa/smoke-test-rank-eval-with-mustache/src/test/java/org/elasticsearch/index/rankeval/SmokeTestRankEvalWithMustacheYAMLTestSuiteIT.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.rankeval; - -import com.carrotsearch.randomizedtesting.annotations.Name; -import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; - -import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; -import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; - -public class SmokeTestRankEvalWithMustacheYAMLTestSuiteIT extends ESClientYamlSuiteTestCase { - - public SmokeTestRankEvalWithMustacheYAMLTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { - super(testCandidate); - } - - @ParametersFactory - public static Iterable parameters() throws Exception { - return ESClientYamlSuiteTestCase.createParameters(); - } - -} From 8453ca638d4d8a0d82628484a2a8c0183929b066 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 15 Jun 2018 10:58:21 -0400 Subject: [PATCH 14/15] Upgrade to Lucene-7.4.0-snapshot-518d303506 (#31360) --- buildSrc/version.properties | 2 +- .../lucene-expressions-7.4.0-snapshot-0a7c3f462f.jar.sha1 | 1 - .../lucene-expressions-7.4.0-snapshot-518d303506.jar.sha1 | 1 + .../lucene-analyzers-icu-7.4.0-snapshot-0a7c3f462f.jar.sha1 | 1 - .../lucene-analyzers-icu-7.4.0-snapshot-518d303506.jar.sha1 | 1 + ...lucene-analyzers-kuromoji-7.4.0-snapshot-0a7c3f462f.jar.sha1 | 1 - ...lucene-analyzers-kuromoji-7.4.0-snapshot-518d303506.jar.sha1 | 1 + .../lucene-analyzers-nori-7.4.0-snapshot-0a7c3f462f.jar.sha1 | 1 - .../lucene-analyzers-nori-7.4.0-snapshot-518d303506.jar.sha1 | 1 + ...lucene-analyzers-phonetic-7.4.0-snapshot-0a7c3f462f.jar.sha1 | 1 - 
...lucene-analyzers-phonetic-7.4.0-snapshot-518d303506.jar.sha1 | 1 + .../lucene-analyzers-smartcn-7.4.0-snapshot-0a7c3f462f.jar.sha1 | 1 - .../lucene-analyzers-smartcn-7.4.0-snapshot-518d303506.jar.sha1 | 1 + .../lucene-analyzers-stempel-7.4.0-snapshot-0a7c3f462f.jar.sha1 | 1 - .../lucene-analyzers-stempel-7.4.0-snapshot-518d303506.jar.sha1 | 1 + ...cene-analyzers-morfologik-7.4.0-snapshot-0a7c3f462f.jar.sha1 | 1 - ...cene-analyzers-morfologik-7.4.0-snapshot-518d303506.jar.sha1 | 1 + .../lucene-analyzers-common-7.4.0-snapshot-0a7c3f462f.jar.sha1 | 1 - .../lucene-analyzers-common-7.4.0-snapshot-518d303506.jar.sha1 | 1 + .../lucene-backward-codecs-7.4.0-snapshot-0a7c3f462f.jar.sha1 | 1 - .../lucene-backward-codecs-7.4.0-snapshot-518d303506.jar.sha1 | 1 + server/licenses/lucene-core-7.4.0-snapshot-0a7c3f462f.jar.sha1 | 1 - server/licenses/lucene-core-7.4.0-snapshot-518d303506.jar.sha1 | 1 + .../licenses/lucene-grouping-7.4.0-snapshot-0a7c3f462f.jar.sha1 | 1 - .../licenses/lucene-grouping-7.4.0-snapshot-518d303506.jar.sha1 | 1 + .../lucene-highlighter-7.4.0-snapshot-0a7c3f462f.jar.sha1 | 1 - .../lucene-highlighter-7.4.0-snapshot-518d303506.jar.sha1 | 1 + server/licenses/lucene-join-7.4.0-snapshot-0a7c3f462f.jar.sha1 | 1 - server/licenses/lucene-join-7.4.0-snapshot-518d303506.jar.sha1 | 1 + .../licenses/lucene-memory-7.4.0-snapshot-0a7c3f462f.jar.sha1 | 1 - .../licenses/lucene-memory-7.4.0-snapshot-518d303506.jar.sha1 | 1 + server/licenses/lucene-misc-7.4.0-snapshot-0a7c3f462f.jar.sha1 | 1 - server/licenses/lucene-misc-7.4.0-snapshot-518d303506.jar.sha1 | 1 + .../licenses/lucene-queries-7.4.0-snapshot-0a7c3f462f.jar.sha1 | 1 - .../licenses/lucene-queries-7.4.0-snapshot-518d303506.jar.sha1 | 1 + .../lucene-queryparser-7.4.0-snapshot-0a7c3f462f.jar.sha1 | 1 - .../lucene-queryparser-7.4.0-snapshot-518d303506.jar.sha1 | 1 + .../licenses/lucene-sandbox-7.4.0-snapshot-0a7c3f462f.jar.sha1 | 1 - .../licenses/lucene-sandbox-7.4.0-snapshot-518d303506.jar.sha1 | 1 + 
.../licenses/lucene-spatial-7.4.0-snapshot-0a7c3f462f.jar.sha1 | 1 - .../licenses/lucene-spatial-7.4.0-snapshot-518d303506.jar.sha1 | 1 + .../lucene-spatial-extras-7.4.0-snapshot-0a7c3f462f.jar.sha1 | 1 - .../lucene-spatial-extras-7.4.0-snapshot-518d303506.jar.sha1 | 1 + .../lucene-spatial3d-7.4.0-snapshot-0a7c3f462f.jar.sha1 | 1 - .../lucene-spatial3d-7.4.0-snapshot-518d303506.jar.sha1 | 1 + .../licenses/lucene-suggest-7.4.0-snapshot-0a7c3f462f.jar.sha1 | 1 - .../licenses/lucene-suggest-7.4.0-snapshot-518d303506.jar.sha1 | 1 + .../elasticsearch/indices/analysis/AnalysisFactoryTestCase.java | 2 ++ .../licenses/lucene-core-7.4.0-snapshot-0a7c3f462f.jar.sha1 | 1 - .../licenses/lucene-core-7.4.0-snapshot-518d303506.jar.sha1 | 1 + 50 files changed, 27 insertions(+), 25 deletions(-) delete mode 100644 modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-0a7c3f462f.jar.sha1 create mode 100644 modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-518d303506.jar.sha1 delete mode 100644 plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-0a7c3f462f.jar.sha1 create mode 100644 plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-518d303506.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-0a7c3f462f.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-518d303506.jar.sha1 delete mode 100644 plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-0a7c3f462f.jar.sha1 create mode 100644 plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-518d303506.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-0a7c3f462f.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-518d303506.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-0a7c3f462f.jar.sha1 
create mode 100644 plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-518d303506.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-0a7c3f462f.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-518d303506.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-0a7c3f462f.jar.sha1 create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-518d303506.jar.sha1 delete mode 100644 server/licenses/lucene-analyzers-common-7.4.0-snapshot-0a7c3f462f.jar.sha1 create mode 100644 server/licenses/lucene-analyzers-common-7.4.0-snapshot-518d303506.jar.sha1 delete mode 100644 server/licenses/lucene-backward-codecs-7.4.0-snapshot-0a7c3f462f.jar.sha1 create mode 100644 server/licenses/lucene-backward-codecs-7.4.0-snapshot-518d303506.jar.sha1 delete mode 100644 server/licenses/lucene-core-7.4.0-snapshot-0a7c3f462f.jar.sha1 create mode 100644 server/licenses/lucene-core-7.4.0-snapshot-518d303506.jar.sha1 delete mode 100644 server/licenses/lucene-grouping-7.4.0-snapshot-0a7c3f462f.jar.sha1 create mode 100644 server/licenses/lucene-grouping-7.4.0-snapshot-518d303506.jar.sha1 delete mode 100644 server/licenses/lucene-highlighter-7.4.0-snapshot-0a7c3f462f.jar.sha1 create mode 100644 server/licenses/lucene-highlighter-7.4.0-snapshot-518d303506.jar.sha1 delete mode 100644 server/licenses/lucene-join-7.4.0-snapshot-0a7c3f462f.jar.sha1 create mode 100644 server/licenses/lucene-join-7.4.0-snapshot-518d303506.jar.sha1 delete mode 100644 server/licenses/lucene-memory-7.4.0-snapshot-0a7c3f462f.jar.sha1 create mode 100644 server/licenses/lucene-memory-7.4.0-snapshot-518d303506.jar.sha1 delete mode 100644 server/licenses/lucene-misc-7.4.0-snapshot-0a7c3f462f.jar.sha1 create mode 100644 server/licenses/lucene-misc-7.4.0-snapshot-518d303506.jar.sha1 delete mode 100644 
server/licenses/lucene-queries-7.4.0-snapshot-0a7c3f462f.jar.sha1 create mode 100644 server/licenses/lucene-queries-7.4.0-snapshot-518d303506.jar.sha1 delete mode 100644 server/licenses/lucene-queryparser-7.4.0-snapshot-0a7c3f462f.jar.sha1 create mode 100644 server/licenses/lucene-queryparser-7.4.0-snapshot-518d303506.jar.sha1 delete mode 100644 server/licenses/lucene-sandbox-7.4.0-snapshot-0a7c3f462f.jar.sha1 create mode 100644 server/licenses/lucene-sandbox-7.4.0-snapshot-518d303506.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-7.4.0-snapshot-0a7c3f462f.jar.sha1 create mode 100644 server/licenses/lucene-spatial-7.4.0-snapshot-518d303506.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-extras-7.4.0-snapshot-0a7c3f462f.jar.sha1 create mode 100644 server/licenses/lucene-spatial-extras-7.4.0-snapshot-518d303506.jar.sha1 delete mode 100644 server/licenses/lucene-spatial3d-7.4.0-snapshot-0a7c3f462f.jar.sha1 create mode 100644 server/licenses/lucene-spatial3d-7.4.0-snapshot-518d303506.jar.sha1 delete mode 100644 server/licenses/lucene-suggest-7.4.0-snapshot-0a7c3f462f.jar.sha1 create mode 100644 server/licenses/lucene-suggest-7.4.0-snapshot-518d303506.jar.sha1 delete mode 100644 x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-0a7c3f462f.jar.sha1 create mode 100644 x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-518d303506.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 7aedd395b93..d89ffa78ed8 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ elasticsearch = 7.0.0-alpha1 -lucene = 7.4.0-snapshot-0a7c3f462f +lucene = 7.4.0-snapshot-518d303506 # optional dependencies spatial4j = 0.7 diff --git a/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 25e2291d36e..00000000000 --- 
a/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bf2cfa0551ebdf08a2cf3079f3c74643bd9dbb76 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-518d303506.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 00000000000..2e666a2d566 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +a57659a275921d8ab3f7ec580e9bf713ce6143b1 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 3fdd3366122..00000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -82d83fcac1d9c8948aa0247fc9c87f177ddbd59b \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-518d303506.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 00000000000..03f1b7d27ae --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +b91a260d8d12ee4b3302a63059c73a34de0ce146 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 855d6ebe4ae..00000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -73fd4364f2931e7c8303b5927e140a7d21116c36 \ No newline at end of file diff --git 
a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-518d303506.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 00000000000..9a5c6669009 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +cc1ca9bd9e2c162dd1da8c2e7111913fd8033e48 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 091097f1a84..00000000000 --- a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0a2c4417fa9a8be078864f590a5a66b98d551cf5 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-518d303506.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 00000000000..cbf4f78c319 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +2fa3662a10a9e085b1c7b87293d727422cbe6224 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index b18addf0b58..00000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6fa179924f139a30fc0e5399256e1a44562ed32b \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-518d303506.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-518d303506.jar.sha1 new 
file mode 100644 index 00000000000..bd5bf428b6d --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +60aa50c11857e6739e68936cb45102562b2c46b4 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 7b7141b6f40..00000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5ed135d34d7868b71a725257a46dc8d8735a15d4 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-518d303506.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 00000000000..a73900802ac --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +4586368007785a3be26db4b9ce404ffb8c76f350 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 73be96c477e..00000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -875911b36b99c2103719f94559878a0ecb862fb6 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-518d303506.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 00000000000..bf0a50f7154 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ 
+9c6d030ab2c148df7a6ba73a774ef4b8c720a6cb \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 0c85d3f6c85..00000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e7191628df8cb72382a20da79224aef677117849 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-518d303506.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 00000000000..ba6ceb2aed9 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +8275bf8df2644d5fcec2963cf237d14b6e00fefe \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-analyzers-common-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index db3885eb62f..00000000000 --- a/server/licenses/lucene-analyzers-common-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8cd761f40c4a89ed977167f0518d12e409eaf3d8 \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-analyzers-common-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 00000000000..4c0db7a735c --- /dev/null +++ b/server/licenses/lucene-analyzers-common-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +557d62d2b13d3dcb1810a1633e22625e42425425 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-backward-codecs-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 
bd8711a4d53..00000000000 --- a/server/licenses/lucene-backward-codecs-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8c93ed67599d345b9359586248ab92342d7d3033 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-backward-codecs-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 00000000000..0579316096a --- /dev/null +++ b/server/licenses/lucene-backward-codecs-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +d3755ad4c98b49fe5055b32358e3071727177c03 \ No newline at end of file diff --git a/server/licenses/lucene-core-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-core-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 36bf03bbbdb..00000000000 --- a/server/licenses/lucene-core-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -003ed080e5184661e606091cd321c229798b22f8 \ No newline at end of file diff --git a/server/licenses/lucene-core-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-core-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 00000000000..134072bc137 --- /dev/null +++ b/server/licenses/lucene-core-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +c1bbf611535f0b0fd0ba14e8da67c8d645b95244 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-grouping-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 0f940ee9c7a..00000000000 --- a/server/licenses/lucene-grouping-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0b4be9f96edfd3dbcff5aa9b3f0914e86eb9cc51 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-grouping-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 00000000000..8a3327cc8a2 --- /dev/null +++ b/server/licenses/lucene-grouping-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ 
+b62ebd53bbefb2f59cd246157a6768cae8a5a3a1 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-highlighter-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index fdc9336fb2c..00000000000 --- a/server/licenses/lucene-highlighter-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a5dcceb5bc017cee6ab5d3ee1943aca1ac6fe074 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-highlighter-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 00000000000..75fb5a77556 --- /dev/null +++ b/server/licenses/lucene-highlighter-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +cba0fd4ccb98db8a72287a95d6b653e455f9eeb3 \ No newline at end of file diff --git a/server/licenses/lucene-join-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-join-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 62726ca415a..00000000000 --- a/server/licenses/lucene-join-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b59e7441f121da969bef8eef2c0c61743b4230a8 \ No newline at end of file diff --git a/server/licenses/lucene-join-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-join-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 00000000000..01e0197bc17 --- /dev/null +++ b/server/licenses/lucene-join-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +5127ed0b7516f8b28d84e837df4f33c67e361f6c \ No newline at end of file diff --git a/server/licenses/lucene-memory-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-memory-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index a68093d2fc4..00000000000 --- a/server/licenses/lucene-memory-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -46736dbb07b432f0a7c1b3080f62932c483e5cb9 \ No newline at end of file diff --git 
a/server/licenses/lucene-memory-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-memory-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 00000000000..3d6069f2a5c --- /dev/null +++ b/server/licenses/lucene-memory-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +45c7b13aae1104f9f5f0fca0606e5741309c8d74 \ No newline at end of file diff --git a/server/licenses/lucene-misc-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-misc-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 23e2b68f3df..00000000000 --- a/server/licenses/lucene-misc-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ee203718d525da0c6258a51a5a32d877089fe5af \ No newline at end of file diff --git a/server/licenses/lucene-misc-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-misc-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 00000000000..a74be59aea3 --- /dev/null +++ b/server/licenses/lucene-misc-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +2540c4b5d9dca8a39a3b4d58efe4ab484df7254f \ No newline at end of file diff --git a/server/licenses/lucene-queries-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-queries-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 5bac053813e..00000000000 --- a/server/licenses/lucene-queries-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cf17a332d8e42a45e8f013d5df408f4391d2620a \ No newline at end of file diff --git a/server/licenses/lucene-queries-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-queries-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 00000000000..cf26412b63f --- /dev/null +++ b/server/licenses/lucene-queries-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +e9d0c0c020917d4bf9b590526866ff5547dbaa17 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-queryparser-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 
100644 index 471aa797028..00000000000 --- a/server/licenses/lucene-queryparser-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -04832303d70502d2ece44501cb1716f42e24fe35 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-queryparser-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 00000000000..63533b77467 --- /dev/null +++ b/server/licenses/lucene-queryparser-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +50969cdb7279047fbec94dda6e7d74d1c73c07f8 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-sandbox-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 486dafc10c7..00000000000 --- a/server/licenses/lucene-sandbox-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -639313e3a9573779b6a28b45a7f57fc1f73ffa46 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-sandbox-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 00000000000..4eab31d62bd --- /dev/null +++ b/server/licenses/lucene-sandbox-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +94524b293572b1f0d01a0faeeade1ff24713f966 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-spatial-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 0a083b5a078..00000000000 --- a/server/licenses/lucene-spatial-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6144b493ba3588a638858d0058054758acc619b9 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-spatial-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 00000000000..ae5a2ea0375 --- /dev/null +++ b/server/licenses/lucene-spatial-7.4.0-snapshot-518d303506.jar.sha1 @@ 
-0,0 +1 @@ +878db723e41ece636ed338c4ef374e900f221a14 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-spatial-extras-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 851b0d76d3e..00000000000 --- a/server/licenses/lucene-spatial-extras-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9d00c6b8bbbbb496aecd555406267fee9e0af914 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-spatial-extras-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 00000000000..9f5129d8905 --- /dev/null +++ b/server/licenses/lucene-spatial-extras-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +c8dc85c32aeac6ff320aa6a9ea57881ad4847a55 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-spatial3d-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 22ce3c72443..00000000000 --- a/server/licenses/lucene-spatial3d-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -159cdb6d36845690cb1972d02cc0b472bb14b7f3 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-spatial3d-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 00000000000..02fcef681fc --- /dev/null +++ b/server/licenses/lucene-spatial3d-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +203d8d22ab172e624784a5fdeaecdd01ae25fb3d \ No newline at end of file diff --git a/server/licenses/lucene-suggest-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-suggest-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 0724381bcc6..00000000000 --- a/server/licenses/lucene-suggest-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -af1dd0218d58990cca5c1592d9722e67d233c996 \ No newline at end of file diff 
--git a/server/licenses/lucene-suggest-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-suggest-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 00000000000..a7daa7ff02a --- /dev/null +++ b/server/licenses/lucene-suggest-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +4d6cf8fa1064a86991d5cd12a2ed32119ac91212 \ No newline at end of file diff --git a/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java index c5b89adfd73..3fded43d858 100644 --- a/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java @@ -219,6 +219,8 @@ public abstract class AnalysisFactoryTestCase extends ESTestCase { // LUCENE-8273: ProtectedTermFilterFactory allows analysis chains to skip // particular token filters based on the attributes of the current token. 
.put("protectedterm", Void.class) + // LUCENE-8332 + .put("concatenategraph", Void.class) .immutableMap(); diff --git a/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 36bf03bbbdb..00000000000 --- a/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -003ed080e5184661e606091cd321c229798b22f8 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-518d303506.jar.sha1 b/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 00000000000..134072bc137 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +c1bbf611535f0b0fd0ba14e8da67c8d645b95244 \ No newline at end of file From fec7860edccd60a4e11d249367c7289281154920 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Fri, 15 Jun 2018 17:12:42 +0200 Subject: [PATCH 15/15] [Tests] Fix edge case in ScriptedMetricAggregatorTests (#31357) An expected exception is only thrown when there are documents in the index created in the test setup. Fixed the test by making sure there is at least one. 
Closes #31307 --- .../scripted/ScriptedMetricAggregatorTests.java | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorTests.java index 9417cc092d8..7a7c66d21aa 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorTests.java @@ -118,7 +118,7 @@ public class ScriptedMetricAggregatorTests extends AggregatorTestCase { SCRIPTS.put("initScriptParams", params -> { Map agg = (Map) params.get("_agg"); Integer initialValue = (Integer)params.get("initialValue"); - ArrayList collector = new ArrayList(); + ArrayList collector = new ArrayList<>(); collector.add(initialValue); agg.put("collector", collector); return agg; @@ -175,7 +175,6 @@ public class ScriptedMetricAggregatorTests extends AggregatorTestCase { /** * without combine script, the "_aggs" map should contain a list of the size of the number of documents matched */ - @SuppressWarnings("unchecked") public void testScriptedMetricWithoutCombine() throws IOException { try (Directory directory = newDirectory()) { int numDocs = randomInt(100); @@ -190,8 +189,11 @@ public class ScriptedMetricAggregatorTests extends AggregatorTestCase { ScriptedMetric scriptedMetric = search(newSearcher(indexReader, true, true), new MatchAllDocsQuery(), aggregationBuilder); assertEquals(AGG_NAME, scriptedMetric.getName()); assertNotNull(scriptedMetric.aggregation()); + @SuppressWarnings("unchecked") Map agg = (Map) scriptedMetric.aggregation(); - assertEquals(numDocs, ((List) agg.get("collector")).size()); + @SuppressWarnings("unchecked") + List list = (List) agg.get("collector"); + assertEquals(numDocs, list.size()); } } } @@ -300,10 
+302,9 @@ public class ScriptedMetricAggregatorTests extends AggregatorTestCase { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/31307") public void testSelfReferencingAggStateAfterMap() throws IOException { try (Directory directory = newDirectory()) { - Integer numDocs = randomInt(100); + Integer numDocs = randomIntBetween(1, 100); try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { for (int i = 0; i < numDocs; i++) { indexWriter.addDocument(singleton(new SortedNumericDocValuesField("number", i)));