diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index 5aa7d7ac187..de14fe158e3 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -127,7 +127,6 @@ - diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index ae107e95157..a8ee1677bbd 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -542,7 +542,7 @@ public abstract class TransportReplicationAction< new TransportChannelResponseHandler<>(logger, channel, extraMessage, () -> TransportResponse.Empty.INSTANCE); transportService.sendRequest(clusterService.localNode(), transportReplicaAction, - new ConcreteShardRequest<>(request, targetAllocationID), + new ConcreteReplicaRequest<>(request, targetAllocationID, globalCheckpoint), handler); } diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index 1e53faa9efc..d0462efc391 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -35,6 +35,7 @@ import org.elasticsearch.cli.UserException; import org.elasticsearch.common.PidFile; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.inject.CreationException; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.logging.LogConfigurator; import org.elasticsearch.common.logging.Loggers; @@ -297,6 +298,7 @@ final class Bootstrap { throw new BootstrapException(e); } 
checkForCustomConfFile(); + checkConfigExtension(environment.configExtension()); if (environment.pidFile() != null) { try { @@ -412,6 +414,14 @@ final class Bootstrap { } } + // pkg private for tests + static void checkConfigExtension(String extension) { + if (".yml".equals(extension) || ".json".equals(extension)) { + final DeprecationLogger deprecationLogger = new DeprecationLogger(Loggers.getLogger(Bootstrap.class)); + deprecationLogger.deprecated("elasticsearch{} is deprecated; rename your configuration file to elasticsearch.yaml", extension); + } + } + @SuppressForbidden(reason = "Allowed to exit explicitly in bootstrap phase") private static void exit(int status) { System.exit(status); diff --git a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java index 54a1c5fa444..96ff8fbf90d 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java @@ -315,11 +315,11 @@ final class BootstrapChecks { static class MaxNumberOfThreadsCheck implements BootstrapCheck { // this should be plenty for machines up to 256 cores - private final long maxNumberOfThreadsThreshold = 1 << 12; + private static final long MAX_NUMBER_OF_THREADS_THRESHOLD = 1 << 12; @Override public boolean check() { - return getMaxNumberOfThreads() != -1 && getMaxNumberOfThreads() < maxNumberOfThreadsThreshold; + return getMaxNumberOfThreads() != -1 && getMaxNumberOfThreads() < MAX_NUMBER_OF_THREADS_THRESHOLD; } @Override @@ -329,7 +329,7 @@ final class BootstrapChecks { "max number of threads [%d] for user [%s] is too low, increase to at least [%d]", getMaxNumberOfThreads(), BootstrapInfo.getSystemProperties().get("user.name"), - maxNumberOfThreadsThreshold); + MAX_NUMBER_OF_THREADS_THRESHOLD); } // visible for testing @@ -369,11 +369,11 @@ final class BootstrapChecks { static class MaxMapCountCheck implements BootstrapCheck { - 
private final long limit = 1 << 18; + private static final long LIMIT = 1 << 18; @Override public boolean check() { - return getMaxMapCount() != -1 && getMaxMapCount() < limit; + return getMaxMapCount() != -1 && getMaxMapCount() < LIMIT; } @Override @@ -382,7 +382,7 @@ final class BootstrapChecks { Locale.ROOT, "max virtual memory areas vm.max_map_count [%d] is too low, increase to at least [%d]", getMaxMapCount(), - limit); + LIMIT); } // visible for testing diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java index fbe47a17826..64902266d42 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java @@ -79,21 +79,21 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri /** It's possible that some geometries in a MULTI* shape might overlap. With the possible exception of GeometryCollection, * this normally isn't allowed. */ - protected final boolean multiPolygonMayOverlap = false; + protected static final boolean MULTI_POLYGON_MAY_OVERLAP = false; /** @see org.locationtech.spatial4j.shape.jts.JtsGeometry#validate() */ - protected final boolean autoValidateJtsGeometry = true; + protected static final boolean AUTO_VALIDATE_JTS_GEOMETRY = true; /** @see org.locationtech.spatial4j.shape.jts.JtsGeometry#index() */ - protected final boolean autoIndexJtsGeometry = true;//may want to turn off once SpatialStrategy impls do it. + protected static final boolean AUTO_INDEX_JTS_GEOMETRY = true;//may want to turn off once SpatialStrategy impls do it. 
protected ShapeBuilder() { } protected JtsGeometry jtsGeometry(Geometry geom) { //dateline180Check is false because ElasticSearch does it's own dateline wrapping - JtsGeometry jtsGeometry = new JtsGeometry(geom, SPATIAL_CONTEXT, false, multiPolygonMayOverlap); - if (autoValidateJtsGeometry) + JtsGeometry jtsGeometry = new JtsGeometry(geom, SPATIAL_CONTEXT, false, MULTI_POLYGON_MAY_OVERLAP); + if (AUTO_VALIDATE_JTS_GEOMETRY) jtsGeometry.validate(); - if (autoIndexJtsGeometry) + if (AUTO_INDEX_JTS_GEOMETRY) jtsGeometry.index(); return jtsGeometry; } diff --git a/core/src/main/java/org/elasticsearch/common/settings/SecureSetting.java b/core/src/main/java/org/elasticsearch/common/settings/SecureSetting.java index e93240b9a32..7b8f8bbdff3 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/SecureSetting.java +++ b/core/src/main/java/org/elasticsearch/common/settings/SecureSetting.java @@ -24,25 +24,25 @@ import java.security.GeneralSecurityException; import java.util.EnumSet; import java.util.Set; +import org.elasticsearch.common.Booleans; import org.elasticsearch.common.util.ArrayUtils; - /** * A secure setting. * * This class allows access to settings from the Elasticsearch keystore. */ public abstract class SecureSetting extends Setting { + + /** Determines whether legacy settings with sensitive values should be allowed. */ + private static final boolean ALLOW_INSECURE_SETTINGS = Booleans.parseBoolean(System.getProperty("es.allow_insecure_settings", "false")); + private static final Set ALLOWED_PROPERTIES = EnumSet.of(Property.Deprecated, Property.Shared); private static final Property[] FIXED_PROPERTIES = { Property.NodeScope }; - private static final Property[] LEGACY_PROPERTIES = { - Property.NodeScope, Property.Deprecated, Property.Filtered - }; - private SecureSetting(String key, Property... 
properties) { super(key, (String)null, null, ArrayUtils.concat(properties, FIXED_PROPERTIES, Property.class)); assert assertAllowedProperties(properties); @@ -133,6 +133,23 @@ public abstract class SecureSetting extends Setting { }; } + /** + * A setting which contains a sensitive string, but which for legacy reasons must be found outside secure settings. + * @see #secureString(String, Setting, Property...) + */ + public static Setting insecureString(String name) { + return new Setting(name, "", SecureString::new, Property.Deprecated, Property.Filtered, Property.NodeScope) { + @Override + public SecureString get(Settings settings) { + if (ALLOW_INSECURE_SETTINGS == false && exists(settings)) { + throw new IllegalArgumentException("Setting [" + name + "] is insecure, " + + "but property [allow_insecure_settings] is not set"); + } + return super.get(settings); + } + }; + } + /** * A setting which contains a file. Reading the setting opens an input stream to the file. * diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java b/core/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java index 7e40e177d3e..3cfbfb92d32 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java @@ -410,6 +410,7 @@ public final class ObjectParser extends AbstractObjectParser HighlightBuilder.fromXContent(c), SearchSourceBuilder.HIGHLIGHT_FIELD); PARSER.declareObject(InnerHitBuilder::setChildInnerHits, (p, c) -> { diff --git a/core/src/main/java/org/elasticsearch/action/bulk/byscroll/AbstractBulkByScrollRequest.java b/core/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java similarity index 99% rename from core/src/main/java/org/elasticsearch/action/bulk/byscroll/AbstractBulkByScrollRequest.java rename to core/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java index 44580875011..a582248af11 100644 
--- a/core/src/main/java/org/elasticsearch/action/bulk/byscroll/AbstractBulkByScrollRequest.java +++ b/core/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.action.bulk.byscroll; +package org.elasticsearch.index.reindex; import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequest; @@ -355,7 +355,7 @@ public abstract class AbstractBulkByScrollRequest bulkFailures; - private List searchFailures; + private List searchFailures; private boolean timedOut; public BulkByScrollResponse() { } public BulkByScrollResponse(TimeValue took, BulkByScrollTask.Status status, List bulkFailures, - List searchFailures, boolean timedOut) { + List searchFailures, boolean timedOut) { this.took = took; this.status = requireNonNull(status, "Null status not supported"); this.bulkFailures = bulkFailures; @@ -139,7 +138,7 @@ public class BulkByScrollResponse extends ActionResponse implements ToXContent { /** * All search failures. 
*/ - public List getSearchFailures() { + public List getSearchFailures() { return searchFailures; } @@ -166,7 +165,7 @@ public class BulkByScrollResponse extends ActionResponse implements ToXContent { took = new TimeValue(in); status = new BulkByScrollTask.Status(in); bulkFailures = in.readList(Failure::new); - searchFailures = in.readList(SearchFailure::new); + searchFailures = in.readList(ScrollableHitSource.SearchFailure::new); timedOut = in.readBoolean(); } @@ -181,7 +180,7 @@ public class BulkByScrollResponse extends ActionResponse implements ToXContent { failure.toXContent(builder, params); builder.endObject(); } - for (SearchFailure failure: searchFailures) { + for (ScrollableHitSource.SearchFailure failure: searchFailures) { failure.toXContent(builder, params); } builder.endArray(); @@ -199,4 +198,4 @@ public class BulkByScrollResponse extends ActionResponse implements ToXContent { builder.append(",search_failures=").append(getSearchFailures().subList(0, min(3, getSearchFailures().size()))); return builder.append(']').toString(); } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/action/bulk/byscroll/BulkByScrollTask.java b/core/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java similarity index 99% rename from core/src/main/java/org/elasticsearch/action/bulk/byscroll/BulkByScrollTask.java rename to core/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java index 7c9124057b3..18c6dac9206 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/byscroll/BulkByScrollTask.java +++ b/core/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.action.bulk.byscroll; +package org.elasticsearch.index.reindex; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; diff --git a/core/src/main/java/org/elasticsearch/action/bulk/byscroll/ClientScrollableHitSource.java b/core/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java similarity index 99% rename from core/src/main/java/org/elasticsearch/action/bulk/byscroll/ClientScrollableHitSource.java rename to core/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java index 3bacc187ebb..2f6775a1eae 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/byscroll/ClientScrollableHitSource.java +++ b/core/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.action.bulk.byscroll; +package org.elasticsearch.index.reindex; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryAction.java b/core/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryAction.java similarity index 91% rename from modules/reindex/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryAction.java rename to core/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryAction.java index b55fe33340e..c1abb16ca39 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryAction.java +++ b/core/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryAction.java @@ -20,8 +20,6 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.Action; -import org.elasticsearch.action.bulk.byscroll.BulkByScrollResponse; -import org.elasticsearch.action.bulk.byscroll.DeleteByQueryRequest; import org.elasticsearch.client.ElasticsearchClient; public class DeleteByQueryAction extends Action { diff --git 
a/core/src/main/java/org/elasticsearch/action/bulk/byscroll/DeleteByQueryRequest.java b/core/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java similarity index 97% rename from core/src/main/java/org/elasticsearch/action/bulk/byscroll/DeleteByQueryRequest.java rename to core/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java index 2644d0d9496..ad70748f3e4 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/byscroll/DeleteByQueryRequest.java +++ b/core/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.action.bulk.byscroll; +package org.elasticsearch.index.reindex; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; @@ -81,7 +81,7 @@ public class DeleteByQueryRequest extends AbstractBulkByScrollRequest listener, int sliceId, Exception e) { + public void onSliceFailure(ActionListener listener, int sliceId, Exception e) { results.setOnce(sliceId, new Result(sliceId, e)); recordSliceCompletionAndRespondIfAllDone(listener); // TODO cancel when a slice fails? 
diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java b/core/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java similarity index 97% rename from modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java rename to core/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java index 2fa513f9c57..76944c7b804 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java +++ b/core/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java @@ -26,7 +26,6 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.uid.Versions; -import org.elasticsearch.index.reindex.remote.RemoteInfo; import org.elasticsearch.tasks.TaskId; import java.io.IOException; @@ -128,7 +127,7 @@ public class ReindexRequest extends AbstractBulkIndexByScrollRequest { diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteInfo.java b/core/src/main/java/org/elasticsearch/index/reindex/RemoteInfo.java similarity index 99% rename from modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteInfo.java rename to core/src/main/java/org/elasticsearch/index/reindex/RemoteInfo.java index 5fad275cde4..878a9c61e4c 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteInfo.java +++ b/core/src/main/java/org/elasticsearch/index/reindex/RemoteInfo.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.index.reindex.remote; +package org.elasticsearch.index.reindex; import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; diff --git a/core/src/main/java/org/elasticsearch/action/bulk/byscroll/ScrollableHitSource.java b/core/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java similarity index 99% rename from core/src/main/java/org/elasticsearch/action/bulk/byscroll/ScrollableHitSource.java rename to core/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java index 6426bad592f..3d1eb582db8 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/byscroll/ScrollableHitSource.java +++ b/core/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.action.bulk.byscroll; +package org.elasticsearch.index.reindex; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; @@ -111,7 +111,7 @@ public abstract class ScrollableHitSource { /** * Set the id of the last scroll. Used for debugging. */ - final void setScroll(String scrollId) { + public final void setScroll(String scrollId) { this.scrollId.set(scrollId); } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/byscroll/SuccessfullyProcessed.java b/core/src/main/java/org/elasticsearch/index/reindex/SuccessfullyProcessed.java similarity index 96% rename from core/src/main/java/org/elasticsearch/action/bulk/byscroll/SuccessfullyProcessed.java rename to core/src/main/java/org/elasticsearch/index/reindex/SuccessfullyProcessed.java index a0176e35202..6547984900e 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/byscroll/SuccessfullyProcessed.java +++ b/core/src/main/java/org/elasticsearch/index/reindex/SuccessfullyProcessed.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.action.bulk.byscroll; +package org.elasticsearch.index.reindex; /** * Implemented by {@link BulkByScrollTask} and {@link BulkByScrollTask.Status} to consistently implement diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryAction.java b/core/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryAction.java similarity index 95% rename from modules/reindex/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryAction.java rename to core/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryAction.java index cb716e82248..1058f7f1307 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryAction.java +++ b/core/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.Action; -import org.elasticsearch.action.bulk.byscroll.BulkByScrollResponse; import org.elasticsearch.client.ElasticsearchClient; public class UpdateByQueryAction extends diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java b/core/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java similarity index 97% rename from modules/reindex/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java rename to core/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java index 3e7fac9d454..ad0123d76ce 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java +++ b/core/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java @@ -71,7 +71,7 @@ public class UpdateByQueryRequest extends AbstractBulkIndexByScrollRequesttrue if an fsync is required to ensure durability of the translogs operations or it's metadata. 
+ */ public boolean syncNeeded() { try (ReleasableLock lock = readLock.acquire()) { return current.syncNeeded(); diff --git a/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java b/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java index b4400f60b81..daf9a44b666 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java +++ b/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java @@ -209,7 +209,8 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { } /** - * returns true if there are buffered ops + * Returns true if there are buffered operations that have not been flushed and fsynced to disk or if the latest global + * checkpoint has not yet been fsynced */ public boolean syncNeeded() { return totalOffset != lastSyncedCheckpoint.offset || globalCheckpointSupplier.getAsLong() != lastSyncedCheckpoint.globalCheckpoint; diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index d9540019e80..18557b5c7b8 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -70,7 +70,7 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget private static final AtomicLong idGenerator = new AtomicLong(); - private final String RECOVERY_PREFIX = "recovery."; + private static final String RECOVERY_PREFIX = "recovery."; private final ShardId shardId; private final long recoveryId; diff --git a/core/src/main/java/org/elasticsearch/node/InternalSettingsPreparer.java b/core/src/main/java/org/elasticsearch/node/InternalSettingsPreparer.java index d9cefc6b22e..b8862fb20e5 100644 --- a/core/src/main/java/org/elasticsearch/node/InternalSettingsPreparer.java +++ b/core/src/main/java/org/elasticsearch/node/InternalSettingsPreparer.java @@ 
-19,14 +19,6 @@ package org.elasticsearch.node; -import org.elasticsearch.cli.Terminal; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsException; -import org.elasticsearch.env.Environment; - import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; @@ -38,6 +30,14 @@ import java.util.Map; import java.util.Set; import java.util.function.Function; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsException; +import org.elasticsearch.env.Environment; + import static org.elasticsearch.common.Strings.cleanPath; public class InternalSettingsPreparer { @@ -116,7 +116,8 @@ public class InternalSettingsPreparer { // we put back the path.logs so we can use it in the logging configuration file output.put(Environment.PATH_LOGS_SETTING.getKey(), cleanPath(environment.logsFile().toAbsolutePath().toString())); - return new Environment(output.build()); + String configExtension = foundSuffixes.isEmpty() ? null : foundSuffixes.iterator().next(); + return new Environment(output.build(), configExtension); } /** diff --git a/core/src/main/java/org/elasticsearch/script/LeafSearchScript.java b/core/src/main/java/org/elasticsearch/script/LeafSearchScript.java index 0bf9e0d50e0..762168d3c90 100644 --- a/core/src/main/java/org/elasticsearch/script/LeafSearchScript.java +++ b/core/src/main/java/org/elasticsearch/script/LeafSearchScript.java @@ -19,18 +19,30 @@ package org.elasticsearch.script; +import org.apache.lucene.search.Scorer; import org.elasticsearch.common.lucene.ScorerAware; import java.util.Map; /** * A per-segment {@link SearchScript}. 
+ * + * This is effectively a functional interface, requiring at least implementing {@link #runAsDouble()}. */ public interface LeafSearchScript extends ScorerAware, ExecutableScript { - void setDocument(int doc); + /** + * Set the document this script will process next. + */ + default void setDocument(int doc) {} - void setSource(Map source); + @Override + default void setScorer(Scorer scorer) {} + + /** + * Set the source for the current document. + */ + default void setSource(Map source) {} /** * Sets per-document aggregation {@code _value}. @@ -44,8 +56,23 @@ public interface LeafSearchScript extends ScorerAware, ExecutableScript { setNextVar("_value", value); } - long runAsLong(); + @Override + default void setNextVar(String field, Object value) {} + /** + * Return the result as a long. This is used by aggregation scripts over long fields. + */ + default long runAsLong() { + throw new UnsupportedOperationException("runAsLong is not implemented"); + } + + @Override + default Object run() { + return runAsDouble(); + } + + /** + * Return the result as a double. This is the main use case of search script, used for document scoring. + */ double runAsDouble(); - } diff --git a/core/src/main/java/org/elasticsearch/script/ScriptEngine.java b/core/src/main/java/org/elasticsearch/script/ScriptEngine.java index 89ded962567..78a1478a9c9 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptEngine.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptEngine.java @@ -38,7 +38,9 @@ public interface ScriptEngine extends Closeable { /** * The extension for file scripts in this language. */ - String getExtension(); + default String getExtension() { + return getType(); + } /** * Compiles a script. 
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/BucketOrder.java b/core/src/main/java/org/elasticsearch/search/aggregations/BucketOrder.java new file mode 100644 index 00000000000..482a90f0852 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/aggregations/BucketOrder.java @@ -0,0 +1,127 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.search.aggregations; + +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; +import org.elasticsearch.search.aggregations.support.AggregationPath; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Comparator; +import java.util.List; + +/** + * {@link Bucket} Ordering strategy. + */ +public abstract class BucketOrder implements ToXContentObject, Writeable { + + /** + * Creates a bucket ordering strategy that sorts buckets by their document counts (ascending or descending). + * + * @param asc direction to sort by: {@code true} for ascending, {@code false} for descending. 
+ */ + public static BucketOrder count(boolean asc) { + return asc ? InternalOrder.COUNT_ASC : InternalOrder.COUNT_DESC; + } + + /** + * Creates a bucket ordering strategy that sorts buckets by their keys (ascending or descending). This may be + * used as a tie-breaker to avoid non-deterministic ordering. + * + * @param asc direction to sort by: {@code true} for ascending, {@code false} for descending. + */ + public static BucketOrder key(boolean asc) { + return asc ? InternalOrder.KEY_ASC : InternalOrder.KEY_DESC; + } + + /** + * Creates a bucket ordering strategy which sorts buckets based on a single-valued sub-aggregation. + * + * @param path path to the sub-aggregation to sort on. + * @param asc direction to sort by: {@code true} for ascending, {@code false} for descending. + * @see AggregationPath + */ + public static BucketOrder aggregation(String path, boolean asc) { + return new InternalOrder.Aggregation(path, asc); + } + + /** + * Creates a bucket ordering strategy which sorts buckets based on a metric from a multi-valued sub-aggregation. + * + * @param path path to the sub-aggregation to sort on. + * @param metricName name of the value of the multi-value metric to sort on. + * @param asc direction to sort by: {@code true} for ascending, {@code false} for descending. + * @see AggregationPath + */ + public static BucketOrder aggregation(String path, String metricName, boolean asc) { + return new InternalOrder.Aggregation(path + "." + metricName, asc); + } + + /** + * Creates a bucket ordering strategy which sorts buckets based on multiple criteria. A tie-breaker may be added to + * avoid non-deterministic ordering. + * + * @param orders a list of {@link BucketOrder} objects to sort on, in order of priority. + */ + public static BucketOrder compound(List orders) { + return new InternalOrder.CompoundOrder(orders); + } + + /** + * Creates a bucket ordering strategy which sorts buckets based on multiple criteria. 
A tie-breaker may be added to + * avoid non-deterministic ordering. + * + * @param orders a list of {@link BucketOrder} parameters to sort on, in order of priority. + */ + public static BucketOrder compound(BucketOrder... orders) { + return compound(Arrays.asList(orders)); + } + + /** + * @return A comparator for the bucket based on the given aggregator. The comparator is used in two phases: + *

+ * - aggregation phase, where each shard builds a list of buckets to be sent to the coordinating node. + * In this phase, the passed in aggregator will be the aggregator that aggregates the buckets on the + * shard level. + *

+ * - reduce phase, where the coordinating node gathers all the buckets from all the shards and reduces them + * to a final bucket list. In this case, the passed in aggregator will be {@code null}. + */ + public abstract Comparator comparator(Aggregator aggregator); + + /** + * @return unique internal ID used for reading/writing this order from/to a stream. + * @see InternalOrder.Streams + */ + abstract byte id(); + + @Override + public abstract int hashCode(); + + @Override + public abstract boolean equals(Object obj); + + @Override + public void writeTo(StreamOutput out) throws IOException { + InternalOrder.Streams.writeOrder(this, out); + } +} diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java b/core/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java new file mode 100644 index 00000000000..e81c0b1890b --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java @@ -0,0 +1,595 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.search.aggregations; + +import org.elasticsearch.Version; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.util.Comparators; +import org.elasticsearch.common.xcontent.XContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryParseContext; +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator; +import org.elasticsearch.search.aggregations.support.AggregationPath; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Objects; + +/** + * Implementations for {@link Bucket} ordering strategies. + */ +public class InternalOrder extends BucketOrder { + + private final byte id; + private final String key; + protected final boolean asc; + protected final Comparator comparator; + + /** + * Creates an ordering strategy that sorts {@link Bucket}s by some property. + * + * @param id unique ID for this ordering strategy. + * @param key key of the property to sort on. + * @param asc direction to sort by: {@code true} for ascending, {@code false} for descending. + * @param comparator determines how buckets will be ordered. 
+ */ + public InternalOrder(byte id, String key, boolean asc, Comparator comparator) { + this.id = id; + this.key = key; + this.asc = asc; + this.comparator = comparator; + } + + @Override + byte id() { + return id; + } + + @Override + public Comparator comparator(Aggregator aggregator) { + return comparator; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject().field(key, asc ? "asc" : "desc").endObject(); + } + + /** + * Validate a bucket ordering strategy for an {@link Aggregator}. + * + * @param order bucket ordering strategy to sort on. + * @param aggregator aggregator to sort. + * @return unmodified bucket ordering strategy. + * @throws AggregationExecutionException if validation fails + */ + public static BucketOrder validate(BucketOrder order, Aggregator aggregator) throws AggregationExecutionException { + if (order instanceof CompoundOrder) { + for (BucketOrder innerOrder : ((CompoundOrder) order).orderElements) { + validate(innerOrder, aggregator); + } + } else if (order instanceof Aggregation) { + ((Aggregation) order).path().validate(aggregator); + } + return order; + } + + /** + * {@link Bucket} ordering strategy to sort by a sub-aggregation. + */ + public static class Aggregation extends InternalOrder { + + static final byte ID = 0; + + /** + * Create a new ordering strategy to sort by a sub-aggregation. + * + * @param path path to the sub-aggregation to sort on. + * @param asc direction to sort by: {@code true} for ascending, {@code false} for descending. + * @see AggregationPath + */ + Aggregation(String path, boolean asc) { + super(ID, path, asc, new AggregationComparator(path, asc)); + } + + /** + * @return parsed path to the sub-aggregation to sort on. 
+ */ + public AggregationPath path() { + return ((AggregationComparator) comparator).path; + } + + @Override + public Comparator comparator(Aggregator aggregator) { + if (aggregator instanceof TermsAggregator) { + // Internal Optimization for terms aggregation to avoid constructing buckets for ordering purposes + return ((TermsAggregator) aggregator).bucketComparator(path(), asc); + } + return comparator; + } + + /** + * {@link Bucket} ordering strategy to sort by a sub-aggregation. + */ + static class AggregationComparator implements Comparator { + + private final AggregationPath path; + private final boolean asc; + + /** + * Create a new {@link Bucket} ordering strategy to sort by a sub-aggregation. + * + * @param path path to the sub-aggregation to sort on. + * @param asc direction to sort by: {@code true} for ascending, {@code false} for descending. + * @see AggregationPath + */ + AggregationComparator(String path, boolean asc) { + this.asc = asc; + this.path = AggregationPath.parse(path); + } + + @Override + public int compare(Bucket b1, Bucket b2) { + double v1 = path.resolveValue(b1); + double v2 = path.resolveValue(b2); + return Comparators.compareDiscardNaN(v1, v2, asc); + } + } + } + + /** + * {@link Bucket} ordering strategy to sort by multiple criteria. + */ + public static class CompoundOrder extends BucketOrder { + + static final byte ID = -1; + + final List orderElements; + + /** + * Create a new ordering strategy to sort by multiple criteria. A tie-breaker may be added to avoid + * non-deterministic ordering. + * + * @param compoundOrder a list of {@link BucketOrder}s to sort on, in order of priority. + */ + CompoundOrder(List compoundOrder) { + this(compoundOrder, true); + } + + /** + * Create a new ordering strategy to sort by multiple criteria. + * + * @param compoundOrder a list of {@link BucketOrder}s to sort on, in order of priority. 
+ * @param absoluteOrdering {@code true} to add a tie-breaker to avoid non-deterministic ordering if needed, + * {@code false} otherwise. + */ + CompoundOrder(List compoundOrder, boolean absoluteOrdering) { + this.orderElements = new LinkedList<>(compoundOrder); + BucketOrder lastElement = null; + for (BucketOrder order : orderElements) { + if (order instanceof CompoundOrder) { + throw new IllegalArgumentException("nested compound order not supported"); + } + lastElement = order; + } + if (absoluteOrdering && isKeyOrder(lastElement) == false) { + // add key order ascending as a tie-breaker to avoid non-deterministic ordering + // if all user provided comparators return 0. + this.orderElements.add(KEY_ASC); + } + } + + @Override + byte id() { + return ID; + } + + /** + * @return unmodifiable list of {@link BucketOrder}s to sort on. + */ + public List orderElements() { + return Collections.unmodifiableList(orderElements); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startArray(); + for (BucketOrder order : orderElements) { + order.toXContent(builder, params); + } + return builder.endArray(); + } + + @Override + public Comparator comparator(Aggregator aggregator) { + return new CompoundOrderComparator(orderElements, aggregator); + } + + @Override + public int hashCode() { + return Objects.hash(orderElements); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + CompoundOrder other = (CompoundOrder) obj; + return Objects.equals(orderElements, other.orderElements); + } + + /** + * {@code Comparator} for sorting buckets by multiple criteria. + */ + static class CompoundOrderComparator implements Comparator { + + private List compoundOrder; + private Aggregator aggregator; + + /** + * Create a new {@code Comparator} for sorting buckets by multiple criteria. 
+ * + * @param compoundOrder a list of {@link BucketOrder}s to sort on, in order of priority. + * @param aggregator {@link BucketOrder#comparator(Aggregator)} + */ + CompoundOrderComparator(List compoundOrder, Aggregator aggregator) { + this.compoundOrder = compoundOrder; + this.aggregator = aggregator; + } + + @Override + public int compare(Bucket b1, Bucket b2) { + int result = 0; + for (Iterator itr = compoundOrder.iterator(); itr.hasNext() && result == 0; ) { + result = itr.next().comparator(aggregator).compare(b1, b2); + } + return result; + } + } + } + + private static final byte COUNT_DESC_ID = 1; + private static final byte COUNT_ASC_ID = 2; + private static final byte KEY_DESC_ID = 3; + private static final byte KEY_ASC_ID = 4; + + /** + * Order by the (higher) count of each bucket. + */ + static final InternalOrder COUNT_DESC = new InternalOrder(COUNT_DESC_ID, "_count", false, comparingCounts().reversed()); + + /** + * Order by the (lower) count of each bucket. + */ + static final InternalOrder COUNT_ASC = new InternalOrder(COUNT_ASC_ID, "_count", true, comparingCounts()); + + /** + * Order by the key of each bucket descending. + */ + static final InternalOrder KEY_DESC = new InternalOrder(KEY_DESC_ID, "_key", false, comparingKeys().reversed()); + + /** + * Order by the key of each bucket ascending. + */ + static final InternalOrder KEY_ASC = new InternalOrder(KEY_ASC_ID, "_key", true, comparingKeys()); + + /** + * @return compare by {@link Bucket#getDocCount()}. + */ + private static Comparator comparingCounts() { + return Comparator.comparingLong(Bucket::getDocCount); + } + + /** + * @return compare by {@link Bucket#getKey()} from the appropriate implementation. 
+ */ + @SuppressWarnings("unchecked") + private static Comparator comparingKeys() { + return (b1, b2) -> { + if (b1 instanceof KeyComparable) { + return ((KeyComparable) b1).compareKey(b2); + } + throw new IllegalStateException("Unexpected order bucket class [" + b1.getClass() + "]"); + }; + } + + /** + * Determine if the ordering strategy is sorting on bucket count descending. + * + * @param order bucket ordering strategy to check. + * @return {@code true} if the ordering strategy is sorting on bucket count descending, {@code false} otherwise. + */ + public static boolean isCountDesc(BucketOrder order) { + return isOrder(order, COUNT_DESC); + } + + /** + * Determine if the ordering strategy is sorting on bucket key (ascending or descending). + * + * @param order bucket ordering strategy to check. + * @return {@code true} if the ordering strategy is sorting on bucket key, {@code false} otherwise. + */ + public static boolean isKeyOrder(BucketOrder order) { + return isOrder(order, KEY_ASC) || isOrder(order, KEY_DESC); + } + + /** + * Determine if the ordering strategy is sorting on bucket key ascending. + * + * @param order bucket ordering strategy to check. + * @return {@code true} if the ordering strategy is sorting on bucket key ascending, {@code false} otherwise. + */ + public static boolean isKeyAsc(BucketOrder order) { + return isOrder(order, KEY_ASC); + } + + /** + * Determine if the ordering strategy is sorting on bucket key descending. + * + * @param order bucket ordering strategy to check. + * @return {@code true} if the ordering strategy is sorting on bucket key descending, {@code false} otherwise. + */ + public static boolean isKeyDesc(BucketOrder order) { + return isOrder(order, KEY_DESC); + } + + /** + * Determine if the ordering strategy matches the expected one. + * + * @param order bucket ordering strategy to check. If this is a {@link CompoundOrder} the first element will be + * check instead. + * @param expected expected bucket ordering strategy. 
+ * @return {@code true} if the order matches, {@code false} otherwise. + */ + private static boolean isOrder(BucketOrder order, BucketOrder expected) { + if (order == expected) { + return true; + } else if (order instanceof CompoundOrder) { + // check if its a compound order with the first element that matches + List orders = ((CompoundOrder) order).orderElements; + if (orders.size() >= 1) { + return isOrder(orders.get(0), expected); + } + } + return false; + } + + /** + * Contains logic for reading/writing {@link BucketOrder} from/to streams. + */ + public static class Streams { + + /** + * Read a {@link BucketOrder} from a {@link StreamInput}. + * + * @param in stream with order data to read. + * @return order read from the stream + * @throws IOException on error reading from the stream. + */ + public static BucketOrder readOrder(StreamInput in) throws IOException { + byte id = in.readByte(); + switch (id) { + case COUNT_DESC_ID: return COUNT_DESC; + case COUNT_ASC_ID: return COUNT_ASC; + case KEY_DESC_ID: return KEY_DESC; + case KEY_ASC_ID: return KEY_ASC; + case Aggregation.ID: + boolean asc = in.readBoolean(); + String key = in.readString(); + return new Aggregation(key, asc); + case CompoundOrder.ID: + int size = in.readVInt(); + List compoundOrder = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + compoundOrder.add(Streams.readOrder(in)); + } + return new CompoundOrder(compoundOrder, false); + default: + throw new RuntimeException("unknown order id [" + id + "]"); + } + } + + /** + * ONLY FOR HISTOGRAM ORDER: Backwards compatibility logic to read a {@link BucketOrder} from a {@link StreamInput}. + * + * @param in stream with order data to read. + * @param bwcOrderFlag {@code true} to check {@code in.readBoolean()} in the backwards compat logic before reading + * the order. {@code false} to skip this flag (order always present). + * @return order read from the stream + * @throws IOException on error reading from the stream. 
+ */ + public static BucketOrder readHistogramOrder(StreamInput in, boolean bwcOrderFlag) throws IOException { + if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha2_UNRELEASED)) { + return Streams.readOrder(in); + } else { // backwards compat logic + if (bwcOrderFlag == false || in.readBoolean()) { + // translate the old histogram order IDs to the new order objects + byte id = in.readByte(); + switch (id) { + case 1: return KEY_ASC; + case 2: return KEY_DESC; + case 3: return COUNT_ASC; + case 4: return COUNT_DESC; + case 0: // aggregation order stream logic is backwards compatible + boolean asc = in.readBoolean(); + String key = in.readString(); + return new Aggregation(key, asc); + default: // not expecting compound order ID + throw new RuntimeException("unknown histogram order id [" + id + "]"); + } + } else { // default to _key asc if no order specified + return KEY_ASC; + } + } + } + + /** + * Write a {@link BucketOrder} to a {@link StreamOutput}. + * + * @param order order to write to the stream. + * @param out stream to write the order to. + * @throws IOException on error writing to the stream. + */ + public static void writeOrder(BucketOrder order, StreamOutput out) throws IOException { + out.writeByte(order.id()); + if (order instanceof Aggregation) { + Aggregation aggregationOrder = (Aggregation) order; + out.writeBoolean(aggregationOrder.asc); + out.writeString(aggregationOrder.path().toString()); + } else if (order instanceof CompoundOrder) { + CompoundOrder compoundOrder = (CompoundOrder) order; + out.writeVInt(compoundOrder.orderElements.size()); + for (BucketOrder innerOrder : compoundOrder.orderElements) { + innerOrder.writeTo(out); + } + } + } + + /** + * ONLY FOR HISTOGRAM ORDER: Backwards compatibility logic to write a {@link BucketOrder} to a stream. + * + * @param order order to write to the stream. + * @param out stream to write the order to. 
+ * @param bwcOrderFlag {@code true} to always {@code out.writeBoolean(true)} for the backwards compat logic before + * writing the order. {@code false} to skip this flag. + * @throws IOException on error writing to the stream. + */ + public static void writeHistogramOrder(BucketOrder order, StreamOutput out, boolean bwcOrderFlag) throws IOException { + if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha2_UNRELEASED)) { + order.writeTo(out); + } else { // backwards compat logic + if(bwcOrderFlag) { // need to add flag that determines if order exists + out.writeBoolean(true); // order always exists + } + if (order instanceof CompoundOrder) { + // older versions do not support histogram compound order; the best we can do here is use the first order. + order = ((CompoundOrder) order).orderElements.get(0); + } + if (order instanceof Aggregation) { + // aggregation order stream logic is backwards compatible + order.writeTo(out); + } else { + // convert the new order IDs to the old histogram order IDs. + byte id; + switch (order.id()) { + case COUNT_DESC_ID: id = 4; break; + case COUNT_ASC_ID: id = 3; break; + case KEY_DESC_ID: id = 2; break; + case KEY_ASC_ID: id = 1; break; + default: throw new RuntimeException("unknown order id [" + order.id() + "]"); + } + out.writeByte(id); + } + } + } + } + + /** + * Contains logic for parsing a {@link BucketOrder} from a {@link XContentParser}. + */ + public static class Parser { + + private static final DeprecationLogger DEPRECATION_LOGGER = + new DeprecationLogger(Loggers.getLogger(Parser.class)); + + /** + * Parse a {@link BucketOrder} from {@link XContent}. + * + * @param parser for parsing {@link XContent} that contains the order. + * @param context parsing context. + * @return bucket ordering strategy + * @throws IOException on error a {@link XContent} parsing error. 
+ */ + public static BucketOrder parseOrderParam(XContentParser parser, QueryParseContext context) throws IOException { + XContentParser.Token token; + String orderKey = null; + boolean orderAsc = false; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + orderKey = parser.currentName(); + } else if (token == XContentParser.Token.VALUE_STRING) { + String dir = parser.text(); + if ("asc".equalsIgnoreCase(dir)) { + orderAsc = true; + } else if ("desc".equalsIgnoreCase(dir)) { + orderAsc = false; + } else { + throw new ParsingException(parser.getTokenLocation(), + "Unknown order direction [" + dir + "]"); + } + } else { + throw new ParsingException(parser.getTokenLocation(), + "Unexpected token [" + token + "] for [order]"); + } + } + if (orderKey == null) { + throw new ParsingException(parser.getTokenLocation(), + "Must specify at least one field for [order]"); + } + // _term and _time order deprecated in 6.0; replaced by _key + if ("_term".equals(orderKey) || "_time".equals(orderKey)) { + DEPRECATION_LOGGER.deprecated("Deprecated aggregation order key [{}] used, replaced by [_key]", orderKey); + } + switch (orderKey) { + case "_term": + case "_time": + case "_key": + return orderAsc ? KEY_ASC : KEY_DESC; + case "_count": + return orderAsc ? COUNT_ASC : COUNT_DESC; + default: // assume all other orders are sorting on a sub-aggregation. Validation occurs later. 
+ return aggregation(orderKey, orderAsc); + } + } + } + + @Override + public int hashCode() { + return Objects.hash(id, key, asc); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + InternalOrder other = (InternalOrder) obj; + return Objects.equals(id, other.id) + && Objects.equals(key, other.key) + && Objects.equals(asc, other.asc); + } +} diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/KeyComparable.java b/core/src/main/java/org/elasticsearch/search/aggregations/KeyComparable.java new file mode 100644 index 00000000000..c69cf0689c6 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/aggregations/KeyComparable.java @@ -0,0 +1,41 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.search.aggregations; + +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; + +/** + * Defines behavior for comparing {@link Bucket#getKey() bucket keys} to imposes a total ordering + * of buckets of the same type. + * + * @param {@link Bucket} of the same type that also implements {@link KeyComparable}. 
+ * @see BucketOrder#key(boolean) + */ +public interface KeyComparable> { + + /** + * Compare this {@link Bucket}s {@link Bucket#getKey() key} with another bucket. + * + * @param other the bucket that contains the key to compare to. + * @return a negative integer, zero, or a positive integer as this buckets key + * is less than, equal to, or greater than the other buckets key. + * @see Comparable#compareTo(Object) + */ + int compareKey(T other); +} diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/MultiBucketsAggregation.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/MultiBucketsAggregation.java index fc223916f72..24b1894455a 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/MultiBucketsAggregation.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/MultiBucketsAggregation.java @@ -19,12 +19,10 @@ package org.elasticsearch.search.aggregations.bucket; -import org.elasticsearch.common.util.Comparators; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.HasAggregations; -import org.elasticsearch.search.aggregations.support.AggregationPath; import java.util.List; @@ -58,31 +56,6 @@ public interface MultiBucketsAggregation extends Aggregation { @Override Aggregations getAggregations(); - class SubAggregationComparator implements java.util.Comparator { - - private final AggregationPath path; - private final boolean asc; - - public SubAggregationComparator(String expression, boolean asc) { - this.asc = asc; - this.path = AggregationPath.parse(expression); - } - - public boolean asc() { - return asc; - } - - public AggregationPath path() { - return path; - } - - @Override - public int compare(B b1, B b2) { - double v1 = path.resolveValue(b1); - double v2 = path.resolveValue(b2); - return 
Comparators.compareDiscardNaN(v1, v2, asc); - } - } } /** diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java index 96b08e3bada..4bbe2c5abe1 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.aggregations.bucket.histogram; -import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.rounding.DateTimeUnit; @@ -28,10 +27,12 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.InternalOrder; +import org.elasticsearch.search.aggregations.InternalOrder.CompoundOrder; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; @@ -44,6 +45,7 @@ import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Objects; @@ -113,8 +115,8 @@ public class 
DateHistogramAggregationBuilder PARSER.declareField(DateHistogramAggregationBuilder::extendedBounds, parser -> ExtendedBounds.PARSER.apply(parser, null), ExtendedBounds.EXTENDED_BOUNDS_FIELD, ObjectParser.ValueType.OBJECT); - PARSER.declareField(DateHistogramAggregationBuilder::order, DateHistogramAggregationBuilder::parseOrder, - Histogram.ORDER_FIELD, ObjectParser.ValueType.OBJECT); + PARSER.declareObjectArray(DateHistogramAggregationBuilder::order, InternalOrder.Parser::parseOrderParam, + Histogram.ORDER_FIELD); } public static DateHistogramAggregationBuilder parse(String aggregationName, QueryParseContext context) throws IOException { @@ -125,7 +127,7 @@ public class DateHistogramAggregationBuilder private DateHistogramInterval dateHistogramInterval; private long offset = 0; private ExtendedBounds extendedBounds; - private InternalOrder order = (InternalOrder) Histogram.Order.KEY_ASC; + private BucketOrder order = BucketOrder.key(true); private boolean keyed = false; private long minDocCount = 0; @@ -137,9 +139,7 @@ public class DateHistogramAggregationBuilder /** Read from a stream, for internal use only. 
*/ public DateHistogramAggregationBuilder(StreamInput in) throws IOException { super(in, ValuesSourceType.NUMERIC, ValueType.DATE); - if (in.readBoolean()) { - order = InternalOrder.Streams.readOrder(in); - } + order = InternalOrder.Streams.readHistogramOrder(in, true); keyed = in.readBoolean(); minDocCount = in.readVLong(); interval = in.readLong(); @@ -150,11 +150,7 @@ public class DateHistogramAggregationBuilder @Override protected void innerWriteTo(StreamOutput out) throws IOException { - boolean hasOrder = order != null; - out.writeBoolean(hasOrder); - if (hasOrder) { - InternalOrder.Streams.writeOrder(order, out); - } + InternalOrder.Streams.writeHistogramOrder(order, out, true); out.writeBoolean(keyed); out.writeVLong(minDocCount); out.writeLong(interval); @@ -244,17 +240,34 @@ public class DateHistogramAggregationBuilder } /** Return the order to use to sort buckets of this histogram. */ - public Histogram.Order order() { + public BucketOrder order() { return order; } /** Set a new order on this builder and return the builder so that calls - * can be chained. */ - public DateHistogramAggregationBuilder order(Histogram.Order order) { + * can be chained. A tie-breaker may be added to avoid non-deterministic ordering. */ + public DateHistogramAggregationBuilder order(BucketOrder order) { if (order == null) { throw new IllegalArgumentException("[order] must not be null: [" + name + "]"); } - this.order = (InternalOrder) order; + if(order instanceof CompoundOrder || InternalOrder.isKeyOrder(order)) { + this.order = order; // if order already contains a tie-breaker we are good to go + } else { // otherwise add a tie-breaker by using a compound order + this.order = BucketOrder.compound(order); + } + return this; + } + + /** + * Sets the order in which the buckets will be returned. A tie-breaker may be added to avoid non-deterministic + * ordering. 
+ */ + public DateHistogramAggregationBuilder order(List orders) { + if (orders == null) { + throw new IllegalArgumentException("[orders] must not be null: [" + name + "]"); + } + // if the list only contains one order use that to avoid inconsistent xcontent + order(orders.size() > 1 ? BucketOrder.compound(orders) : orders.get(0)); return this; } @@ -370,35 +383,4 @@ public class DateHistogramAggregationBuilder && Objects.equals(offset, other.offset) && Objects.equals(extendedBounds, other.extendedBounds); } - - // similar to the parsing oh histogram orders, but also accepts _time as an alias for _key - private static InternalOrder parseOrder(XContentParser parser, QueryParseContext context) throws IOException { - InternalOrder order = null; - Token token; - String currentFieldName = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token == XContentParser.Token.VALUE_STRING) { - String dir = parser.text(); - boolean asc = "asc".equals(dir); - if (!asc && !"desc".equals(dir)) { - throw new ParsingException(parser.getTokenLocation(), "Unknown order direction: [" + dir - + "]. Should be either [asc] or [desc]"); - } - order = resolveOrder(currentFieldName, asc); - } - } - return order; - } - - static InternalOrder resolveOrder(String key, boolean asc) { - if ("_key".equals(key) || "_time".equals(key)) { - return (InternalOrder) (asc ? InternalOrder.KEY_ASC : InternalOrder.KEY_DESC); - } - if ("_count".equals(key)) { - return (InternalOrder) (asc ? 
InternalOrder.COUNT_ASC : InternalOrder.COUNT_DESC); - } - return new InternalOrder.Aggregation(key, asc); - } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index 871cade50cd..f5f7877572a 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -33,6 +33,8 @@ import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.bucket.BucketsAggregator; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.InternalOrder; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.internal.SearchContext; @@ -53,7 +55,7 @@ class DateHistogramAggregator extends BucketsAggregator { private final ValuesSource.Numeric valuesSource; private final DocValueFormat formatter; private final Rounding rounding; - private final InternalOrder order; + private final BucketOrder order; private final boolean keyed; private final long minDocCount; @@ -62,7 +64,7 @@ class DateHistogramAggregator extends BucketsAggregator { private final LongHash bucketOrds; private long offset; - DateHistogramAggregator(String name, AggregatorFactories factories, Rounding rounding, long offset, InternalOrder order, + DateHistogramAggregator(String name, AggregatorFactories factories, Rounding rounding, long offset, BucketOrder order, boolean keyed, long minDocCount, @Nullable ExtendedBounds extendedBounds, @Nullable ValuesSource.Numeric valuesSource, DocValueFormat formatter, SearchContext 
aggregationContext, @@ -71,7 +73,7 @@ class DateHistogramAggregator extends BucketsAggregator { super(name, factories, aggregationContext, parent, pipelineAggregators, metaData); this.rounding = rounding; this.offset = offset; - this.order = order; + this.order = InternalOrder.validate(order, this); this.keyed = keyed; this.minDocCount = minDocCount; this.extendedBounds = extendedBounds; @@ -131,7 +133,7 @@ class DateHistogramAggregator extends BucketsAggregator { } // the contract of the histogram aggregation is that shards must return buckets ordered by key in ascending order - CollectionUtil.introSort(buckets, InternalOrder.KEY_ASC.comparator()); + CollectionUtil.introSort(buckets, BucketOrder.key(true).comparator(this)); // value source will be null for unmapped fields InternalDateHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0 diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java index 44bb3e02afe..a64e0182888 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java @@ -24,6 +24,7 @@ import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; @@ -40,14 +41,14 @@ public final class DateHistogramAggregatorFactory 
private final DateHistogramInterval dateHistogramInterval; private final long interval; private final long offset; - private final InternalOrder order; + private final BucketOrder order; private final boolean keyed; private final long minDocCount; private final ExtendedBounds extendedBounds; private Rounding rounding; public DateHistogramAggregatorFactory(String name, ValuesSourceConfig config, long interval, - DateHistogramInterval dateHistogramInterval, long offset, InternalOrder order, boolean keyed, long minDocCount, + DateHistogramInterval dateHistogramInterval, long offset, BucketOrder order, boolean keyed, long minDocCount, Rounding rounding, ExtendedBounds extendedBounds, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { super(name, config, context, parent, subFactoriesBuilder, metaData); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/Histogram.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/Histogram.java index 3ac87de81ed..07e2eb87962 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/Histogram.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/Histogram.java @@ -19,10 +19,8 @@ package org.elasticsearch.search.aggregations.bucket.histogram; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; -import java.util.Comparator; import java.util.List; /** @@ -50,83 +48,4 @@ public interface Histogram extends MultiBucketsAggregation { @Override List getBuckets(); - /** - * A strategy defining the order in which the buckets in this histogram are ordered. 
- */ - abstract class Order implements ToXContent { - - private static int compareKey(Histogram.Bucket b1, Histogram.Bucket b2) { - if (b1 instanceof InternalHistogram.Bucket) { - return Double.compare(((InternalHistogram.Bucket) b1).key, ((InternalHistogram.Bucket) b2).key); - } else if (b1 instanceof InternalDateHistogram.Bucket) { - return Long.compare(((InternalDateHistogram.Bucket) b1).key, ((InternalDateHistogram.Bucket) b2).key); - } else { - throw new IllegalStateException("Unexpected impl: " + b1.getClass()); - } - } - - public static final Order KEY_ASC = new InternalOrder((byte) 1, "_key", true, new Comparator() { - @Override - public int compare(Histogram.Bucket b1, Histogram.Bucket b2) { - return compareKey(b1, b2); - } - }); - - public static final Order KEY_DESC = new InternalOrder((byte) 2, "_key", false, new Comparator() { - @Override - public int compare(Histogram.Bucket b1, Histogram.Bucket b2) { - return compareKey(b2, b1); - } - }); - - public static final Order COUNT_ASC = new InternalOrder((byte) 3, "_count", true, new Comparator() { - @Override - public int compare(Histogram.Bucket b1, Histogram.Bucket b2) { - int cmp = Long.compare(b1.getDocCount(), b2.getDocCount()); - if (cmp == 0) { - cmp = compareKey(b1, b2); - } - return cmp; - } - }); - - - public static final Order COUNT_DESC = new InternalOrder((byte) 4, "_count", false, new Comparator() { - @Override - public int compare(Histogram.Bucket b1, Histogram.Bucket b2) { - int cmp = Long.compare(b2.getDocCount(), b1.getDocCount()); - if (cmp == 0) { - cmp = compareKey(b1, b2); - } - return cmp; - } - }); - - /** - * Creates a bucket ordering strategy that sorts buckets based on a single-valued calc sug-aggregation - * - * @param path the name of the aggregation - * @param asc The direction of the order (ascending or descending) - */ - public static Order aggregation(String path, boolean asc) { - return new InternalOrder.Aggregation(path, asc); - } - - /** - * Creates a bucket ordering 
strategy that sorts buckets based on a multi-valued calc sug-aggregation - * - * @param aggregationName the name of the aggregation - * @param valueName The name of the value of the multi-value get by which the sorting will be applied - * @param asc The direction of the order (ascending or descending) - */ - public static Order aggregation(String aggregationName, String valueName, boolean asc) { - return new InternalOrder.Aggregation(aggregationName + "." + valueName, asc); - } - - /** - * @return The bucket comparator by which the order will be applied. - */ - abstract Comparator comparator(); - - } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java index 87c7404c088..9362c0b8f77 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java @@ -20,16 +20,16 @@ package org.elasticsearch.search.aggregations.bucket.histogram; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.InternalOrder; +import 
org.elasticsearch.search.aggregations.InternalOrder.CompoundOrder; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; @@ -41,6 +41,7 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceType; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; +import java.util.List; import java.util.Objects; /** @@ -75,8 +76,8 @@ public class HistogramAggregationBuilder histogram.extendedBounds(extendedBounds[0], extendedBounds[1]); }, parser -> EXTENDED_BOUNDS_PARSER.apply(parser, null), ExtendedBounds.EXTENDED_BOUNDS_FIELD, ObjectParser.ValueType.OBJECT); - PARSER.declareField(HistogramAggregationBuilder::order, HistogramAggregationBuilder::parseOrder, - Histogram.ORDER_FIELD, ObjectParser.ValueType.OBJECT); + PARSER.declareObjectArray(HistogramAggregationBuilder::order, InternalOrder.Parser::parseOrderParam, + Histogram.ORDER_FIELD); } public static HistogramAggregationBuilder parse(String aggregationName, QueryParseContext context) throws IOException { @@ -87,7 +88,7 @@ public class HistogramAggregationBuilder private double offset = 0; private double minBound = Double.POSITIVE_INFINITY; private double maxBound = Double.NEGATIVE_INFINITY; - private InternalOrder order = (InternalOrder) Histogram.Order.KEY_ASC; + private BucketOrder order = BucketOrder.key(true); private boolean keyed = false; private long minDocCount = 0; @@ -99,9 +100,7 @@ public class HistogramAggregationBuilder /** Read from a stream, for internal use only. 
*/ public HistogramAggregationBuilder(StreamInput in) throws IOException { super(in, ValuesSourceType.NUMERIC, ValueType.DOUBLE); - if (in.readBoolean()) { - order = InternalOrder.Streams.readOrder(in); - } + order = InternalOrder.Streams.readHistogramOrder(in, true); keyed = in.readBoolean(); minDocCount = in.readVLong(); interval = in.readDouble(); @@ -112,11 +111,7 @@ public class HistogramAggregationBuilder @Override protected void innerWriteTo(StreamOutput out) throws IOException { - boolean hasOrder = order != null; - out.writeBoolean(hasOrder); - if (hasOrder) { - InternalOrder.Streams.writeOrder(order, out); - } + InternalOrder.Streams.writeHistogramOrder(order, out, true); out.writeBoolean(keyed); out.writeVLong(minDocCount); out.writeDouble(interval); @@ -185,17 +180,34 @@ public class HistogramAggregationBuilder } /** Return the order to use to sort buckets of this histogram. */ - public Histogram.Order order() { + public BucketOrder order() { return order; } /** Set a new order on this builder and return the builder so that calls - * can be chained. */ - public HistogramAggregationBuilder order(Histogram.Order order) { + * can be chained. A tie-breaker may be added to avoid non-deterministic ordering. */ + public HistogramAggregationBuilder order(BucketOrder order) { if (order == null) { throw new IllegalArgumentException("[order] must not be null: [" + name + "]"); } - this.order = (InternalOrder) order; + if(order instanceof CompoundOrder || InternalOrder.isKeyOrder(order)) { + this.order = order; // if order already contains a tie-breaker we are good to go + } else { // otherwise add a tie-breaker by using a compound order + this.order = BucketOrder.compound(order); + } + return this; + } + + /** + * Sets the order in which the buckets will be returned. A tie-breaker may be added to avoid non-deterministic + * ordering. 
+ */ + public HistogramAggregationBuilder order(List orders) { + if (orders == null) { + throw new IllegalArgumentException("[orders] must not be null: [" + name + "]"); + } + // if the list only contains one order use that to avoid inconsistent xcontent + order(orders.size() > 1 ? BucketOrder.compound(orders) : orders.get(0)); return this; } @@ -286,34 +298,4 @@ public class HistogramAggregationBuilder && Objects.equals(minBound, other.minBound) && Objects.equals(maxBound, other.maxBound); } - - private static InternalOrder parseOrder(XContentParser parser, QueryParseContext context) throws IOException { - InternalOrder order = null; - Token token; - String currentFieldName = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token == XContentParser.Token.VALUE_STRING) { - String dir = parser.text(); - boolean asc = "asc".equals(dir); - if (!asc && !"desc".equals(dir)) { - throw new ParsingException(parser.getTokenLocation(), "Unknown order direction: [" + dir - + "]. Should be either [asc] or [desc]"); - } - order = resolveOrder(currentFieldName, asc); - } - } - return order; - } - - static InternalOrder resolveOrder(String key, boolean asc) { - if ("_key".equals(key)) { - return (InternalOrder) (asc ? InternalOrder.KEY_ASC : InternalOrder.KEY_DESC); - } - if ("_count".equals(key)) { - return (InternalOrder) (asc ? 
InternalOrder.COUNT_ASC : InternalOrder.COUNT_DESC); - } - return new InternalOrder.Aggregation(key, asc); - } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java index a3a038cfa3c..0c2ba554c0b 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java @@ -34,6 +34,8 @@ import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.bucket.BucketsAggregator; import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram.EmptyBucketInfo; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.InternalOrder; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.internal.SearchContext; @@ -54,7 +56,7 @@ class HistogramAggregator extends BucketsAggregator { private final ValuesSource.Numeric valuesSource; private final DocValueFormat formatter; private final double interval, offset; - private final InternalOrder order; + private final BucketOrder order; private final boolean keyed; private final long minDocCount; private final double minBound, maxBound; @@ -62,7 +64,7 @@ class HistogramAggregator extends BucketsAggregator { private final LongHash bucketOrds; HistogramAggregator(String name, AggregatorFactories factories, double interval, double offset, - InternalOrder order, boolean keyed, long minDocCount, double minBound, double maxBound, + BucketOrder order, boolean keyed, long minDocCount, double minBound, double maxBound, @Nullable ValuesSource.Numeric valuesSource, DocValueFormat formatter, SearchContext context, 
Aggregator parent, List pipelineAggregators, Map metaData) throws IOException { @@ -73,7 +75,7 @@ class HistogramAggregator extends BucketsAggregator { } this.interval = interval; this.offset = offset; - this.order = order; + this.order = InternalOrder.validate(order, this); this.keyed = keyed; this.minDocCount = minDocCount; this.minBound = minBound; @@ -137,7 +139,7 @@ class HistogramAggregator extends BucketsAggregator { } // the contract of the histogram aggregation is that shards must return buckets ordered by key in ascending order - CollectionUtil.introSort(buckets, InternalOrder.KEY_ASC.comparator()); + CollectionUtil.introSort(buckets, BucketOrder.key(true).comparator(this)); EmptyBucketInfo emptyBucketInfo = null; if (minDocCount == 0) { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java index 939210b63a6..c478d1262ea 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java @@ -23,6 +23,7 @@ import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; @@ -36,13 +37,13 @@ import java.util.Map; public final class HistogramAggregatorFactory extends ValuesSourceAggregatorFactory { private final double interval, offset; - private final 
InternalOrder order; + private final BucketOrder order; private final boolean keyed; private final long minDocCount; private final double minBound, maxBound; HistogramAggregatorFactory(String name, ValuesSourceConfig config, double interval, double offset, - InternalOrder order, boolean keyed, long minDocCount, double minBound, double maxBound, + BucketOrder order, boolean keyed, long minDocCount, double minBound, double maxBound, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { super(name, config, context, parent, subFactoriesBuilder, metaData); @@ -80,4 +81,4 @@ public final class HistogramAggregatorFactory extends ValuesSourceAggregatorFact throws IOException { return createAggregator(null, parent, pipelineAggregators, metaData); } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java index dc05ab51e8a..c3eab06f28a 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java @@ -31,6 +31,9 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.InternalOrder; +import org.elasticsearch.search.aggregations.KeyComparable; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; @@ -49,7 +52,7 @@ import java.util.Objects; public final class InternalDateHistogram extends 
InternalMultiBucketAggregation implements Histogram, HistogramFactory { - public static class Bucket extends InternalMultiBucketAggregation.InternalBucket implements Histogram.Bucket { + public static class Bucket extends InternalMultiBucketAggregation.InternalBucket implements Histogram.Bucket, KeyComparable { final long key; final long docCount; @@ -151,6 +154,11 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation< return builder; } + @Override + public int compareKey(Bucket other) { + return Long.compare(key, other.key); + } + public DocValueFormat getFormatter() { return format; } @@ -206,14 +214,14 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation< } private final List buckets; - private final InternalOrder order; + private final BucketOrder order; private final DocValueFormat format; private final boolean keyed; private final long minDocCount; private final long offset; private final EmptyBucketInfo emptyBucketInfo; - InternalDateHistogram(String name, List buckets, InternalOrder order, long minDocCount, long offset, + InternalDateHistogram(String name, List buckets, BucketOrder order, long minDocCount, long offset, EmptyBucketInfo emptyBucketInfo, DocValueFormat formatter, boolean keyed, List pipelineAggregators, Map metaData) { @@ -233,7 +241,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation< */ public InternalDateHistogram(StreamInput in) throws IOException { super(in); - order = InternalOrder.Streams.readOrder(in); + order = InternalOrder.Streams.readHistogramOrder(in, false); minDocCount = in.readVLong(); if (minDocCount == 0) { emptyBucketInfo = new EmptyBucketInfo(in); @@ -248,7 +256,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation< @Override protected void doWriteTo(StreamOutput out) throws IOException { - InternalOrder.Streams.writeOrder(order, out); + InternalOrder.Streams.writeHistogramOrder(order, out, false); 
out.writeVLong(minDocCount); if (minDocCount == 0) { emptyBucketInfo.writeTo(out); @@ -416,18 +424,18 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation< addEmptyBuckets(reducedBuckets, reduceContext); } - if (order == InternalOrder.KEY_ASC || reduceContext.isFinalReduce() == false) { + if (InternalOrder.isKeyAsc(order) || reduceContext.isFinalReduce() == false) { // nothing to do, data are already sorted since shards return // sorted buckets and the merge-sort performed by reduceBuckets // maintains order - } else if (order == InternalOrder.KEY_DESC) { + } else if (InternalOrder.isKeyDesc(order)) { // we just need to reverse here... List reverse = new ArrayList<>(reducedBuckets); Collections.reverse(reverse); reducedBuckets = reverse; } else { - // sorted by sub-aggregation, need to fall back to a costly n*log(n) sort - CollectionUtil.introSort(reducedBuckets, order.comparator()); + // sorted by compound order or sub-aggregation, need to fall back to a costly n*log(n) sort + CollectionUtil.introSort(reducedBuckets, order.comparator(null)); } return new InternalDateHistogram(getName(), reducedBuckets, order, minDocCount, offset, emptyBucketInfo, diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java index c6d2aa9eeb9..035dd9de055 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java @@ -30,6 +30,9 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import 
org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.InternalOrder; +import org.elasticsearch.search.aggregations.KeyComparable; import java.io.IOException; import java.util.ArrayList; @@ -45,7 +48,7 @@ import java.util.Objects; */ public final class InternalHistogram extends InternalMultiBucketAggregation implements Histogram, HistogramFactory { - public static class Bucket extends InternalMultiBucketAggregation.InternalBucket implements Histogram.Bucket { + public static class Bucket extends InternalMultiBucketAggregation.InternalBucket implements Histogram.Bucket, KeyComparable { final double key; final long docCount; @@ -147,6 +150,11 @@ public final class InternalHistogram extends InternalMultiBucketAggregation buckets; - private final InternalOrder order; + private final BucketOrder order; private final DocValueFormat format; private final boolean keyed; private final long minDocCount; private final EmptyBucketInfo emptyBucketInfo; - InternalHistogram(String name, List buckets, InternalOrder order, long minDocCount, EmptyBucketInfo emptyBucketInfo, + InternalHistogram(String name, List buckets, BucketOrder order, long minDocCount, EmptyBucketInfo emptyBucketInfo, DocValueFormat formatter, boolean keyed, List pipelineAggregators, Map metaData) { super(name, pipelineAggregators, metaData); @@ -225,7 +233,7 @@ public final class InternalHistogram extends InternalMultiBucketAggregation reverse = new ArrayList<>(reducedBuckets); Collections.reverse(reverse); reducedBuckets = reverse; } else { - // sorted by sub-aggregation, need to fall back to a costly n*log(n) sort - CollectionUtil.introSort(reducedBuckets, order.comparator()); + // sorted by compound order or sub-aggregation, need to fall back to a costly n*log(n) sort + CollectionUtil.introSort(reducedBuckets, order.comparator(null)); } return new InternalHistogram(getName(), reducedBuckets, order, minDocCount, emptyBucketInfo, format, keyed, pipelineAggregators(), 
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalOrder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalOrder.java deleted file mode 100644 index 5cf2f83baa8..00000000000 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalOrder.java +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.search.aggregations.bucket.histogram; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; - -import java.io.IOException; -import java.util.Comparator; -import java.util.Objects; - -/** - * An internal {@link Histogram.Order} strategy which is identified by a unique id. 
- */ -class InternalOrder extends Histogram.Order { - - final byte id; - final String key; - final boolean asc; - final Comparator comparator; - - InternalOrder(byte id, String key, boolean asc, Comparator comparator) { - this.id = id; - this.key = key; - this.asc = asc; - this.comparator = comparator; - } - - byte id() { - return id; - } - - String key() { - return key; - } - - boolean asc() { - return asc; - } - - @Override - Comparator comparator() { - return comparator; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return builder.startObject().field(key, asc ? "asc" : "desc").endObject(); - } - - @Override - public int hashCode() { - return Objects.hash(id, key, asc); - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - InternalOrder other = (InternalOrder) obj; - return Objects.equals(id, other.id) - && Objects.equals(key, other.key) - && Objects.equals(asc, other.asc); - } - - static class Aggregation extends InternalOrder { - - static final byte ID = 0; - - Aggregation(String key, boolean asc) { - super(ID, key, asc, new MultiBucketsAggregation.Bucket.SubAggregationComparator(key, asc)); - } - - } - - static class Streams { - - /** - * Writes the given order to the given output (based on the id of the order). - */ - public static void writeOrder(InternalOrder order, StreamOutput out) throws IOException { - out.writeByte(order.id()); - if (order instanceof InternalOrder.Aggregation) { - out.writeBoolean(order.asc()); - out.writeString(order.key()); - } - } - - /** - * Reads an order from the given input (based on the id of the order). 
- * - * @see Streams#writeOrder(InternalOrder, org.elasticsearch.common.io.stream.StreamOutput) - */ - public static InternalOrder readOrder(StreamInput in) throws IOException { - byte id = in.readByte(); - switch (id) { - case 1: return (InternalOrder) Histogram.Order.KEY_ASC; - case 2: return (InternalOrder) Histogram.Order.KEY_DESC; - case 3: return (InternalOrder) Histogram.Order.COUNT_ASC; - case 4: return (InternalOrder) Histogram.Order.COUNT_DESC; - case 0: - boolean asc = in.readBoolean(); - String key = in.readString(); - return new InternalOrder.Aggregation(key, asc); - default: - throw new RuntimeException("unknown histogram order"); - } - } - - } - - -} diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractStringTermsAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractStringTermsAggregator.java index 9b40fcc42dc..edbf2aef25f 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractStringTermsAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractStringTermsAggregator.java @@ -24,6 +24,7 @@ import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; @@ -37,7 +38,7 @@ abstract class AbstractStringTermsAggregator extends TermsAggregator { protected final boolean showTermDocCountError; AbstractStringTermsAggregator(String name, AggregatorFactories factories, SearchContext context, Aggregator parent, - Terms.Order order, DocValueFormat format, BucketCountThresholds bucketCountThresholds, SubAggCollectionMode subAggCollectMode, + BucketOrder order, DocValueFormat 
format, BucketCountThresholds bucketCountThresholds, SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, List pipelineAggregators, Map metaData) throws IOException { super(name, factories, context, parent, bucketCountThresholds, order, format, subAggCollectMode, pipelineAggregators, metaData); this.showTermDocCountError = showTermDocCountError; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java index e4c7906f215..e4885bc0539 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java @@ -25,6 +25,7 @@ import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; import java.io.IOException; import java.util.ArrayList; @@ -76,8 +77,8 @@ public class DoubleTerms extends InternalMappedTerms pipelineAggregators, + public DoubleTerms(String name, BucketOrder order, int requiredSize, long minDocCount, List pipelineAggregators, Map metaData, DocValueFormat format, int shardSize, boolean showTermDocCountError, long otherDocCount, List buckets, long docCountError) { super(name, order, requiredSize, minDocCount, pipelineAggregators, metaData, format, shardSize, showTermDocCountError, diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTermsAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTermsAggregator.java index cf7478e3196..0ae42abd9a4 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTermsAggregator.java +++ 
b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTermsAggregator.java @@ -27,6 +27,7 @@ import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; import org.elasticsearch.search.internal.SearchContext; @@ -39,7 +40,7 @@ import java.util.stream.Collectors; public class DoubleTermsAggregator extends LongTermsAggregator { public DoubleTermsAggregator(String name, AggregatorFactories factories, ValuesSource.Numeric valuesSource, DocValueFormat format, - Terms.Order order, BucketCountThresholds bucketCountThresholds, SearchContext aggregationContext, Aggregator parent, + BucketOrder order, BucketCountThresholds bucketCountThresholds, SearchContext aggregationContext, Aggregator parent, SubAggCollectionMode collectionMode, boolean showTermDocCountError, IncludeExclude.LongFilter longFilter, List pipelineAggregators, Map metaData) throws IOException { super(name, factories, valuesSource, format, order, bucketCountThresholds, aggregationContext, parent, collectionMode, diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java index f315d915f0d..33bbc370c6e 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java @@ -43,6 +43,7 @@ import 
org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.bucket.terms.support.BucketPriorityQueue; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.internal.SearchContext; @@ -70,7 +71,7 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr protected SortedSetDocValues globalOrds; public GlobalOrdinalsStringTermsAggregator(String name, AggregatorFactories factories, ValuesSource.Bytes.WithOrdinals valuesSource, - Terms.Order order, DocValueFormat format, BucketCountThresholds bucketCountThresholds, + BucketOrder order, DocValueFormat format, BucketCountThresholds bucketCountThresholds, IncludeExclude.OrdinalsFilter includeExclude, SearchContext context, Aggregator parent, SubAggCollectionMode collectionMode, boolean showTermDocCountError, List pipelineAggregators, Map metaData) throws IOException { @@ -122,8 +123,8 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr public void collect(int doc, long bucket) throws IOException { assert bucket == 0; if (ords.advanceExact(doc)) { - for (long globalOrd = ords.nextOrd(); - globalOrd != SortedSetDocValues.NO_MORE_ORDS; + for (long globalOrd = ords.nextOrd(); + globalOrd != SortedSetDocValues.NO_MORE_ORDS; globalOrd = ords.nextOrd()) { collectExistingBucket(sub, doc, globalOrd); } @@ -218,8 +219,8 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr } @Override - public int compareTerm(Terms.Bucket other) { - return Long.compare(globalOrd, ((OrdBucket) other).globalOrd); + public int compareKey(OrdBucket other) { + return Long.compare(globalOrd, other.globalOrd); } @Override @@ -261,7 +262,7 @@ public class 
GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr private final LongHash bucketOrds; - public WithHash(String name, AggregatorFactories factories, ValuesSource.Bytes.WithOrdinals valuesSource, Terms.Order order, + public WithHash(String name, AggregatorFactories factories, ValuesSource.Bytes.WithOrdinals valuesSource, BucketOrder order, DocValueFormat format, BucketCountThresholds bucketCountThresholds, IncludeExclude.OrdinalsFilter includeExclude, SearchContext context, Aggregator parent, SubAggCollectionMode collectionMode, boolean showTermDocCountError, List pipelineAggregators, Map metaData) @@ -296,8 +297,8 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr @Override public void collect(int doc, long bucket) throws IOException { if (ords.advanceExact(doc)) { - for (long globalOrd = ords.nextOrd(); - globalOrd != SortedSetDocValues.NO_MORE_ORDS; + for (long globalOrd = ords.nextOrd(); + globalOrd != SortedSetDocValues.NO_MORE_ORDS; globalOrd = ords.nextOrd()) { long bucketOrd = bucketOrds.add(globalOrd); if (bucketOrd < 0) { @@ -337,7 +338,7 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr private SortedSetDocValues segmentOrds; public LowCardinality(String name, AggregatorFactories factories, ValuesSource.Bytes.WithOrdinals valuesSource, - Terms.Order order, DocValueFormat format, + BucketOrder order, DocValueFormat format, BucketCountThresholds bucketCountThresholds, SearchContext context, Aggregator parent, SubAggCollectionMode collectionMode, boolean showTermDocCountError, List pipelineAggregators, Map metaData) throws IOException { @@ -371,8 +372,8 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr public void collect(int doc, long bucket) throws IOException { assert bucket == 0; if (ords.advanceExact(doc)) { - for (long segmentOrd = ords.nextOrd(); - segmentOrd != SortedSetDocValues.NO_MORE_ORDS; + for (long segmentOrd = ords.nextOrd(); + 
segmentOrd != SortedSetDocValues.NO_MORE_ORDS; segmentOrd = ords.nextOrd()) { segmentDocCounts.increment(segmentOrd + 1, 1); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedTerms.java index 57c80c5fb40..26f8d809fe5 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedTerms.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedTerms.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; import java.io.IOException; import java.util.Iterator; @@ -46,7 +47,7 @@ public abstract class InternalMappedTerms, B exten protected long docCountError; - protected InternalMappedTerms(String name, Terms.Order order, int requiredSize, long minDocCount, + protected InternalMappedTerms(String name, BucketOrder order, int requiredSize, long minDocCount, List pipelineAggregators, Map metaData, DocValueFormat format, int shardSize, boolean showTermDocCountError, long otherDocCount, List buckets, long docCountError) { super(name, order, requiredSize, minDocCount, pipelineAggregators, metaData); @@ -83,7 +84,7 @@ public abstract class InternalMappedTerms, B exten @Override protected void setDocCountError(long docCountError) { - this.docCountError = docCountError; + this.docCountError = docCountError; } @Override diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalOrder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalOrder.java deleted file mode 100644 index 513e7a1ac0e..00000000000 --- 
a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalOrder.java +++ /dev/null @@ -1,385 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.search.aggregations.bucket.terms; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.util.Comparators; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.search.aggregations.Aggregator; -import org.elasticsearch.search.aggregations.bucket.BucketsAggregator; -import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; -import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator; -import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket; -import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; -import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregator; -import org.elasticsearch.search.aggregations.support.AggregationPath; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.Iterator; -import java.util.LinkedList; -import java.util.List; 
-import java.util.Objects; - -class InternalOrder extends Terms.Order { - - private static final byte COUNT_DESC_ID = 1; - private static final byte COUNT_ASC_ID = 2; - private static final byte TERM_DESC_ID = 3; - private static final byte TERM_ASC_ID = 4; - - /** - * Order by the (higher) count of each term. - */ - public static final InternalOrder COUNT_DESC = new InternalOrder(COUNT_DESC_ID, "_count", false, new Comparator() { - @Override - public int compare(Terms.Bucket o1, Terms.Bucket o2) { - return Long.compare(o2.getDocCount(), o1.getDocCount()); - } - }); - - /** - * Order by the (lower) count of each term. - */ - public static final InternalOrder COUNT_ASC = new InternalOrder(COUNT_ASC_ID, "_count", true, new Comparator() { - - @Override - public int compare(Terms.Bucket o1, Terms.Bucket o2) { - return Long.compare(o1.getDocCount(), o2.getDocCount()); - } - }); - - /** - * Order by the terms. - */ - public static final InternalOrder TERM_DESC = new InternalOrder(TERM_DESC_ID, "_term", false, new Comparator() { - - @Override - public int compare(Terms.Bucket o1, Terms.Bucket o2) { - return o2.compareTerm(o1); - } - }); - - /** - * Order by the terms. 
- */ - public static final InternalOrder TERM_ASC = new InternalOrder(TERM_ASC_ID, "_term", true, new Comparator() { - - @Override - public int compare(Terms.Bucket o1, Terms.Bucket o2) { - return o1.compareTerm(o2); - } - }); - - public static boolean isCountDesc(Terms.Order order) { - if (order == COUNT_DESC) { - return true; - } else if (order instanceof CompoundOrder) { - // check if its a compound order with count desc and the tie breaker (term asc) - CompoundOrder compoundOrder = (CompoundOrder) order; - if (compoundOrder.orderElements.size() == 2 && compoundOrder.orderElements.get(0) == COUNT_DESC && compoundOrder.orderElements.get(1) == TERM_ASC) { - return true; - } - } - return false; - } - - public static boolean isTermOrder(Terms.Order order) { - if (order == TERM_ASC) { - return true; - } else if (order == TERM_DESC) { - return true; - } else if (order instanceof CompoundOrder) { - // check if its a compound order with only a single element ordering - // by term - CompoundOrder compoundOrder = (CompoundOrder) order; - if (compoundOrder.orderElements.size() == 1 && compoundOrder.orderElements.get(0) == TERM_ASC - || compoundOrder.orderElements.get(0) == TERM_DESC) { - return true; - } - } - return false; - } - - final byte id; - final String key; - final boolean asc; - protected final Comparator comparator; - - InternalOrder(byte id, String key, boolean asc, Comparator comparator) { - this.id = id; - this.key = key; - this.asc = asc; - this.comparator = comparator; - } - - @Override - byte id() { - return id; - } - - @Override - protected Comparator comparator(Aggregator aggregator) { - return comparator; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return builder.startObject().field(key, asc ? 
"asc" : "desc").endObject(); - } - - public static Terms.Order validate(Terms.Order order, Aggregator termsAggregator) { - if (order instanceof CompoundOrder) { - for (Terms.Order innerOrder : ((CompoundOrder)order).orderElements) { - validate(innerOrder, termsAggregator); - } - return order; - } else if (!(order instanceof Aggregation)) { - return order; - } - AggregationPath path = ((Aggregation) order).path(); - path.validate(termsAggregator); - return order; - } - - static class Aggregation extends InternalOrder { - - static final byte ID = 0; - - Aggregation(String key, boolean asc) { - super(ID, key, asc, new MultiBucketsAggregation.Bucket.SubAggregationComparator(key, asc)); - } - - AggregationPath path() { - return ((MultiBucketsAggregation.Bucket.SubAggregationComparator) comparator).path(); - } - - @Override - protected Comparator comparator(Aggregator termsAggregator) { - if (termsAggregator == null) { - return comparator; - } - - // Internal Optimization: - // - // in this phase, if the order is based on sub-aggregations, we need to use a different comparator - // to avoid constructing buckets for ordering purposes (we can potentially have a lot of buckets and building - // them will cause loads of redundant object constructions). The "special" comparators here will fetch the - // sub aggregation values directly from the sub aggregators bypassing bucket creation. Note that the comparator - // attached to the order will still be used in the reduce phase of the Aggregation. - - AggregationPath path = path(); - final Aggregator aggregator = path.resolveAggregator(termsAggregator); - final String key = path.lastPathElement().key; - - if (aggregator instanceof SingleBucketAggregator) { - assert key == null : "this should be picked up before the aggregation is executed - on validate"; - return new Comparator() { - @Override - public int compare(Terms.Bucket o1, Terms.Bucket o2) { - int mul = asc ? 
1 : -1; - int v1 = ((SingleBucketAggregator) aggregator).bucketDocCount(((InternalTerms.Bucket) o1).bucketOrd); - int v2 = ((SingleBucketAggregator) aggregator).bucketDocCount(((InternalTerms.Bucket) o2).bucketOrd); - return mul * (v1 - v2); - } - }; - } - - // with only support single-bucket aggregators - assert !(aggregator instanceof BucketsAggregator) : "this should be picked up before the aggregation is executed - on validate"; - - if (aggregator instanceof NumericMetricsAggregator.MultiValue) { - assert key != null : "this should be picked up before the aggregation is executed - on validate"; - return new Comparator() { - @Override - public int compare(Terms.Bucket o1, Terms.Bucket o2) { - double v1 = ((NumericMetricsAggregator.MultiValue) aggregator).metric(key, ((InternalTerms.Bucket) o1).bucketOrd); - double v2 = ((NumericMetricsAggregator.MultiValue) aggregator).metric(key, ((InternalTerms.Bucket) o2).bucketOrd); - // some metrics may return NaN (eg. avg, variance, etc...) in which case we'd like to push all of those to - // the bottom - return Comparators.compareDiscardNaN(v1, v2, asc); - } - }; - } - - // single-value metrics agg - return new Comparator() { - @Override - public int compare(Terms.Bucket o1, Terms.Bucket o2) { - double v1 = ((NumericMetricsAggregator.SingleValue) aggregator).metric(((InternalTerms.Bucket) o1).bucketOrd); - double v2 = ((NumericMetricsAggregator.SingleValue) aggregator).metric(((InternalTerms.Bucket) o2).bucketOrd); - // some metrics may return NaN (eg. avg, variance, etc...) 
in which case we'd like to push all of those to - // the bottom - return Comparators.compareDiscardNaN(v1, v2, asc); - } - }; - } - } - - static class CompoundOrder extends Terms.Order { - - static final byte ID = -1; - - private final List orderElements; - - CompoundOrder(List compoundOrder) { - this(compoundOrder, true); - } - - CompoundOrder(List compoundOrder, boolean absoluteOrdering) { - this.orderElements = new LinkedList<>(compoundOrder); - Terms.Order lastElement = compoundOrder.get(compoundOrder.size() - 1); - if (absoluteOrdering && !(InternalOrder.TERM_ASC == lastElement || InternalOrder.TERM_DESC == lastElement)) { - // add term order ascending as a tie-breaker to avoid non-deterministic ordering - // if all user provided comparators return 0. - this.orderElements.add(Order.term(true)); - } - } - - @Override - byte id() { - return ID; - } - - List orderElements() { - return Collections.unmodifiableList(orderElements); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startArray(); - for (Terms.Order order : orderElements) { - order.toXContent(builder, params); - } - return builder.endArray(); - } - - @Override - protected Comparator comparator(Aggregator aggregator) { - return new CompoundOrderComparator(orderElements, aggregator); - } - - @Override - public int hashCode() { - return Objects.hash(orderElements); - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - CompoundOrder other = (CompoundOrder) obj; - return Objects.equals(orderElements, other.orderElements); - } - - public static class CompoundOrderComparator implements Comparator { - - private List compoundOrder; - private Aggregator aggregator; - - CompoundOrderComparator(List compoundOrder, Aggregator aggregator) { - this.compoundOrder = compoundOrder; - this.aggregator = aggregator; - } - - @Override - public int 
compare(Bucket o1, Bucket o2) { - int result = 0; - for (Iterator itr = compoundOrder.iterator(); itr.hasNext() && result == 0;) { - result = itr.next().comparator(aggregator).compare(o1, o2); - } - return result; - } - } - } - - public static class Streams { - - public static void writeOrder(Terms.Order order, StreamOutput out) throws IOException { - if (order instanceof Aggregation) { - out.writeByte(order.id()); - Aggregation aggregationOrder = (Aggregation) order; - out.writeBoolean(((MultiBucketsAggregation.Bucket.SubAggregationComparator) aggregationOrder.comparator).asc()); - AggregationPath path = ((Aggregation) order).path(); - out.writeString(path.toString()); - } else if (order instanceof CompoundOrder) { - CompoundOrder compoundOrder = (CompoundOrder) order; - out.writeByte(order.id()); - out.writeVInt(compoundOrder.orderElements.size()); - for (Terms.Order innerOrder : compoundOrder.orderElements) { - Streams.writeOrder(innerOrder, out); - } - } else { - out.writeByte(order.id()); - } - } - - public static Terms.Order readOrder(StreamInput in) throws IOException { - return readOrder(in, false); - } - - public static Terms.Order readOrder(StreamInput in, boolean absoluteOrder) throws IOException { - byte id = in.readByte(); - switch (id) { - case COUNT_DESC_ID: return absoluteOrder ? new CompoundOrder(Collections.singletonList((Terms.Order) InternalOrder.COUNT_DESC)) : InternalOrder.COUNT_DESC; - case COUNT_ASC_ID: return absoluteOrder ? 
new CompoundOrder(Collections.singletonList((Terms.Order) InternalOrder.COUNT_ASC)) : InternalOrder.COUNT_ASC; - case TERM_DESC_ID: return InternalOrder.TERM_DESC; - case TERM_ASC_ID: return InternalOrder.TERM_ASC; - case Aggregation.ID: - boolean asc = in.readBoolean(); - String key = in.readString(); - return new InternalOrder.Aggregation(key, asc); - case CompoundOrder.ID: - int size = in.readVInt(); - List compoundOrder = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - compoundOrder.add(Streams.readOrder(in, false)); - } - return new CompoundOrder(compoundOrder, absoluteOrder); - default: - throw new RuntimeException("unknown terms order"); - } - } - } - - @Override - public int hashCode() { - return Objects.hash(id, asc); - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - InternalOrder other = (InternalOrder) obj; - return Objects.equals(id, other.id) - && Objects.equals(asc, other.asc); - } -} diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java index 3834f9a65be..24d1d301623 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java @@ -31,6 +31,9 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.bucket.terms.support.BucketPriorityQueue; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.InternalOrder; +import org.elasticsearch.search.aggregations.KeyComparable; import java.io.IOException; import 
java.util.ArrayList; @@ -46,8 +49,8 @@ public abstract class InternalTerms, B extends Int protected static final ParseField DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME = new ParseField("doc_count_error_upper_bound"); protected static final ParseField SUM_OF_OTHER_DOC_COUNTS = new ParseField("sum_other_doc_count"); - public abstract static class Bucket> extends InternalMultiBucketAggregation.InternalBucket implements Terms.Bucket { - + public abstract static class Bucket> extends InternalMultiBucketAggregation.InternalBucket + implements Terms.Bucket, KeyComparable { /** * Reads a bucket. Should be a constructor reference. */ @@ -177,11 +180,11 @@ public abstract class InternalTerms, B extends Int } } - protected final Terms.Order order; + protected final BucketOrder order; protected final int requiredSize; protected final long minDocCount; - protected InternalTerms(String name, Terms.Order order, int requiredSize, long minDocCount, + protected InternalTerms(String name, BucketOrder order, int requiredSize, long minDocCount, List pipelineAggregators, Map metaData) { super(name, pipelineAggregators, metaData); this.order = order; @@ -201,7 +204,7 @@ public abstract class InternalTerms, B extends Int @Override protected final void doWriteTo(StreamOutput out) throws IOException { - InternalOrder.Streams.writeOrder(order, out); + order.writeTo(out); writeSize(requiredSize, out); out.writeVLong(minDocCount); writeTermTypeInfoTo(out); @@ -238,9 +241,9 @@ public abstract class InternalTerms, B extends Int } otherDocCount += terms.getSumOfOtherDocCounts(); final long thisAggDocCountError; - if (terms.getBuckets().size() < getShardSize() || InternalOrder.isTermOrder(order)) { + if (terms.getBuckets().size() < getShardSize() || InternalOrder.isKeyOrder(order)) { thisAggDocCountError = 0; - } else if (InternalOrder.isCountDesc(this.order)) { + } else if (InternalOrder.isCountDesc(order)) { if (terms.getDocCountError() > 0) { // If there is an existing docCountError for this agg 
then // use this as the error for this aggregation diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java index 0de13a4d98f..025c397d3bd 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java @@ -25,6 +25,7 @@ import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; import java.io.IOException; import java.util.ArrayList; @@ -76,8 +77,8 @@ public class LongTerms extends InternalMappedTerms } @Override - public int compareTerm(Terms.Bucket other) { - return Long.compare(term, ((Number) other.getKey()).longValue()); + public int compareKey(Bucket other) { + return Long.compare(term, other.term); } @Override @@ -105,7 +106,7 @@ public class LongTerms extends InternalMappedTerms } } - public LongTerms(String name, Terms.Order order, int requiredSize, long minDocCount, List pipelineAggregators, + public LongTerms(String name, BucketOrder order, int requiredSize, long minDocCount, List pipelineAggregators, Map metaData, DocValueFormat format, int shardSize, boolean showTermDocCountError, long otherDocCount, List buckets, long docCountError) { super(name, order, requiredSize, minDocCount, pipelineAggregators, metaData, format, shardSize, showTermDocCountError, diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsAggregator.java index 8f8e2f3079b..752666de678 100644 --- 
a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsAggregator.java @@ -32,6 +32,8 @@ import org.elasticsearch.search.aggregations.bucket.terms.support.BucketPriority import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude.LongFilter; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.InternalOrder; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.internal.SearchContext; @@ -50,7 +52,7 @@ public class LongTermsAggregator extends TermsAggregator { private LongFilter longFilter; public LongTermsAggregator(String name, AggregatorFactories factories, ValuesSource.Numeric valuesSource, DocValueFormat format, - Terms.Order order, BucketCountThresholds bucketCountThresholds, SearchContext aggregationContext, Aggregator parent, + BucketOrder order, BucketCountThresholds bucketCountThresholds, SearchContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, IncludeExclude.LongFilter longFilter, List pipelineAggregators, Map metaData) throws IOException { super(name, factories, aggregationContext, parent, bucketCountThresholds, order, format, subAggCollectMode, pipelineAggregators, metaData); @@ -106,7 +108,7 @@ public class LongTermsAggregator extends TermsAggregator { public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException { assert owningBucketOrdinal == 0; - if (bucketCountThresholds.getMinDocCount() == 0 && (order != InternalOrder.COUNT_DESC || bucketOrds.size() < bucketCountThresholds.getRequiredSize())) { + if (bucketCountThresholds.getMinDocCount() == 0 && 
(InternalOrder.isCountDesc(order) == false || bucketOrds.size() < bucketCountThresholds.getRequiredSize())) { // we need to fill-in the blanks for (LeafReaderContext ctx : context.searcher().getTopReaderContext().leaves()) { final SortedNumericDocValues values = getValues(valuesSource, ctx); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedTerms.java index 6e24bd00383..821bb000e33 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedTerms.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedTerms.java @@ -93,11 +93,6 @@ public abstract class ParsedTerms extends ParsedMultiBucketAggregation pipelineAggregators, + public StringTerms(String name, BucketOrder order, int requiredSize, long minDocCount, List pipelineAggregators, Map metaData, DocValueFormat format, int shardSize, boolean showTermDocCountError, long otherDocCount, List buckets, long docCountError) { super(name, order, requiredSize, minDocCount, pipelineAggregators, metaData, format, diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregator.java index 61c46cdfd68..6161f7912a8 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregator.java @@ -33,6 +33,8 @@ import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.bucket.terms.support.BucketPriorityQueue; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; 
+import org.elasticsearch.search.aggregations.InternalOrder; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.internal.SearchContext; @@ -51,7 +53,7 @@ public class StringTermsAggregator extends AbstractStringTermsAggregator { private final IncludeExclude.StringFilter includeExclude; public StringTermsAggregator(String name, AggregatorFactories factories, ValuesSource valuesSource, - Terms.Order order, DocValueFormat format, BucketCountThresholds bucketCountThresholds, + BucketOrder order, DocValueFormat format, BucketCountThresholds bucketCountThresholds, IncludeExclude.StringFilter includeExclude, SearchContext context, Aggregator parent, SubAggCollectionMode collectionMode, boolean showTermDocCountError, List pipelineAggregators, Map metaData) throws IOException { @@ -110,7 +112,7 @@ public class StringTermsAggregator extends AbstractStringTermsAggregator { public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException { assert owningBucketOrdinal == 0; - if (bucketCountThresholds.getMinDocCount() == 0 && (order != InternalOrder.COUNT_DESC || bucketOrds.size() < bucketCountThresholds.getRequiredSize())) { + if (bucketCountThresholds.getMinDocCount() == 0 && (InternalOrder.isCountDesc(order) == false || bucketOrds.size() < bucketCountThresholds.getRequiredSize())) { // we need to fill-in the blanks for (LeafReaderContext ctx : context.searcher().getTopReaderContext().leaves()) { final SortedBinaryDocValues values = valuesSource.bytesValues(ctx); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/Terms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/Terms.java index 166ece4e112..f14ecae7d16 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/Terms.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/Terms.java @@ -18,12 +18,8 @@ */ package 
org.elasticsearch.search.aggregations.bucket.terms; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; -import java.util.Arrays; -import java.util.Comparator; import java.util.List; /** @@ -39,8 +35,6 @@ public interface Terms extends MultiBucketsAggregation { Number getKeyAsNumber(); - int compareTerm(Terms.Bucket other); - long getDocCountError(); } @@ -65,84 +59,4 @@ public interface Terms extends MultiBucketsAggregation { * it to the top buckets. */ long getSumOfOtherDocCounts(); - - /** - * Determines the order by which the term buckets will be sorted - */ - abstract class Order implements ToXContent { - - /** - * @return a bucket ordering strategy that sorts buckets by their document counts (ascending or descending) - */ - public static Order count(boolean asc) { - return asc ? InternalOrder.COUNT_ASC : InternalOrder.COUNT_DESC; - } - - /** - * @return a bucket ordering strategy that sorts buckets by their terms (ascending or descending) - */ - public static Order term(boolean asc) { - return asc ? 
InternalOrder.TERM_ASC : InternalOrder.TERM_DESC; - } - - /** - * Creates a bucket ordering strategy which sorts buckets based on a single-valued calc get - * - * @param path the name of the get - * @param asc The direction of the order (ascending or descending) - */ - public static Order aggregation(String path, boolean asc) { - return new InternalOrder.Aggregation(path, asc); - } - - /** - * Creates a bucket ordering strategy which sorts buckets based on a multi-valued calc get - * - * @param aggregationName the name of the get - * @param metricName The name of the value of the multi-value get by which the sorting will be applied - * @param asc The direction of the order (ascending or descending) - */ - public static Order aggregation(String aggregationName, String metricName, boolean asc) { - return new InternalOrder.Aggregation(aggregationName + "." + metricName, asc); - } - - /** - * Creates a bucket ordering strategy which sorts buckets based multiple criteria - * - * @param orders a list of {@link Order} objects to sort on, in order of priority - */ - public static Order compound(List orders) { - return new InternalOrder.CompoundOrder(orders); - } - - /** - * Creates a bucket ordering strategy which sorts buckets based multiple criteria - * - * @param orders a list of {@link Order} parameters to sort on, in order of priority - */ - public static Order compound(Order... orders) { - return compound(Arrays.asList(orders)); - } - - /** - * @return A comparator for the bucket based on the given terms aggregator. The comparator is used in two phases: - * - * - aggregation phase, where each shard builds a list of term buckets to be sent to the coordinating node. - * In this phase, the passed in aggregator will be the terms aggregator that aggregates the buckets on the - * shard level. - * - * - reduce phase, where the coordinating node gathers all the buckets from all the shards and reduces them - * to a final bucket list. 
In this case, the passed in aggregator will be {@code null} - */ - protected abstract Comparator comparator(Aggregator aggregator); - - abstract byte id(); - - @Override - public abstract int hashCode(); - - @Override - public abstract boolean equals(Object obj); - - } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java index 944f9fd96a4..cb239781e3e 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java @@ -19,20 +19,20 @@ package org.elasticsearch.search.aggregations.bucket.terms; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator.BucketCountThresholds; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; +import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.InternalOrder; +import org.elasticsearch.search.aggregations.InternalOrder.CompoundOrder; import 
org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; @@ -82,7 +82,7 @@ public class TermsAggregationBuilder extends ValuesSourceAggregationBuilder SubAggCollectionMode.parse(p.text()), SubAggCollectionMode.KEY, ObjectParser.ValueType.STRING); - PARSER.declareObjectArray(TermsAggregationBuilder::order, TermsAggregationBuilder::parseOrderParam, + PARSER.declareObjectArray(TermsAggregationBuilder::order, InternalOrder.Parser::parseOrderParam, TermsAggregationBuilder.ORDER_FIELD); PARSER.declareField((b, v) -> b.includeExclude(IncludeExclude.merge(v, b.includeExclude())), @@ -96,7 +96,7 @@ public class TermsAggregationBuilder extends ValuesSourceAggregationBuilder orders) { + public TermsAggregationBuilder order(List orders) { if (orders == null) { throw new IllegalArgumentException("[orders] must not be null: [" + name + "]"); } - order(Terms.Order.compound(orders)); + // if the list only contains one order use that to avoid inconsistent xcontent + order(orders.size() > 1 ? BucketOrder.compound(orders) : orders.get(0)); return this; } /** * Gets the order in which the buckets will be returned. 
*/ - public Terms.Order order() { + public BucketOrder order() { return order; } @@ -327,45 +332,4 @@ public class TermsAggregationBuilder extends ValuesSourceAggregationBuilder aggsUsedForSorting = new HashSet<>(); protected final SubAggCollectionMode collectMode; public TermsAggregator(String name, AggregatorFactories factories, SearchContext context, Aggregator parent, - BucketCountThresholds bucketCountThresholds, Terms.Order order, DocValueFormat format, SubAggCollectionMode collectMode, + BucketCountThresholds bucketCountThresholds, BucketOrder order, DocValueFormat format, SubAggCollectionMode collectMode, List pipelineAggregators, Map metaData) throws IOException { super(name, factories, context, parent, pipelineAggregators, metaData); this.bucketCountThresholds = bucketCountThresholds; @@ -186,7 +193,7 @@ public abstract class TermsAggregator extends BucketsAggregator { aggsUsedForSorting.add(path.resolveTopmostAggregator(this)); } else if (order instanceof CompoundOrder) { CompoundOrder compoundOrder = (CompoundOrder) order; - for (Terms.Order orderElement : compoundOrder.orderElements()) { + for (BucketOrder orderElement : compoundOrder.orderElements()) { if (orderElement instanceof Aggregation) { AggregationPath path = ((Aggregation) orderElement).path(); aggsUsedForSorting.add(path.resolveTopmostAggregator(this)); @@ -195,6 +202,58 @@ public abstract class TermsAggregator extends BucketsAggregator { } } + /** + * Internal Optimization for ordering {@link InternalTerms.Bucket}s by a sub aggregation. + *

+ * in this phase, if the order is based on sub-aggregations, we need to use a different comparator + * to avoid constructing buckets for ordering purposes (we can potentially have a lot of buckets and building + * them will cause loads of redundant object constructions). The "special" comparators here will fetch the + * sub aggregation values directly from the sub aggregators bypassing bucket creation. Note that the comparator + * attached to the order will still be used in the reduce phase of the Aggregation. + * + * @param path determines which sub aggregation to use for ordering. + * @param asc {@code true} for ascending order, {@code false} for descending. + * @return {@code Comparator} to order {@link InternalTerms.Bucket}s in the desired order. + */ + public Comparator bucketComparator(AggregationPath path, boolean asc) { + + final Aggregator aggregator = path.resolveAggregator(this); + final String key = path.lastPathElement().key; + + if (aggregator instanceof SingleBucketAggregator) { + assert key == null : "this should be picked up before the aggregation is executed - on validate"; + return (b1, b2) -> { + int mul = asc ? 
1 : -1; + int v1 = ((SingleBucketAggregator) aggregator).bucketDocCount(((InternalTerms.Bucket) b1).bucketOrd); + int v2 = ((SingleBucketAggregator) aggregator).bucketDocCount(((InternalTerms.Bucket) b2).bucketOrd); + return mul * (v1 - v2); + }; + } + + // with only support single-bucket aggregators + assert !(aggregator instanceof BucketsAggregator) : "this should be picked up before the aggregation is executed - on validate"; + + if (aggregator instanceof NumericMetricsAggregator.MultiValue) { + assert key != null : "this should be picked up before the aggregation is executed - on validate"; + return (b1, b2) -> { + double v1 = ((NumericMetricsAggregator.MultiValue) aggregator).metric(key, ((InternalTerms.Bucket) b1).bucketOrd); + double v2 = ((NumericMetricsAggregator.MultiValue) aggregator).metric(key, ((InternalTerms.Bucket) b2).bucketOrd); + // some metrics may return NaN (eg. avg, variance, etc...) in which case we'd like to push all of those to + // the bottom + return Comparators.compareDiscardNaN(v1, v2, asc); + }; + } + + // single-value metrics agg + return (b1, b2) -> { + double v1 = ((NumericMetricsAggregator.SingleValue) aggregator).metric(((InternalTerms.Bucket) b1).bucketOrd); + double v2 = ((NumericMetricsAggregator.SingleValue) aggregator).metric(((InternalTerms.Bucket) b2).bucketOrd); + // some metrics may return NaN (eg. avg, variance, etc...) 
in which case we'd like to push all of those to + // the bottom + return Comparators.compareDiscardNaN(v1, v2, asc); + }; + } + @Override protected boolean shouldDefer(Aggregator aggregator) { return collectMode == SubAggCollectionMode.BREADTH_FIRST diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java index 10fef554555..9a06dfe66f5 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java @@ -33,6 +33,8 @@ import org.elasticsearch.search.aggregations.bucket.BucketUtils; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator.BucketCountThresholds; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.InternalOrder; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; @@ -44,14 +46,14 @@ import java.util.Map; public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory { - private final Terms.Order order; + private final BucketOrder order; private final IncludeExclude includeExclude; private final String executionHint; private final SubAggCollectionMode collectMode; private final TermsAggregator.BucketCountThresholds bucketCountThresholds; private boolean showTermDocCountError; - public TermsAggregatorFactory(String name, ValuesSourceConfig config, Terms.Order order, + public TermsAggregatorFactory(String name, ValuesSourceConfig config, BucketOrder order, 
IncludeExclude includeExclude, String executionHint, SubAggCollectionMode collectMode, TermsAggregator.BucketCountThresholds bucketCountThresholds, boolean showTermDocCountError, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { @@ -90,7 +92,7 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory pipelineAggregators, Map metaData) @@ -242,7 +244,7 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory pipelineAggregators, Map metaData) @@ -262,7 +264,7 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory pipelineAggregators, Map metaData) @@ -281,7 +283,7 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory pipelineAggregators, Map metaData) @@ -319,7 +321,7 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory pipelineAggregators, Map metaData) diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java index 6aaccd22d72..595991dac06 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java @@ -25,6 +25,7 @@ import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; import java.io.IOException; import java.util.Collections; @@ -50,7 +51,7 @@ public class UnmappedTerms extends InternalTerms pipelineAggregators, Map metaData) { super(name, order, requiredSize, minDocCount, pipelineAggregators, metaData); } diff --git 
a/core/src/main/java/org/elasticsearch/search/aggregations/support/AggregationPath.java b/core/src/main/java/org/elasticsearch/search/aggregations/support/AggregationPath.java index 746e0e5e161..995381373ab 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/support/AggregationPath.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/support/AggregationPath.java @@ -33,7 +33,7 @@ import java.util.ArrayList; import java.util.List; /** - * A path that can be used to sort/order buckets (in some multi-bucket aggregations, eg terms & histogram) based on + * A path that can be used to sort/order buckets (in some multi-bucket aggregations, e.g. terms & histogram) based on * sub-aggregations. The path may point to either a single-bucket aggregation or a metrics aggregation. If the path * points to a single-bucket aggregation, the sort will be applied based on the {@code doc_count} of the bucket. If this * path points to a metrics aggregation, if it's a single-value metrics (eg. avg, max, min, etc..) the sort will be @@ -281,14 +281,15 @@ public class AggregationPath { /** * Validates this path over the given aggregator as a point of reference. * - * @param root The point of reference of this path + * @param root The point of reference of this path + * @throws AggregationExecutionException on validation error */ - public void validate(Aggregator root) { + public void validate(Aggregator root) throws AggregationExecutionException { Aggregator aggregator = root; for (int i = 0; i < pathElements.size(); i++) { aggregator = aggregator.subAggregator(pathElements.get(i).name); if (aggregator == null) { - throw new AggregationExecutionException("Invalid term-aggregator order path [" + this + "]. Unknown aggregation [" + throw new AggregationExecutionException("Invalid aggregator order path [" + this + "]. 
Unknown aggregation [" + pathElements.get(i).name + "]"); } if (i < pathElements.size() - 1) { @@ -296,16 +297,16 @@ public class AggregationPath { // we're in the middle of the path, so the aggregator can only be a single-bucket aggregator if (!(aggregator instanceof SingleBucketAggregator)) { - throw new AggregationExecutionException("Invalid terms aggregation order path [" + this + - "]. Terms buckets can only be sorted on a sub-aggregator path " + + throw new AggregationExecutionException("Invalid aggregation order path [" + this + + "]. Buckets can only be sorted on a sub-aggregator path " + "that is built out of zero or more single-bucket aggregations within the path and a final " + "single-bucket or a metrics aggregation at the path end. Sub-path [" + subPath(0, i + 1) + "] points to non single-bucket aggregation"); } if (pathElements.get(i).key != null) { - throw new AggregationExecutionException("Invalid terms aggregation order path [" + this + - "]. Terms buckets can only be sorted on a sub-aggregator path " + + throw new AggregationExecutionException("Invalid aggregation order path [" + this + + "]. Buckets can only be sorted on a sub-aggregator path " + "that is built out of zero or more single-bucket aggregations within the path and a " + "final single-bucket or a metrics aggregation at the path end. Sub-path [" + subPath(0, i + 1) + "] points to non single-bucket aggregation"); @@ -314,8 +315,8 @@ public class AggregationPath { } boolean singleBucket = aggregator instanceof SingleBucketAggregator; if (!singleBucket && !(aggregator instanceof NumericMetricsAggregator)) { - throw new AggregationExecutionException("Invalid terms aggregation order path [" + this + - "]. Terms buckets can only be sorted on a sub-aggregator path " + + throw new AggregationExecutionException("Invalid aggregation order path [" + this + + "]. 
Buckets can only be sorted on a sub-aggregator path " + "that is built out of zero or more single-bucket aggregations within the path and a final " + "single-bucket or a metrics aggregation at the path end."); } @@ -324,7 +325,7 @@ public class AggregationPath { if (singleBucket) { if (lastToken.key != null && !"doc_count".equals(lastToken.key)) { - throw new AggregationExecutionException("Invalid terms aggregation order path [" + this + + throw new AggregationExecutionException("Invalid aggregation order path [" + this + "]. Ordering on a single-bucket aggregation can only be done on its doc_count. " + "Either drop the key (a la \"" + lastToken.name + "\") or change it to \"doc_count\" (a la \"" + lastToken.name + ".doc_count\")"); } @@ -333,7 +334,7 @@ public class AggregationPath { if (aggregator instanceof NumericMetricsAggregator.SingleValue) { if (lastToken.key != null && !"value".equals(lastToken.key)) { - throw new AggregationExecutionException("Invalid terms aggregation order path [" + this + + throw new AggregationExecutionException("Invalid aggregation order path [" + this + "]. Ordering on a single-value metrics aggregation can only be done on its value. " + "Either drop the key (a la \"" + lastToken.name + "\") or change it to \"value\" (a la \"" + lastToken.name + ".value\")"); } @@ -342,12 +343,12 @@ public class AggregationPath { // the aggregator must be of a multi-value metrics type if (lastToken.key == null) { - throw new AggregationExecutionException("Invalid terms aggregation order path [" + this + + throw new AggregationExecutionException("Invalid aggregation order path [" + this + "]. When ordering on a multi-value metrics aggregation a metric name must be specified"); } if (!((NumericMetricsAggregator.MultiValue) aggregator).hasMetric(lastToken.key)) { - throw new AggregationExecutionException("Invalid terms aggregation order path [" + this + + throw new AggregationExecutionException("Invalid aggregation order path [" + this + "]. 
Unknown metric name [" + lastToken.key + "] on multi-value metrics aggregation [" + lastToken.name + "]"); } } diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java index c1155bdfbd6..c8eee3e91ef 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java @@ -19,19 +19,31 @@ package org.elasticsearch.search.fetch.subphase; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.ReaderUtil; import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.CollectionTerminatedException; +import org.apache.lucene.search.Collector; +import org.apache.lucene.search.ConjunctionDISI; +import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.DocValuesTermsQuery; +import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.ScorerSupplier; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopDocsCollector; import org.apache.lucene.search.TopFieldCollector; import org.apache.lucene.search.TopScoreDocCollector; +import org.apache.lucene.search.TotalHitCountCollector; +import org.apache.lucene.search.Weight; import org.apache.lucene.search.join.BitSetProducer; import org.apache.lucene.search.join.ParentChildrenBlockJoinQuery; +import org.apache.lucene.util.Bits; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.mapper.DocumentMapper; @@ -40,11 +52,11 @@ import 
org.elasticsearch.index.mapper.ObjectMapper; import org.elasticsearch.index.mapper.ParentFieldMapper; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHitField; -import org.elasticsearch.search.fetch.FetchSubPhase; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.SubSearchContext; import java.io.IOException; +import java.util.Arrays; import java.util.HashMap; import java.util.Map; import java.util.Objects; @@ -57,7 +69,7 @@ public final class InnerHitsContext { this.innerHits = new HashMap<>(); } - public InnerHitsContext(Map innerHits) { + InnerHitsContext(Map innerHits) { this.innerHits = Objects.requireNonNull(innerHits); } @@ -77,14 +89,16 @@ public final class InnerHitsContext { public abstract static class BaseInnerHits extends SubSearchContext { private final String name; + final SearchContext context; private InnerHitsContext childInnerHits; - protected BaseInnerHits(String name, SearchContext context) { + BaseInnerHits(String name, SearchContext context) { super(context); this.name = name; + this.context = context; } - public abstract TopDocs topDocs(SearchContext context, FetchSubPhase.HitContext hitContext) throws IOException; + public abstract TopDocs[] topDocs(SearchHit[] hits) throws IOException; public String getName() { return name; @@ -98,6 +112,12 @@ public final class InnerHitsContext { public void setChildInnerHits(Map childInnerHits) { this.childInnerHits = new InnerHitsContext(childInnerHits); } + + Weight createInnerHitQueryWeight() throws IOException { + final boolean needsScores = size() != 0 && (sort() == null || sort().sort.needsScores()); + return context.searcher().createNormalizedWeight(query(), needsScores); + } + } public static final class NestedInnerHits extends BaseInnerHits { @@ -112,35 +132,48 @@ public final class InnerHitsContext { } @Override - public TopDocs topDocs(SearchContext context, FetchSubPhase.HitContext hitContext) throws IOException { 
- Query rawParentFilter; - if (parentObjectMapper == null) { - rawParentFilter = Queries.newNonNestedFilter(); - } else { - rawParentFilter = parentObjectMapper.nestedTypeFilter(); - } - BitSetProducer parentFilter = context.bitsetFilterCache().getBitSetProducer(rawParentFilter); - Query childFilter = childObjectMapper.nestedTypeFilter(); - int parentDocId = hitContext.readerContext().docBase + hitContext.docId(); - Query q = Queries.filtered(query(), new ParentChildrenBlockJoinQuery(parentFilter, childFilter, parentDocId)); - - if (size() == 0) { - return new TopDocs(context.searcher().count(q), Lucene.EMPTY_SCORE_DOCS, 0); - } else { - int topN = Math.min(from() + size(), context.searcher().getIndexReader().maxDoc()); - TopDocsCollector topDocsCollector; - if (sort() != null) { - topDocsCollector = TopFieldCollector.create(sort().sort, topN, true, trackScores(), trackScores()); + public TopDocs[] topDocs(SearchHit[] hits) throws IOException { + Weight innerHitQueryWeight = createInnerHitQueryWeight(); + TopDocs[] result = new TopDocs[hits.length]; + for (int i = 0; i < hits.length; i++) { + SearchHit hit = hits[i]; + Query rawParentFilter; + if (parentObjectMapper == null) { + rawParentFilter = Queries.newNonNestedFilter(); } else { - topDocsCollector = TopScoreDocCollector.create(topN); + rawParentFilter = parentObjectMapper.nestedTypeFilter(); } - try { - context.searcher().search(q, topDocsCollector); - } finally { - clearReleasables(Lifetime.COLLECTION); + + int parentDocId = hit.docId(); + final int readerIndex = ReaderUtil.subIndex(parentDocId, searcher().getIndexReader().leaves()); + // With nested inner hits the nested docs are always in the same segement, so need to use the other segments + LeafReaderContext ctx = searcher().getIndexReader().leaves().get(readerIndex); + + Query childFilter = childObjectMapper.nestedTypeFilter(); + BitSetProducer parentFilter = context.bitsetFilterCache().getBitSetProducer(rawParentFilter); + Query q = new 
ParentChildrenBlockJoinQuery(parentFilter, childFilter, parentDocId); + Weight weight = context.searcher().createNormalizedWeight(q, false); + if (size() == 0) { + TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector(); + intersect(weight, innerHitQueryWeight, totalHitCountCollector, ctx); + result[i] = new TopDocs(totalHitCountCollector.getTotalHits(), Lucene.EMPTY_SCORE_DOCS, 0); + } else { + int topN = Math.min(from() + size(), context.searcher().getIndexReader().maxDoc()); + TopDocsCollector topDocsCollector; + if (sort() != null) { + topDocsCollector = TopFieldCollector.create(sort().sort, topN, true, trackScores(), trackScores()); + } else { + topDocsCollector = TopScoreDocCollector.create(topN); + } + try { + intersect(weight, innerHitQueryWeight, topDocsCollector, ctx); + } finally { + clearReleasables(Lifetime.COLLECTION); + } + result[i] = topDocsCollector.topDocs(from(), size()); } - return topDocsCollector.topDocs(from(), size()); } + return result; } } @@ -156,53 +189,65 @@ public final class InnerHitsContext { } @Override - public TopDocs topDocs(SearchContext context, FetchSubPhase.HitContext hitContext) throws IOException { - final Query hitQuery; - if (isParentHit(hitContext.hit())) { - String field = ParentFieldMapper.joinField(hitContext.hit().getType()); - hitQuery = new DocValuesTermsQuery(field, hitContext.hit().getId()); - } else if (isChildHit(hitContext.hit())) { - DocumentMapper hitDocumentMapper = mapperService.documentMapper(hitContext.hit().getType()); - final String parentType = hitDocumentMapper.parentFieldMapper().type(); - SearchHitField parentField = hitContext.hit().field(ParentFieldMapper.NAME); - if (parentField == null) { - throw new IllegalStateException("All children must have a _parent"); - } - Term uidTerm = context.mapperService().createUidTerm(parentType, parentField.getValue()); - if (uidTerm == null) { - hitQuery = new MatchNoDocsQuery("Missing type: " + parentType); + public TopDocs[] 
topDocs(SearchHit[] hits) throws IOException { + Weight innerHitQueryWeight = createInnerHitQueryWeight(); + TopDocs[] result = new TopDocs[hits.length]; + for (int i = 0; i < hits.length; i++) { + SearchHit hit = hits[i]; + final Query hitQuery; + if (isParentHit(hit)) { + String field = ParentFieldMapper.joinField(hit.getType()); + hitQuery = new DocValuesTermsQuery(field, hit.getId()); + } else if (isChildHit(hit)) { + DocumentMapper hitDocumentMapper = mapperService.documentMapper(hit.getType()); + final String parentType = hitDocumentMapper.parentFieldMapper().type(); + SearchHitField parentField = hit.field(ParentFieldMapper.NAME); + if (parentField == null) { + throw new IllegalStateException("All children must have a _parent"); + } + Term uidTerm = context.mapperService().createUidTerm(parentType, parentField.getValue()); + if (uidTerm == null) { + hitQuery = new MatchNoDocsQuery("Missing type: " + parentType); + } else { + hitQuery = new TermQuery(uidTerm); + } } else { - hitQuery = new TermQuery(uidTerm); + result[i] = Lucene.EMPTY_TOP_DOCS; + continue; } - } else { - return Lucene.EMPTY_TOP_DOCS; - } - BooleanQuery q = new BooleanQuery.Builder() - .add(query(), Occur.MUST) - // Only include docs that have the current hit as parent - .add(hitQuery, Occur.FILTER) - // Only include docs that have this inner hits type - .add(documentMapper.typeFilter(context.getQueryShardContext()), Occur.FILTER) - .build(); - if (size() == 0) { - final int count = context.searcher().count(q); - return new TopDocs(count, Lucene.EMPTY_SCORE_DOCS, 0); - } else { - int topN = Math.min(from() + size(), context.searcher().getIndexReader().maxDoc()); - TopDocsCollector topDocsCollector; - if (sort() != null) { - topDocsCollector = TopFieldCollector.create(sort().sort, topN, true, trackScores(), trackScores()); + BooleanQuery q = new BooleanQuery.Builder() + // Only include docs that have the current hit as parent + .add(hitQuery, Occur.FILTER) + // Only include docs that have this 
inner hits type + .add(documentMapper.typeFilter(context.getQueryShardContext()), Occur.FILTER) + .build(); + Weight weight = context.searcher().createNormalizedWeight(q, false); + if (size() == 0) { + TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector(); + for (LeafReaderContext ctx : context.searcher().getIndexReader().leaves()) { + intersect(weight, innerHitQueryWeight, totalHitCountCollector, ctx); + } + result[i] = new TopDocs(totalHitCountCollector.getTotalHits(), Lucene.EMPTY_SCORE_DOCS, 0); } else { - topDocsCollector = TopScoreDocCollector.create(topN); + int topN = Math.min(from() + size(), context.searcher().getIndexReader().maxDoc()); + TopDocsCollector topDocsCollector; + if (sort() != null) { + topDocsCollector = TopFieldCollector.create(sort().sort, topN, true, trackScores(), trackScores()); + } else { + topDocsCollector = TopScoreDocCollector.create(topN); + } + try { + for (LeafReaderContext ctx : context.searcher().getIndexReader().leaves()) { + intersect(weight, innerHitQueryWeight, topDocsCollector, ctx); + } + } finally { + clearReleasables(Lifetime.COLLECTION); + } + result[i] = topDocsCollector.topDocs(from(), size()); } - try { - context.searcher().search(q, topDocsCollector); - } finally { - clearReleasables(Lifetime.COLLECTION); - } - return topDocsCollector.topDocs(from(), size()); } + return result; } private boolean isParentHit(SearchHit hit) { @@ -214,4 +259,42 @@ public final class InnerHitsContext { return documentMapper.type().equals(hitDocumentMapper.parentFieldMapper().type()); } } + + static void intersect(Weight weight, Weight innerHitQueryWeight, Collector collector, LeafReaderContext ctx) throws IOException { + ScorerSupplier scorerSupplier = weight.scorerSupplier(ctx); + if (scorerSupplier == null) { + return; + } + // use random access since this scorer will be consumed on a minority of documents + Scorer scorer = scorerSupplier.get(true); + + ScorerSupplier innerHitQueryScorerSupplier = 
innerHitQueryWeight.scorerSupplier(ctx); + if (innerHitQueryScorerSupplier == null) { + return; + } + // use random access since this scorer will be consumed on a minority of documents + Scorer innerHitQueryScorer = innerHitQueryScorerSupplier.get(true); + + final LeafCollector leafCollector; + try { + leafCollector = collector.getLeafCollector(ctx); + // Just setting the innerHitQueryScorer is ok, because that is the actual scoring part of the query + leafCollector.setScorer(innerHitQueryScorer); + } catch (CollectionTerminatedException e) { + return; + } + + try { + Bits acceptDocs = ctx.reader().getLiveDocs(); + DocIdSetIterator iterator = ConjunctionDISI.intersectIterators(Arrays.asList(innerHitQueryScorer.iterator(), + scorer.iterator())); + for (int docId = iterator.nextDoc(); docId < DocIdSetIterator.NO_MORE_DOCS; docId = iterator.nextDoc()) { + if (acceptDocs == null || acceptDocs.get(docId)) { + leafCollector.collect(docId); + } + } + } catch (CollectionTerminatedException e) { + // ignore and continue + } + } } diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsFetchSubPhase.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsFetchSubPhase.java index 48294bd82c5..90e1b5cf828 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsFetchSubPhase.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsFetchSubPhase.java @@ -22,12 +22,11 @@ package org.elasticsearch.search.fetch.subphase; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; -import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.fetch.FetchPhase; import org.elasticsearch.search.fetch.FetchSearchResult; import org.elasticsearch.search.fetch.FetchSubPhase; -import org.elasticsearch.search.SearchHit; -import 
org.elasticsearch.search.SearchHits; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; @@ -43,39 +42,42 @@ public final class InnerHitsFetchSubPhase implements FetchSubPhase { } @Override - public void hitExecute(SearchContext context, HitContext hitContext) { + public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOException { if ((context.innerHits() != null && context.innerHits().getInnerHits().size() > 0) == false) { return; } - Map results = new HashMap<>(); + for (Map.Entry entry : context.innerHits().getInnerHits().entrySet()) { InnerHitsContext.BaseInnerHits innerHits = entry.getValue(); - TopDocs topDocs; - try { - topDocs = innerHits.topDocs(context, hitContext); - } catch (IOException e) { - throw ExceptionsHelper.convertToElastic(e); - } - innerHits.queryResult().topDocs(topDocs, innerHits.sort() == null ? null : innerHits.sort().formats); - int[] docIdsToLoad = new int[topDocs.scoreDocs.length]; - for (int i = 0; i < topDocs.scoreDocs.length; i++) { - docIdsToLoad[i] = topDocs.scoreDocs[i].doc; - } - innerHits.docIdsToLoad(docIdsToLoad, 0, docIdsToLoad.length); - fetchPhase.execute(innerHits); - FetchSearchResult fetchResult = innerHits.fetchResult(); - SearchHit[] internalHits = fetchResult.fetchResult().hits().internalHits(); - for (int i = 0; i < internalHits.length; i++) { - ScoreDoc scoreDoc = topDocs.scoreDocs[i]; - SearchHit searchHitFields = internalHits[i]; - searchHitFields.score(scoreDoc.score); - if (scoreDoc instanceof FieldDoc) { - FieldDoc fieldDoc = (FieldDoc) scoreDoc; - searchHitFields.sortValues(fieldDoc.fields, innerHits.sort().formats); + TopDocs[] topDocs = innerHits.topDocs(hits); + for (int i = 0; i < hits.length; i++) { + SearchHit hit = hits[i]; + TopDocs topDoc = topDocs[i]; + + Map results = hit.getInnerHits(); + if (results == null) { + hit.setInnerHits(results = new HashMap<>()); } + innerHits.queryResult().topDocs(topDoc, innerHits.sort() == null ? 
null : innerHits.sort().formats); + int[] docIdsToLoad = new int[topDoc.scoreDocs.length]; + for (int j = 0; j < topDoc.scoreDocs.length; j++) { + docIdsToLoad[j] = topDoc.scoreDocs[j].doc; + } + innerHits.docIdsToLoad(docIdsToLoad, 0, docIdsToLoad.length); + fetchPhase.execute(innerHits); + FetchSearchResult fetchResult = innerHits.fetchResult(); + SearchHit[] internalHits = fetchResult.fetchResult().hits().internalHits(); + for (int j = 0; j < internalHits.length; j++) { + ScoreDoc scoreDoc = topDoc.scoreDocs[j]; + SearchHit searchHitFields = internalHits[j]; + searchHitFields.score(scoreDoc.score); + if (scoreDoc instanceof FieldDoc) { + FieldDoc fieldDoc = (FieldDoc) scoreDoc; + searchHitFields.sortValues(fieldDoc.fields, innerHits.sort().formats); + } + } + results.put(entry.getKey(), fetchResult.hits()); } - results.put(entry.getKey(), fetchResult.hits()); } - hitContext.hit().setInnerHits(results); } } diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java index f6faaaeea5c..24f2647167e 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java @@ -58,7 +58,7 @@ public final class DirectCandidateGenerator extends CandidateGenerator { private final TermsEnum termsEnum; private final IndexReader reader; private final long dictSize; - private final double logBase = 5; + private static final double LOG_BASE = 5; private final long frequencyPlateau; private final Analyzer preFilter; private final Analyzer postFilter; @@ -189,7 +189,7 @@ public final class DirectCandidateGenerator extends CandidateGenerator { protected long thresholdFrequency(long termFrequency, long dictionarySize) { if (termFrequency > 0) { - return max(0, round(termFrequency * (log10(termFrequency - frequencyPlateau) * (1.0 
/ log10(logBase))) + 1)); + return max(0, round(termFrequency * (log10(termFrequency - frequencyPlateau) * (1.0 / log10(LOG_BASE))) + 1)); } return 0; diff --git a/core/src/test/java/org/elasticsearch/bootstrap/BootstrapTests.java b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapTests.java new file mode 100644 index 00000000000..b2fab7746fb --- /dev/null +++ b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapTests.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.bootstrap; + +import org.elasticsearch.test.ESTestCase; + +public class BootstrapTests extends ESTestCase { + + public void testConfigDeprecation() { + Bootstrap.checkConfigExtension(".json"); + assertWarnings("elasticsearch.json is deprecated; rename your configuration file to elasticsearch.yaml"); + Bootstrap.checkConfigExtension(".yml"); + assertWarnings("elasticsearch.yml is deprecated; rename your configuration file to elasticsearch.yaml"); + Bootstrap.checkConfigExtension(".yaml"); // no warnings, will be checked in @After + } +} diff --git a/core/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java index 6de9cbab620..cd6622368c9 100644 --- a/core/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.join.ScoreMode; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Settings; @@ -311,13 +312,16 @@ public class InnerHitBuilderTests extends ESTestCase { } innerHits.setScriptFields(new HashSet<>(scriptFields.values())); FetchSourceContext randomFetchSourceContext; - if (randomBoolean()) { - randomFetchSourceContext = new FetchSourceContext(randomBoolean()); - } else { + int randomInt = randomIntBetween(0, 2); + if (randomInt == 0) { + randomFetchSourceContext = new FetchSourceContext(true, Strings.EMPTY_ARRAY, Strings.EMPTY_ARRAY); + } else if (randomInt == 1) { randomFetchSourceContext = new FetchSourceContext(true, generateRandomStringArray(12, 16, false), generateRandomStringArray(12, 16, false) ); + } else { + randomFetchSourceContext = new FetchSourceContext(randomBoolean()); } 
innerHits.setFetchSourceContext(randomFetchSourceContext); if (randomBoolean()) { diff --git a/core/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java index f2d66409e9e..c321ffa965a 100644 --- a/core/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.action.termvectors.MultiTermVectorsRequest; import org.elasticsearch.action.termvectors.MultiTermVectorsResponse; import org.elasticsearch.action.termvectors.TermVectorsRequest; import org.elasticsearch.action.termvectors.TermVectorsResponse; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.lucene.search.MoreLikeThisQuery; @@ -64,6 +65,8 @@ import static org.hamcrest.Matchers.instanceOf; public class MoreLikeThisQueryBuilderTests extends AbstractQueryTestCase { + private static final String[] SHUFFLE_PROTECTED_FIELDS = new String[]{Item.Field.DOC.getPreferredName()}; + private static String[] randomFields; private static Item[] randomLikeItems; private static Item[] randomUnlikeItems; @@ -204,6 +207,16 @@ public class MoreLikeThisQueryBuilderTests extends AbstractQueryTestCase getObjectsHoldingArbitraryContent() { //doc contains arbitrary content, anything can be added to it and no exception will be thrown diff --git a/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java index 702e8b60917..90d11efb11c 100644 --- a/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java +++ 
b/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java @@ -83,6 +83,9 @@ import static org.hamcrest.Matchers.nullValue; public class FunctionScoreQueryBuilderTests extends AbstractQueryTestCase { + private static final String[] SHUFFLE_PROTECTED_FIELDS = new String[] {Script.PARAMS_PARSE_FIELD.getPreferredName(), + ExponentialDecayFunctionBuilder.NAME, LinearDecayFunctionBuilder.NAME, GaussDecayFunctionBuilder.NAME}; + @Override protected Collection> getPlugins() { return Collections.singleton(TestPlugin.class); @@ -106,6 +109,12 @@ public class FunctionScoreQueryBuilderTests extends AbstractQueryTestCase getObjectsHoldingArbitraryContent() { //script_score.script.params can contain arbitrary parameters. no error is expected when adding additional objects @@ -218,7 +227,7 @@ public class FunctionScoreQueryBuilderTests extends AbstractQueryTestCase randomSearchFailures() { + private List randomSearchFailures() { if (randomBoolean()) { return emptyList(); } @@ -68,7 +67,7 @@ public class BulkByScrollResponseTests extends ESTestCase { shardId = randomInt(); nodeId = usually() ? 
randomAlphaOfLength(5) : null; } - return singletonList(new SearchFailure(new ElasticsearchException("foo"), index, shardId, nodeId)); + return singletonList(new ScrollableHitSource.SearchFailure(new ElasticsearchException("foo"), index, shardId, nodeId)); } private void assertResponseEquals(BulkByScrollResponse expected, BulkByScrollResponse actual) { @@ -86,8 +85,8 @@ public class BulkByScrollResponseTests extends ESTestCase { } assertEquals(expected.getSearchFailures().size(), actual.getSearchFailures().size()); for (int i = 0; i < expected.getSearchFailures().size(); i++) { - SearchFailure expectedFailure = expected.getSearchFailures().get(i); - SearchFailure actualFailure = actual.getSearchFailures().get(i); + ScrollableHitSource.SearchFailure expectedFailure = expected.getSearchFailures().get(i); + ScrollableHitSource.SearchFailure actualFailure = actual.getSearchFailures().get(i); assertEquals(expectedFailure.getIndex(), actualFailure.getIndex()); assertEquals(expectedFailure.getShardId(), actualFailure.getShardId()); assertEquals(expectedFailure.getNodeId(), actualFailure.getNodeId()); diff --git a/core/src/test/java/org/elasticsearch/action/bulk/byscroll/BulkByScrollTaskStatusTests.java b/core/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusTests.java similarity index 97% rename from core/src/test/java/org/elasticsearch/action/bulk/byscroll/BulkByScrollTaskStatusTests.java rename to core/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusTests.java index 503fe1db7cd..982198c8fee 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/byscroll/BulkByScrollTaskStatusTests.java +++ b/core/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusTests.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.action.bulk.byscroll; +package org.elasticsearch.index.reindex; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchException; @@ -26,6 +26,7 @@ import org.elasticsearch.common.Randomness; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matchers; import java.io.IOException; import java.util.List; @@ -75,7 +76,7 @@ public class BulkByScrollTaskStatusTests extends ESTestCase { assertEquals(expected.getReasonCancelled(), actual.getReasonCancelled()); assertEquals(expected.getThrottledUntil(), actual.getThrottledUntil()); if (version.onOrAfter(Version.V_5_1_1_UNRELEASED)) { - assertThat(actual.getSliceStatuses(), hasSize(expected.getSliceStatuses().size())); + assertThat(actual.getSliceStatuses(), Matchers.hasSize(expected.getSliceStatuses().size())); for (int i = 0; i < expected.getSliceStatuses().size(); i++) { BulkByScrollTask.StatusOrException sliceStatus = expected.getSliceStatuses().get(i); if (sliceStatus == null) { diff --git a/core/src/test/java/org/elasticsearch/action/bulk/byscroll/BulkByScrollTaskTests.java b/core/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskTests.java similarity index 99% rename from core/src/test/java/org/elasticsearch/action/bulk/byscroll/BulkByScrollTaskTests.java rename to core/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskTests.java index ff0eae55520..f4d4ea790bc 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/byscroll/BulkByScrollTaskTests.java +++ b/core/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskTests.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.action.bulk.byscroll; +package org.elasticsearch.index.reindex; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContent; diff --git a/core/src/test/java/org/elasticsearch/action/bulk/byscroll/DeleteByQueryRequestTests.java b/core/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryRequestTests.java similarity index 99% rename from core/src/test/java/org/elasticsearch/action/bulk/byscroll/DeleteByQueryRequestTests.java rename to core/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryRequestTests.java index f5c00f63de9..8c84c8f3f56 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/byscroll/DeleteByQueryRequestTests.java +++ b/core/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryRequestTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.action.bulk.byscroll; +package org.elasticsearch.index.reindex; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.search.SearchRequest; diff --git a/core/src/test/java/org/elasticsearch/action/bulk/byscroll/ParentBulkByScrollTaskTests.java b/core/src/test/java/org/elasticsearch/index/reindex/ParentBulkByScrollTaskTests.java similarity index 99% rename from core/src/test/java/org/elasticsearch/action/bulk/byscroll/ParentBulkByScrollTaskTests.java rename to core/src/test/java/org/elasticsearch/index/reindex/ParentBulkByScrollTaskTests.java index 715fcaaad54..6e2d44abed5 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/byscroll/ParentBulkByScrollTaskTests.java +++ b/core/src/test/java/org/elasticsearch/index/reindex/ParentBulkByScrollTaskTests.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.action.bulk.byscroll; +package org.elasticsearch.index.reindex; import org.elasticsearch.action.ActionListener; import org.elasticsearch.test.ESTestCase; diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexRequestTests.java b/core/src/test/java/org/elasticsearch/index/reindex/ReindexRequestTests.java similarity index 97% rename from modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexRequestTests.java rename to core/src/test/java/org/elasticsearch/index/reindex/ReindexRequestTests.java index d1bb6f6096c..32b01237375 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexRequestTests.java +++ b/core/src/test/java/org/elasticsearch/index/reindex/ReindexRequestTests.java @@ -20,11 +20,9 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.bulk.byscroll.AbstractBulkByScrollRequestTestCase; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.index.reindex.remote.RemoteInfo; import org.elasticsearch.search.slice.SliceBuilder; import static java.util.Collections.emptyMap; diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryRequestTests.java b/core/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryRequestTests.java similarity index 97% rename from modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryRequestTests.java rename to core/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryRequestTests.java index 700f45b42c5..b30968cf056 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryRequestTests.java +++ b/core/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryRequestTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.reindex; 
-import org.elasticsearch.action.bulk.byscroll.AbstractBulkByScrollRequestTestCase; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.IndicesOptions; diff --git a/core/src/test/java/org/elasticsearch/action/bulk/byscroll/WorkingBulkByScrollTaskTests.java b/core/src/test/java/org/elasticsearch/index/reindex/WorkingBulkByScrollTaskTests.java similarity index 98% rename from core/src/test/java/org/elasticsearch/action/bulk/byscroll/WorkingBulkByScrollTaskTests.java rename to core/src/test/java/org/elasticsearch/index/reindex/WorkingBulkByScrollTaskTests.java index 7356d626c10..5d594d080b8 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/byscroll/WorkingBulkByScrollTaskTests.java +++ b/core/src/test/java/org/elasticsearch/index/reindex/WorkingBulkByScrollTaskTests.java @@ -17,10 +17,8 @@ * under the License. */ -package org.elasticsearch.action.bulk.byscroll; +package org.elasticsearch.index.reindex; -import org.elasticsearch.action.bulk.byscroll.BulkByScrollTask; -import org.elasticsearch.action.bulk.byscroll.WorkingBulkByScrollTask; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.tasks.TaskId; diff --git a/core/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java b/core/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java index c08e3976bde..c13177a6250 100644 --- a/core/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java +++ b/core/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java @@ -158,7 +158,7 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase final ShardRouting shardRouting = shard.routingEntry(); assertThat(shardRouting + " local checkpoint mismatch", shardStats.getLocalCheckpoint(), equalTo(numDocs - 1L)); /* - * After the last indexing operation completes, the 
primary will advance its global checkpoint. Without an other indexing + * After the last indexing operation completes, the primary will advance its global checkpoint. Without another indexing * operation, or a background sync, the primary will not have broadcast this global checkpoint to its replicas. However, a * shard could have recovered from the primary in which case its global checkpoint will be in-sync with the primary. * Therefore, we can only assert that the global checkpoint is number of docs minus one (matching the primary, in case of a @@ -178,6 +178,7 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase // simulate a background global checkpoint sync at which point we expect the global checkpoint to advance on the replicas shards.syncGlobalCheckpoint(); + final long noOpsPerformed = SequenceNumbersService.NO_OPS_PERFORMED; for (IndexShard shard : shards) { final SeqNoStats shardStats = shard.seqNoStats(); final ShardRouting shardRouting = shard.routingEntry(); @@ -185,7 +186,7 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase assertThat( shardRouting + " global checkpoint mismatch", shardStats.getGlobalCheckpoint(), - numDocs == 0 ? equalTo(unassignedSeqNo) : equalTo(numDocs - 1L)); + numDocs == 0 ? 
equalTo(noOpsPerformed) : equalTo(numDocs - 1L)); assertThat(shardRouting + " max seq no mismatch", shardStats.getMaxSeqNo(), equalTo(numDocs - 1L)); } } diff --git a/core/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointTrackerTests.java b/core/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointTrackerTests.java index b99ac13a0d4..61eb4581328 100644 --- a/core/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointTrackerTests.java +++ b/core/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointTrackerTests.java @@ -145,17 +145,20 @@ public class GlobalCheckpointTrackerTests extends ESTestCase { final Map assigned = new HashMap<>(); assigned.putAll(active); assigned.putAll(initializing); - final String maxActiveID = active.entrySet().stream().max(Comparator.comparing(Map.Entry::getValue)).get().getKey(); tracker.updateAllocationIdsFromMaster( - active.entrySet().stream().filter(e -> !e.getKey().equals(maxActiveID)).map(Map.Entry::getKey).collect(Collectors.toSet()), + active.keySet(), initializing.keySet()); randomSubsetOf(initializing.keySet()).forEach(k -> markAllocationIdAsInSyncQuietly(tracker, k, tracker.getGlobalCheckpoint())); - assigned.forEach(tracker::updateLocalCheckpoint); + final String missingActiveID = randomFrom(active.keySet()); + assigned + .entrySet() + .stream() + .filter(e -> !e.getKey().equals(missingActiveID)) + .forEach(e -> tracker.updateLocalCheckpoint(e.getKey(), e.getValue())); - // now mark all active shards - tracker.updateAllocationIdsFromMaster(active.keySet(), initializing.keySet()); + assertThat(tracker.getGlobalCheckpoint(), equalTo(UNASSIGNED_SEQ_NO)); - // update again + // now update all knowledge of all shards assigned.forEach(tracker::updateLocalCheckpoint); assertThat(tracker.getGlobalCheckpoint(), not(equalTo(UNASSIGNED_SEQ_NO))); } diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java index 
338634d977e..a5e5ecd8aa6 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -69,6 +69,7 @@ import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.test.InternalSettingsPlugin; import java.io.IOException; +import java.io.UncheckedIOException; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardCopyOption; @@ -81,6 +82,8 @@ import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Predicate; +import java.util.function.Supplier; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; @@ -168,33 +171,43 @@ public class IndexShardIT extends ESSingleNodeTestCase { IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndexService test = indicesService.indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); - shard.checkIdle(Long.MIN_VALUE); + Translog translog = ShardUtilsTests.getShardEngine(shard).getTranslog(); + Predicate needsSync = (tlog) -> { + // we can't use tlog.needsSync() here since it also takes the global checkpoint into account + // we explicitly want to check here if our durability checks are taken into account so we only + // check if we are synced upto the current write location + Translog.Location lastWriteLocation = tlog.getLastWriteLocation(); + try { + // the lastWriteLocaltion has a Integer.MAX_VALUE size so we have to create a new one + return tlog.ensureSynced(new Translog.Location(lastWriteLocation.generation, lastWriteLocation.translogLocation, 0)); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + }; setDurability(shard, Translog.Durability.REQUEST); - assertBusy(() -> 
assertFalse(ShardUtilsTests.getShardEngine(shard).getTranslog().syncNeeded())); + assertFalse(needsSync.test(translog)); setDurability(shard, Translog.Durability.ASYNC); client().prepareIndex("test", "bar", "2").setSource("{}", XContentType.JSON).get(); - assertTrue(ShardUtilsTests.getShardEngine(shard).getTranslog().syncNeeded()); + assertTrue(needsSync.test(translog)); setDurability(shard, Translog.Durability.REQUEST); client().prepareDelete("test", "bar", "1").get(); - shard.checkIdle(Long.MIN_VALUE); - assertBusy(() -> assertFalse(ShardUtilsTests.getShardEngine(shard).getTranslog().syncNeeded())); + assertFalse(needsSync.test(translog)); setDurability(shard, Translog.Durability.ASYNC); client().prepareDelete("test", "bar", "2").get(); - assertTrue(ShardUtilsTests.getShardEngine(shard).getTranslog().syncNeeded()); + assertTrue(translog.syncNeeded()); setDurability(shard, Translog.Durability.REQUEST); assertNoFailures(client().prepareBulk() .add(client().prepareIndex("test", "bar", "3").setSource("{}", XContentType.JSON)) .add(client().prepareDelete("test", "bar", "1")).get()); - shard.checkIdle(Long.MIN_VALUE); - assertBusy(() -> assertFalse(ShardUtilsTests.getShardEngine(shard).getTranslog().syncNeeded())); + assertFalse(needsSync.test(translog)); setDurability(shard, Translog.Durability.ASYNC); assertNoFailures(client().prepareBulk() .add(client().prepareIndex("test", "bar", "4").setSource("{}", XContentType.JSON)) .add(client().prepareDelete("test", "bar", "3")).get()); setDurability(shard, Translog.Durability.REQUEST); - assertTrue(ShardUtilsTests.getShardEngine(shard).getTranslog().syncNeeded()); + assertTrue(needsSync.test(translog)); } private void setDurability(IndexShard shard, Translog.Durability durability) { diff --git a/core/src/test/java/org/elasticsearch/node/InternalSettingsPreparerTests.java b/core/src/test/java/org/elasticsearch/node/InternalSettingsPreparerTests.java index 033fa9cdf20..5db397ab16d 100644 --- 
a/core/src/test/java/org/elasticsearch/node/InternalSettingsPreparerTests.java +++ b/core/src/test/java/org/elasticsearch/node/InternalSettingsPreparerTests.java @@ -28,17 +28,14 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.env.Environment; -import org.elasticsearch.node.InternalSettingsPreparer; import org.elasticsearch.test.ESTestCase; import org.junit.After; import org.junit.Before; import java.io.IOException; import java.io.InputStream; -import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; -import java.util.Arrays; import java.util.Collections; import java.util.Map; @@ -155,22 +152,36 @@ public class InternalSettingsPreparerTests extends ESTestCase { public void testMultipleSettingsFileNotAllowed() throws IOException { InputStream yaml = getClass().getResourceAsStream("/config/elasticsearch.yaml"); - InputStream properties = getClass().getResourceAsStream("/config/elasticsearch.properties"); - Path home = createTempDir(); - Path config = home.resolve("config"); + InputStream json = getClass().getResourceAsStream("/config/elasticsearch.json"); + Path config = homeDir.resolve("config"); Files.createDirectory(config); Files.copy(yaml, config.resolve("elasticsearch.yaml")); - Files.copy(properties, config.resolve("elasticsearch.properties")); + Files.copy(json, config.resolve("elasticsearch.json")); - try { - InternalSettingsPreparer.prepareEnvironment(Settings.builder() - .put(baseEnvSettings) - .build(), null); - } catch (SettingsException e) { - assertTrue(e.getMessage(), e.getMessage().contains("multiple settings files found with suffixes")); - assertTrue(e.getMessage(), e.getMessage().contains(".yaml")); - assertTrue(e.getMessage(), e.getMessage().contains(".properties")); - } + SettingsException e = expectThrows(SettingsException.class, () -> + 
InternalSettingsPreparer.prepareEnvironment(Settings.builder().put(baseEnvSettings).build(), null) + ); + assertTrue(e.getMessage(), e.getMessage().contains("multiple settings files found with suffixes")); + assertTrue(e.getMessage(), e.getMessage().contains(".yaml")); + assertTrue(e.getMessage(), e.getMessage().contains(".json")); + } + + public void testYmlExtension() throws IOException { + InputStream yaml = getClass().getResourceAsStream("/config/elasticsearch.yaml"); + Path config = homeDir.resolve("config"); + Files.createDirectory(config); + Files.copy(yaml, config.resolve("elasticsearch.yml")); + Environment env = InternalSettingsPreparer.prepareEnvironment(Settings.builder().put(baseEnvSettings).build(), null); + assertEquals(".yml", env.configExtension()); + } + + public void testJsonExtension() throws IOException { + InputStream yaml = getClass().getResourceAsStream("/config/elasticsearch.json"); + Path config = homeDir.resolve("config"); + Files.createDirectory(config); + Files.copy(yaml, config.resolve("elasticsearch.json")); + Environment env = InternalSettingsPreparer.prepareEnvironment(Settings.builder().put(baseEnvSettings).build(), null); + assertEquals(".json", env.configExtension()); } public void testSecureSettings() { diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/InternalOrderTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/InternalOrderTests.java new file mode 100644 index 00000000000..43d10af99fb --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/aggregations/InternalOrderTests.java @@ -0,0 +1,158 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.search.aggregations; + +import org.elasticsearch.Version; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.search.aggregations.InternalOrder.CompoundOrder; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.test.VersionUtils; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class InternalOrderTests extends AbstractSerializingTestCase { + + @Override + protected BucketOrder createTestInstance() { + if (randomBoolean()) { + return getRandomOrder(); + } else { + List orders = new ArrayList<>(); + for (int i = 0; i < randomInt(3); i++) { + orders.add(getRandomOrder()); + } + return BucketOrder.compound(orders); + } + } + + private BucketOrder getRandomOrder() { + switch(randomInt(2)) { + case 0: return BucketOrder.key(randomBoolean()); + case 1: return BucketOrder.count(randomBoolean()); + default: return BucketOrder.aggregation(randomAlphaOfLength(10), randomBoolean()); + } + } + + @Override + protected Reader instanceReader() { + return InternalOrder.Streams::readOrder; + } + + @Override + 
protected BucketOrder doParseInstance(XContentParser parser) throws IOException { + Token token = parser.nextToken(); + if (token == Token.START_OBJECT) { + return InternalOrder.Parser.parseOrderParam(parser, null); + } + if (token == Token.START_ARRAY) { + List orders = new ArrayList<>(); + while (parser.nextToken() == Token.START_OBJECT) { + orders.add(InternalOrder.Parser.parseOrderParam(parser, null)); + } + return BucketOrder.compound(orders); + } + return null; + } + + @Override + protected BucketOrder assertSerialization(BucketOrder testInstance) throws IOException { + // identical behavior to AbstractWireSerializingTestCase, except assertNotSame is only called for + // compound and aggregation order because _key and _count orders are static instances. + BucketOrder deserializedInstance = copyInstance(testInstance); + assertEquals(testInstance, deserializedInstance); + assertEquals(testInstance.hashCode(), deserializedInstance.hashCode()); + if(testInstance instanceof CompoundOrder || testInstance instanceof InternalOrder.Aggregation) { + assertNotSame(testInstance, deserializedInstance); + } + return deserializedInstance; + } + + @Override + protected void assertParsedInstance(XContentType xContentType, BytesReference instanceAsBytes, BucketOrder expectedInstance) + throws IOException { + // identical behavior to AbstractSerializingTestCase, except assertNotSame is only called for + // compound and aggregation order because _key and _count orders are static instances. 
+ XContentParser parser = createParser(XContentFactory.xContent(xContentType), instanceAsBytes); + BucketOrder newInstance = parseInstance(parser); + assertEquals(expectedInstance, newInstance); + assertEquals(expectedInstance.hashCode(), newInstance.hashCode()); + if(expectedInstance instanceof CompoundOrder || expectedInstance instanceof InternalOrder.Aggregation) { + assertNotSame(newInstance, expectedInstance); + } + } + + public void testHistogramOrderBwc() throws IOException { + for (int runs = 0; runs < NUMBER_OF_TEST_RUNS; runs++) { + BucketOrder order = createTestInstance(); + Version bwcVersion = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), + VersionUtils.getPreviousVersion(Version.V_6_0_0_alpha2_UNRELEASED)); + boolean bwcOrderFlag = randomBoolean(); + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.setVersion(bwcVersion); + InternalOrder.Streams.writeHistogramOrder(order, out, bwcOrderFlag); + try (StreamInput in = out.bytes().streamInput()) { + in.setVersion(bwcVersion); + BucketOrder actual = InternalOrder.Streams.readHistogramOrder(in, bwcOrderFlag); + BucketOrder expected = order; + if (order instanceof CompoundOrder) { + expected = ((CompoundOrder) order).orderElements.get(0); + } + assertEquals(expected, actual); + } + } + } + } + + public void testAggregationOrderEqualsAndHashCode() { + String path = randomAlphaOfLength(10); + boolean asc = randomBoolean(); + BucketOrder o1 = BucketOrder.aggregation(path, asc); + BucketOrder o2 = BucketOrder.aggregation(path + "test", asc); + BucketOrder o3 = BucketOrder.aggregation(path, !asc); + BucketOrder o4 = BucketOrder.aggregation(path, asc); + assertNotEquals(o1, o2); + assertNotEquals(o1.hashCode(), o2.hashCode()); + assertNotEquals(o1, o3); + assertNotEquals(o1.hashCode(), o3.hashCode()); + assertEquals(o1, o4); + assertEquals(o1.hashCode(), o4.hashCode()); + + o1 = InternalOrder.compound(o1); + o2 = InternalOrder.compound(o2); + o3 = 
InternalOrder.compound(o3); + assertNotEquals(o1, o2); + assertNotEquals(o1.hashCode(), o2.hashCode()); + assertNotEquals(o1, o2); + assertNotEquals(o1.hashCode(), o2.hashCode()); + assertNotEquals(o1, o3); + assertNotEquals(o1.hashCode(), o3.hashCode()); + assertNotEquals(o1, o4); + assertNotEquals(o1.hashCode(), o4.hashCode()); + } + +} diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java index cef4cb07f88..bedd8610a40 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java @@ -18,7 +18,9 @@ */ package org.elasticsearch.search.aggregations.bucket; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.joda.DateMathParser; import org.elasticsearch.common.joda.Joda; @@ -30,13 +32,16 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; +import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.bucket.DateScriptMocks.DateScriptsMockPlugin; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.ExtendedBounds; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; +import org.elasticsearch.search.aggregations.metrics.avg.Avg; import 
org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import org.hamcrest.Matchers; import org.joda.time.DateTime; @@ -57,6 +62,7 @@ import java.util.concurrent.TimeUnit; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.search.aggregations.AggregationBuilders.avg; import static org.elasticsearch.search.aggregations.AggregationBuilders.dateHistogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.max; @@ -73,6 +79,8 @@ import static org.hamcrest.core.IsNull.notNullValue; @ESIntegTestCase.SuiteScopeTestCase public class DateHistogramIT extends ESIntegTestCase { + static Map> expectedMultiSortBuckets; + private DateTime date(int month, int day) { return new DateTime(2012, month, day, 0, 0, DateTimeZone.UTC); } @@ -98,6 +106,7 @@ public class DateHistogramIT extends ESIntegTestCase { return client().prepareIndex("idx", "type").setSource(jsonBuilder() .startObject() .field("value", value) + .field("constant", 1) .field("date", date(month, day)) .startArray("dates").value(date(month, day)).value(date(month + 1, day + 1)).endArray() .endObject()); @@ -115,6 +124,9 @@ public class DateHistogramIT extends ESIntegTestCase { .field("value", i * 2) .endObject())); } + + getMultiSortDocs(builders); + builders.addAll(Arrays.asList( indexDoc(1, 2, 1), // date: Jan 2, dates: Jan 2, Feb 3 indexDoc(2, 2, 2), // date: Feb 2, dates: Feb 2, Mar 3 @@ -126,6 +138,50 @@ public class DateHistogramIT extends ESIntegTestCase { ensureSearchable(); } + private void addExpectedBucket(DateTime key, long docCount, double avg, double sum) { + Map bucketProps = new HashMap<>(); + bucketProps.put("_count", docCount); + bucketProps.put("avg_l", avg); + 
bucketProps.put("sum_d", sum); + expectedMultiSortBuckets.put(key, bucketProps); + } + + private void getMultiSortDocs(List builders) throws IOException { + expectedMultiSortBuckets = new HashMap<>(); + addExpectedBucket(date(1, 1), 3, 1, 6); + addExpectedBucket(date(1, 2), 3, 2, 6); + addExpectedBucket(date(1, 3), 2, 3, 3); + addExpectedBucket(date(1, 4), 2, 3, 4); + addExpectedBucket(date(1, 5), 2, 5, 3); + addExpectedBucket(date(1, 6), 1, 5, 1); + addExpectedBucket(date(1, 7), 1, 5, 1); + + assertAcked(client().admin().indices().prepareCreate("sort_idx") + .addMapping("type", "date", "type=date").get()); + for (int i = 1; i <= 3; i++) { + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field("date", date(1, 1)).field("l", 1).field("d", i).endObject())); + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field("date", date(1, 2)).field("l", 2).field("d", i).endObject())); + } + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field("date", date(1, 3)).field("l", 3).field("d", 1).endObject())); + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field("date", date(1, 3).plusHours(1)).field("l", 3).field("d", 2).endObject())); + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field("date", date(1, 4)).field("l", 3).field("d", 1).endObject())); + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field("date", date(1, 4).plusHours(2)).field("l", 3).field("d", 3).endObject())); + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field("date", date(1, 5)).field("l", 5).field("d", 1).endObject())); + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field("date", date(1, 5).plusHours(12)).field("l", 
5).field("d", 2).endObject())); + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field("date", date(1, 6)).field("l", 5).field("d", 1).endObject())); + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field("date", date(1, 7)).field("l", 5).field("d", 1).endObject())); + } + @Override protected Collection> nodePlugins() { return Arrays.asList( @@ -281,7 +337,7 @@ public class DateHistogramIT extends ESIntegTestCase { .addAggregation(dateHistogram("histo") .field("date") .dateHistogramInterval(DateHistogramInterval.MONTH) - .order(Histogram.Order.KEY_ASC)) + .order(BucketOrder.key(true))) .execute().actionGet(); assertSearchResponse(response); @@ -304,7 +360,7 @@ public class DateHistogramIT extends ESIntegTestCase { .addAggregation(dateHistogram("histo") .field("date") .dateHistogramInterval(DateHistogramInterval.MONTH) - .order(Histogram.Order.KEY_DESC)) + .order(BucketOrder.key(false))) .execute().actionGet(); assertSearchResponse(response); @@ -326,7 +382,7 @@ public class DateHistogramIT extends ESIntegTestCase { .addAggregation(dateHistogram("histo") .field("date") .dateHistogramInterval(DateHistogramInterval.MONTH) - .order(Histogram.Order.COUNT_ASC)) + .order(BucketOrder.count(true))) .execute().actionGet(); assertSearchResponse(response); @@ -348,7 +404,7 @@ public class DateHistogramIT extends ESIntegTestCase { .addAggregation(dateHistogram("histo") .field("date") .dateHistogramInterval(DateHistogramInterval.MONTH) - .order(Histogram.Order.COUNT_DESC)) + .order(BucketOrder.count(false))) .execute().actionGet(); assertSearchResponse(response); @@ -428,7 +484,7 @@ public class DateHistogramIT extends ESIntegTestCase { .addAggregation(dateHistogram("histo") .field("date") .dateHistogramInterval(DateHistogramInterval.MONTH) - .order(Histogram.Order.aggregation("sum", true)) + .order(BucketOrder.aggregation("sum", true)) .subAggregation(max("sum").field("value"))) 
.execute().actionGet(); @@ -451,7 +507,7 @@ public class DateHistogramIT extends ESIntegTestCase { .addAggregation(dateHistogram("histo") .field("date") .dateHistogramInterval(DateHistogramInterval.MONTH) - .order(Histogram.Order.aggregation("sum", false)) + .order(BucketOrder.aggregation("sum", false)) .subAggregation(max("sum").field("value"))) .execute().actionGet(); @@ -474,7 +530,7 @@ public class DateHistogramIT extends ESIntegTestCase { .addAggregation(dateHistogram("histo") .field("date") .dateHistogramInterval(DateHistogramInterval.MONTH) - .order(Histogram.Order.aggregation("stats", "sum", false)) + .order(BucketOrder.aggregation("stats", "sum", false)) .subAggregation(stats("stats").field("value"))) .execute().actionGet(); @@ -492,6 +548,60 @@ public class DateHistogramIT extends ESIntegTestCase { } } + public void testSingleValuedFieldOrderedByTieBreaker() throws Exception { + SearchResponse response = client().prepareSearch("idx") + .addAggregation(dateHistogram("histo") + .field("date") + .dateHistogramInterval(DateHistogramInterval.MONTH) + .order(BucketOrder.aggregation("max_constant", randomBoolean())) + .subAggregation(max("max_constant").field("constant"))) + .execute().actionGet(); + + assertSearchResponse(response); + + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(3)); + + int i = 1; + for (Histogram.Bucket bucket : histo.getBuckets()) { + assertThat(bucket.getKey(), equalTo(date(i, 1))); + i++; + } + } + + public void testSingleValuedFieldOrderedByIllegalAgg() throws Exception { + boolean asc = true; + try { + client() + .prepareSearch("idx") + .addAggregation( + dateHistogram("histo").field("date") + .dateHistogramInterval(DateHistogramInterval.MONTH) + .order(BucketOrder.aggregation("inner_histo>avg", asc)) + .subAggregation(dateHistogram("inner_histo") + 
.dateHistogramInterval(DateHistogramInterval.MONTH) + .field("dates") + .subAggregation(avg("avg").field("value")))) + .execute().actionGet(); + fail("Expected an exception"); + } catch (SearchPhaseExecutionException e) { + ElasticsearchException[] rootCauses = e.guessRootCauses(); + if (rootCauses.length == 1) { + ElasticsearchException rootCause = rootCauses[0]; + if (rootCause instanceof AggregationExecutionException) { + AggregationExecutionException aggException = (AggregationExecutionException) rootCause; + assertThat(aggException.getMessage(), Matchers.startsWith("Invalid aggregation order path")); + } else { + throw e; + } + } else { + throw e; + } + } + } + public void testSingleValuedFieldWithValueScript() throws Exception { Map params = new HashMap<>(); params.put("fieldname", "date"); @@ -583,12 +693,12 @@ public class DateHistogramIT extends ESIntegTestCase { assertThat(bucket.getDocCount(), equalTo(3L)); } - public void testMultiValuedFieldOrderedByKeyDesc() throws Exception { + public void testMultiValuedFieldOrderedByCountDesc() throws Exception { SearchResponse response = client().prepareSearch("idx") .addAggregation(dateHistogram("histo") .field("dates") .dateHistogramInterval(DateHistogramInterval.MONTH) - .order(Histogram.Order.COUNT_DESC)) + .order(BucketOrder.count(false))) .execute().actionGet(); assertSearchResponse(response); @@ -598,23 +708,26 @@ public class DateHistogramIT extends ESIntegTestCase { assertThat(histo.getName(), equalTo("histo")); assertThat(histo.getBuckets().size(), equalTo(4)); - // TODO: use diamond once JI-9019884 is fixed List buckets = new ArrayList<>(histo.getBuckets()); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo(date(3, 1))); assertThat(bucket.getDocCount(), equalTo(5L)); bucket = buckets.get(1); assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo(date(2, 1))); assertThat(bucket.getDocCount(), equalTo(3L)); bucket = 
buckets.get(2); assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo(date(4, 1))); assertThat(bucket.getDocCount(), equalTo(3L)); bucket = buckets.get(3); assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo(date(1, 1))); assertThat(bucket.getDocCount(), equalTo(1L)); } @@ -1236,4 +1349,75 @@ public class DateHistogramIT extends ESIntegTestCase { assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() .getMissCount(), equalTo(1L)); } + + public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAndKeyDesc() throws Exception { + int[] expectedDays = new int[] { 1, 2, 4, 3, 7, 6, 5 }; + assertMultiSortResponse(expectedDays, BucketOrder.aggregation("avg_l", true), BucketOrder.key(false)); + } + + public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAndKeyAsc() throws Exception { + int[] expectedDays = new int[] { 1, 2, 3, 4, 5, 6, 7 }; + assertMultiSortResponse(expectedDays, BucketOrder.aggregation("avg_l", true), BucketOrder.key(true)); + } + + public void testSingleValuedFieldOrderedBySingleValueSubAggregationDescAndKeyAsc() throws Exception { + int[] expectedDays = new int[] { 5, 6, 7, 3, 4, 2, 1 }; + assertMultiSortResponse(expectedDays, BucketOrder.aggregation("avg_l", false), BucketOrder.key(true)); + } + + public void testSingleValuedFieldOrderedByCountAscAndSingleValueSubAggregationAsc() throws Exception { + int[] expectedDays = new int[] { 6, 7, 3, 4, 5, 1, 2 }; + assertMultiSortResponse(expectedDays, BucketOrder.count(true), BucketOrder.aggregation("avg_l", true)); + } + + public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscSingleValueSubAggregationAsc() throws Exception { + int[] expectedDays = new int[] { 6, 7, 3, 5, 4, 1, 2 }; + assertMultiSortResponse(expectedDays, BucketOrder.aggregation("sum_d", true), BucketOrder.aggregation("avg_l", true)); + } + + public void 
testSingleValuedFieldOrderedByThreeCriteria() throws Exception { + int[] expectedDays = new int[] { 2, 1, 4, 5, 3, 6, 7 }; + assertMultiSortResponse(expectedDays, BucketOrder.count(false), BucketOrder.aggregation("sum_d", false), + BucketOrder.aggregation("avg_l", false)); + } + + public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAsCompound() throws Exception { + int[] expectedDays = new int[] { 1, 2, 3, 4, 5, 6, 7 }; + assertMultiSortResponse(expectedDays, BucketOrder.aggregation("avg_l", true)); + } + + private void assertMultiSortResponse(int[] expectedDays, BucketOrder... order) { + DateTime[] expectedKeys = Arrays.stream(expectedDays).mapToObj(d -> date(1, d)).toArray(DateTime[]::new); + SearchResponse response = client() + .prepareSearch("sort_idx") + .setTypes("type") + .addAggregation( + dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.DAY).order(BucketOrder.compound(order)) + .subAggregation(avg("avg_l").field("l")).subAggregation(sum("sum_d").field("d"))).execute().actionGet(); + + assertSearchResponse(response); + + Histogram histogram = response.getAggregations().get("histo"); + assertThat(histogram, notNullValue()); + assertThat(histogram.getName(), equalTo("histo")); + assertThat(histogram.getBuckets().size(), equalTo(expectedKeys.length)); + + int i = 0; + for (Histogram.Bucket bucket : histogram.getBuckets()) { + assertThat(bucket, notNullValue()); + assertThat(key(bucket), equalTo(expectedKeys[i])); + assertThat(bucket.getDocCount(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("_count"))); + Avg avg = bucket.getAggregations().get("avg_l"); + assertThat(avg, notNullValue()); + assertThat(avg.getValue(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("avg_l"))); + Sum sum = bucket.getAggregations().get("sum_d"); + assertThat(sum, notNullValue()); + assertThat(sum.getValue(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("sum_d"))); + i++; + } + } + + private 
DateTime key(Histogram.Bucket bucket) { + return (DateTime) bucket.getKey(); + } } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java index 76e58c715bf..e86b3a553e9 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java @@ -20,10 +20,13 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Order; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.ExtendedBoundsTests; +import org.elasticsearch.search.aggregations.BucketOrder; + +import java.util.ArrayList; +import java.util.List; public class DateHistogramTests extends BaseAggregationTestCase { @@ -80,29 +83,41 @@ public class DateHistogramTests extends BaseAggregationTestCase order = randomOrder(); + if(order.size() == 1 && randomBoolean()) { + factory.order(order.get(0)); + } else { + factory.order(order); } } return factory; } + private List randomOrder() { + List orders = new ArrayList<>(); + switch (randomInt(4)) { + case 0: + orders.add(BucketOrder.key(randomBoolean())); + break; + case 1: + orders.add(BucketOrder.count(randomBoolean())); + break; + case 2: + orders.add(BucketOrder.aggregation(randomAlphaOfLengthBetween(3, 20), randomBoolean())); + break; + case 3: + orders.add(BucketOrder.aggregation(randomAlphaOfLengthBetween(3, 20), randomAlphaOfLengthBetween(3, 20), randomBoolean())); + break; + case 4: + int numOrders = randomIntBetween(1, 3); + for (int i = 0; i < numOrders; i++) { + 
orders.addAll(randomOrder()); + } + break; + default: + fail(); + } + return orders; + } + } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java index c8803b7e790..6710bcdb231 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java @@ -29,6 +29,7 @@ import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.max.Max; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import java.util.Collection; @@ -103,7 +104,7 @@ public class DiversifiedSamplerIT extends ESIntegTestCase { SearchResponse response = client().prepareSearch("test").setTypes("book").setSearchType(SearchType.QUERY_THEN_FETCH) .addAggregation(terms("genres") .field("genre") - .order(Terms.Order.aggregation("sample>max_price.value", asc)) + .order(BucketOrder.aggregation("sample>max_price.value", asc)) .subAggregation(sampler("sample").shardSize(100) .subAggregation(max("max_price").field("price"))) ).execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java index ca106721fcc..2363c21c7d1 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java @@ -41,6 +41,7 @@ import org.elasticsearch.search.aggregations.metrics.max.Max; import org.elasticsearch.search.aggregations.metrics.stats.Stats; 
import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats; import org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import org.hamcrest.Matchers; @@ -134,6 +135,7 @@ public class DoubleTermsIT extends AbstractTermsTestCase { .startObject() .field(SINGLE_VALUED_FIELD_NAME, (double) i) .field("num_tag", i < NUM_DOCS/2 + 1 ? 1 : 0) // used to test order by single-bucket sub agg + .field("constant", 1) .startArray(MULTI_VALUED_FIELD_NAME).value((double) i).value(i + 1d).endArray() .endObject())); @@ -315,7 +317,7 @@ public class DoubleTermsIT extends AbstractTermsTestCase { .field(SINGLE_VALUED_FIELD_NAME) .size(20) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.term(true))) // we need to sort by terms cause we're checking the first 20 values + .order(BucketOrder.key(true))) // we need to sort by terms cause we're checking the first 20 values .execute().actionGet(); assertSearchResponse(response); @@ -363,15 +365,15 @@ public class DoubleTermsIT extends AbstractTermsTestCase { assertThat(bucket.getDocCount(), equalTo(1L)); } } - + public void testSingleValueFieldWithPartitionedFiltering() throws Exception { runTestFieldWithPartitionedFiltering(SINGLE_VALUED_FIELD_NAME); } - + public void testMultiValueFieldWithPartitionedFiltering() throws Exception { runTestFieldWithPartitionedFiltering(MULTI_VALUED_FIELD_NAME); } - + private void runTestFieldWithPartitionedFiltering(String field) throws Exception { // Find total number of unique terms SearchResponse allResponse = client().prepareSearch("idx") @@ -399,14 +401,14 @@ public class DoubleTermsIT extends AbstractTermsTestCase { } } assertEquals(expectedCardinality, foundTerms.size()); - } + } public void testSingleValueFieldOrderedByTermAsc() throws Exception { SearchResponse response = client().prepareSearch("idx") .addAggregation(terms("terms") 
.field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.term(true))) + .order(BucketOrder.key(true))) .execute().actionGet(); assertSearchResponse(response); @@ -432,7 +434,7 @@ public class DoubleTermsIT extends AbstractTermsTestCase { .addAggregation(terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.term(false))) + .order(BucketOrder.key(false))) .execute().actionGet(); assertSearchResponse(response); @@ -453,6 +455,33 @@ public class DoubleTermsIT extends AbstractTermsTestCase { } } + public void testSingleValueFieldOrderedByTieBreaker() throws Exception { + SearchResponse response = client().prepareSearch("idx").setTypes("type") + .addAggregation(terms("terms") + .field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("max_constant", randomBoolean())) + .subAggregation(max("max_constant").field("constant"))) + .execute().actionGet(); + + assertSearchResponse(response); + + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + int i = 0; + for (Terms.Bucket bucket : terms.getBuckets()) { + assertThat(bucket, notNullValue()); + assertThat(key(bucket), equalTo("" + (double)i)); + assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + i++; + } + } + public void testSingleValuedFieldWithSubAggregation() throws Exception { SearchResponse response = client().prepareSearch("idx") .addAggregation(terms("terms") @@ -759,7 +788,7 @@ public class DoubleTermsIT extends AbstractTermsTestCase { .prepareSearch("idx") .addAggregation( terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("avg_i", 
asc)).subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME))) + .order(BucketOrder.aggregation("avg_i", asc)).subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME))) .execute().actionGet(); @@ -789,7 +818,7 @@ public class DoubleTermsIT extends AbstractTermsTestCase { terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("avg_i", asc)) + .order(BucketOrder.aggregation("avg_i", asc)) .subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME)) .subAggregation( terms("subTerms").field(MULTI_VALUED_FIELD_NAME).collectMode( @@ -831,7 +860,7 @@ public class DoubleTermsIT extends AbstractTermsTestCase { .prepareSearch("idx") .addAggregation( terms("num_tags").field("num_tag").collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("filter", asc)) + .order(BucketOrder.aggregation("filter", asc)) .subAggregation(filter("filter", QueryBuilders.matchAllQuery()))).execute().actionGet(); @@ -869,7 +898,7 @@ public class DoubleTermsIT extends AbstractTermsTestCase { terms("tags") .field("num_tag") .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("filter1>filter2>max", asc)) + .order(BucketOrder.aggregation("filter1>filter2>max", asc)) .subAggregation( filter("filter1", QueryBuilders.matchAllQuery()).subAggregation( filter("filter2", QueryBuilders.matchAllQuery()).subAggregation( @@ -923,7 +952,7 @@ public class DoubleTermsIT extends AbstractTermsTestCase { client().prepareSearch(index) .addAggregation( terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("avg_i", true))).execute().actionGet(); + .order(BucketOrder.aggregation("avg_i", true))).execute().actionGet(); fail("Expected search to fail when trying to sort terms aggregation by sug-aggregation that doesn't exist"); @@ -941,7 +970,7 @@ public class DoubleTermsIT extends 
AbstractTermsTestCase { terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("num_tags", true)) + .order(BucketOrder.aggregation("num_tags", true)) .subAggregation( terms("num_tags").field("num_tags").collectMode(randomFrom(SubAggCollectionMode.values())))) .execute().actionGet(); @@ -960,7 +989,7 @@ public class DoubleTermsIT extends AbstractTermsTestCase { client().prepareSearch(index) .addAggregation( terms("terms").field(SINGLE_VALUED_FIELD_NAME + "2").collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("stats.foo", true)) + .order(BucketOrder.aggregation("stats.foo", true)) .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))).execute().actionGet(); fail("Expected search to fail when trying to sort terms aggregation by multi-valued sug-aggregation " + @@ -978,7 +1007,7 @@ public class DoubleTermsIT extends AbstractTermsTestCase { client().prepareSearch(index) .addAggregation( terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("stats", true)) + .order(BucketOrder.aggregation("stats", true)) .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))).execute().actionGet(); fail("Expected search to fail when trying to sort terms aggregation by multi-valued sug-aggregation " + @@ -996,7 +1025,7 @@ public class DoubleTermsIT extends AbstractTermsTestCase { .prepareSearch("idx") .addAggregation( terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("avg_i", asc)).subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME))) + .order(BucketOrder.aggregation("avg_i", asc)).subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME))) .execute().actionGet(); @@ -1026,7 +1055,7 @@ public class DoubleTermsIT extends AbstractTermsTestCase { .prepareSearch("idx") 
.addAggregation( terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("stats.avg", asc)) + .order(BucketOrder.aggregation("stats.avg", asc)) .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))).execute().actionGet(); assertSearchResponse(response); @@ -1054,7 +1083,7 @@ public class DoubleTermsIT extends AbstractTermsTestCase { .prepareSearch("idx") .addAggregation( terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("stats.avg", asc)) + .order(BucketOrder.aggregation("stats.avg", asc)) .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))).execute().actionGet(); assertSearchResponse(response); @@ -1082,7 +1111,7 @@ public class DoubleTermsIT extends AbstractTermsTestCase { .prepareSearch("idx") .addAggregation( terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("stats.variance", asc)) + .order(BucketOrder.aggregation("stats.variance", asc)) .subAggregation(extendedStats("stats").field(SINGLE_VALUED_FIELD_NAME))).execute().actionGet(); assertSearchResponse(response); @@ -1139,48 +1168,48 @@ public class DoubleTermsIT extends AbstractTermsTestCase { public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAndTermsDesc() throws Exception { double[] expectedKeys = new double[] { 1, 2, 4, 3, 7, 6, 5 }; - assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("avg_l", true), Terms.Order.term(false)); + assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("avg_l", true), BucketOrder.key(false)); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAndTermsAsc() throws Exception { double[] expectedKeys = new double[] { 1, 2, 3, 4, 5, 6, 7 }; - assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("avg_l", true), Terms.Order.term(true)); + 
assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("avg_l", true), BucketOrder.key(true)); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationDescAndTermsAsc() throws Exception { double[] expectedKeys = new double[] { 5, 6, 7, 3, 4, 2, 1 }; - assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("avg_l", false), Terms.Order.term(true)); + assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("avg_l", false), BucketOrder.key(true)); } public void testSingleValuedFieldOrderedByCountAscAndSingleValueSubAggregationAsc() throws Exception { double[] expectedKeys = new double[] { 6, 7, 3, 4, 5, 1, 2 }; - assertMultiSortResponse(expectedKeys, Terms.Order.count(true), Terms.Order.aggregation("avg_l", true)); + assertMultiSortResponse(expectedKeys, BucketOrder.count(true), BucketOrder.aggregation("avg_l", true)); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscSingleValueSubAggregationAsc() throws Exception { double[] expectedKeys = new double[] { 6, 7, 3, 5, 4, 1, 2 }; - assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("sum_d", true), Terms.Order.aggregation("avg_l", true)); + assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("sum_d", true), BucketOrder.aggregation("avg_l", true)); } public void testSingleValuedFieldOrderedByThreeCriteria() throws Exception { double[] expectedKeys = new double[] { 2, 1, 4, 5, 3, 6, 7 }; - assertMultiSortResponse(expectedKeys, Terms.Order.count(false), - Terms.Order.aggregation("sum_d", false), - Terms.Order.aggregation("avg_l", false)); + assertMultiSortResponse(expectedKeys, BucketOrder.count(false), + BucketOrder.aggregation("sum_d", false), + BucketOrder.aggregation("avg_l", false)); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAsCompound() throws Exception { double[] expectedKeys = new double[] { 1, 2, 3, 4, 5, 6, 7 }; - assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("avg_l", true)); + 
assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("avg_l", true)); } - private void assertMultiSortResponse(double[] expectedKeys, Terms.Order... order) { + private void assertMultiSortResponse(double[] expectedKeys, BucketOrder... order) { SearchResponse response = client() .prepareSearch("sort_idx") .setTypes("multi_sort_type") .addAggregation( terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.compound(order)).subAggregation(avg("avg_l").field("l")) + .order(BucketOrder.compound(order)).subAggregation(avg("avg_l").field("l")) .subAggregation(sum("sum_d").field("d"))).execute().actionGet(); assertSearchResponse(response); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java index 683e7924419..d7bd069f2ba 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java @@ -19,7 +19,9 @@ package org.elasticsearch.search.aggregations.bucket; import com.carrotsearch.hppc.LongHashSet; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilders; @@ -27,16 +29,20 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; +import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.bucket.filter.Filter; import 
org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; +import org.elasticsearch.search.aggregations.metrics.avg.Avg; import org.elasticsearch.search.aggregations.metrics.max.Max; import org.elasticsearch.search.aggregations.metrics.stats.Stats; import org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import org.hamcrest.Matchers; +import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -48,6 +54,7 @@ import java.util.function.Function; import static java.util.Collections.emptyMap; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.search.aggregations.AggregationBuilders.avg; import static org.elasticsearch.search.aggregations.AggregationBuilders.filter; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.max; @@ -72,6 +79,7 @@ public class HistogramIT extends ESIntegTestCase { static int interval; static int numValueBuckets, numValuesBuckets; static long[] valueCounts, valuesCounts; + static Map> expectedMultiSortBuckets; @Override protected Collection> nodePlugins() { @@ -130,16 +138,18 @@ public class HistogramIT extends ESIntegTestCase { } List builders = new ArrayList<>(); - for (int i = 0; i < numDocs; i++) { builders.add(client().prepareIndex("idx", "type").setSource(jsonBuilder() .startObject() .field(SINGLE_VALUED_FIELD_NAME, i + 1) .startArray(MULTI_VALUED_FIELD_NAME).value(i + 1).value(i + 2).endArray() .field("tag", "tag" + i) + .field("constant", 1) .endObject())); } + getMultiSortDocs(builders); + assertAcked(prepareCreate("empty_bucket_idx").addMapping("type", 
SINGLE_VALUED_FIELD_NAME, "type=integer")); for (int i = 0; i < 2; i++) { builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + i).setSource(jsonBuilder() @@ -151,6 +161,51 @@ public class HistogramIT extends ESIntegTestCase { ensureSearchable(); } + private void addExpectedBucket(long key, long docCount, double avg, double sum) { + Map bucketProps = new HashMap<>(); + bucketProps.put("key", key); + bucketProps.put("_count", docCount); + bucketProps.put("avg_l", avg); + bucketProps.put("sum_d", sum); + expectedMultiSortBuckets.put(key, bucketProps); + } + + private void getMultiSortDocs(List builders) throws IOException { + expectedMultiSortBuckets = new HashMap<>(); + addExpectedBucket(1, 3, 1, 6); + addExpectedBucket(2, 3, 2, 6); + addExpectedBucket(3, 2, 3, 3); + addExpectedBucket(4, 2, 3, 4); + addExpectedBucket(5, 2, 5, 3); + addExpectedBucket(6, 1, 5, 1); + addExpectedBucket(7, 1, 5, 1); + + assertAcked(client().admin().indices().prepareCreate("sort_idx") + .addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=double").get()); + for (int i = 1; i <= 3; i++) { + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 1).field("l", 1).field("d", i).endObject())); + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 2).field("l", 2).field("d", i).endObject())); + } + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 3).field("l", 3).field("d", 1).endObject())); + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 3.8).field("l", 3).field("d", 2).endObject())); + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4).field("l", 3).field("d", 1).endObject())); + 
builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4.4).field("l", 3).field("d", 3).endObject())); + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5).field("l", 5).field("d", 1).endObject())); + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5.1).field("l", 5).field("d", 2).endObject())); + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 6).field("l", 5).field("d", 1).endObject())); + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 7).field("l", 5).field("d", 1).endObject())); + } + public void testSingleValuedField() throws Exception { SearchResponse response = client().prepareSearch("idx") .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)) @@ -241,7 +296,7 @@ public class HistogramIT extends ESIntegTestCase { public void testSingleValuedFieldOrderedByKeyAsc() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.KEY_ASC)) + .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(BucketOrder.key(true))) .execute().actionGet(); assertSearchResponse(response); @@ -252,7 +307,6 @@ public class HistogramIT extends ESIntegTestCase { assertThat(histo.getName(), equalTo("histo")); assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); - // TODO: use diamond once JI-9019884 is fixed List buckets = new ArrayList<>(histo.getBuckets()); for (int i = 0; i < numValueBuckets; ++i) { Histogram.Bucket bucket = buckets.get(i); @@ -264,7 +318,7 @@ public class HistogramIT extends 
ESIntegTestCase { public void testsingleValuedFieldOrderedByKeyDesc() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.KEY_DESC)) + .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(BucketOrder.key(false))) .execute().actionGet(); assertSearchResponse(response); @@ -275,7 +329,6 @@ public class HistogramIT extends ESIntegTestCase { assertThat(histo.getName(), equalTo("histo")); assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); - // TODO: use diamond once JI-9019884 is fixed List buckets = new ArrayList<>(histo.getBuckets()); for (int i = 0; i < numValueBuckets; ++i) { Histogram.Bucket bucket = buckets.get(numValueBuckets - i - 1); @@ -287,7 +340,7 @@ public class HistogramIT extends ESIntegTestCase { public void testSingleValuedFieldOrderedByCountAsc() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.COUNT_ASC)) + .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(BucketOrder.count(true))) .execute().actionGet(); assertSearchResponse(response); @@ -299,7 +352,6 @@ public class HistogramIT extends ESIntegTestCase { assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); LongHashSet buckets = new LongHashSet(); - // TODO: use diamond once JI-9019884 is fixed List histoBuckets = new ArrayList<>(histo.getBuckets()); long previousCount = Long.MIN_VALUE; for (int i = 0; i < numValueBuckets; ++i) { @@ -316,7 +368,7 @@ public class HistogramIT extends ESIntegTestCase { public void testSingleValuedFieldOrderedByCountDesc() throws Exception { SearchResponse response = client().prepareSearch("idx") - 
.addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.COUNT_DESC)) + .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(BucketOrder.count(false))) .execute().actionGet(); assertSearchResponse(response); @@ -328,7 +380,6 @@ public class HistogramIT extends ESIntegTestCase { assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); LongHashSet buckets = new LongHashSet(); - // TODO: use diamond once JI-9019884 is fixed List histoBuckets = new ArrayList<>(histo.getBuckets()); long previousCount = Long.MAX_VALUE; for (int i = 0; i < numValueBuckets; ++i) { @@ -361,7 +412,6 @@ public class HistogramIT extends ESIntegTestCase { Object[] propertiesDocCounts = (Object[]) ((InternalAggregation)histo).getProperty("_count"); Object[] propertiesCounts = (Object[]) ((InternalAggregation)histo).getProperty("sum.value"); - // TODO: use diamond once JI-9019884 is fixed List buckets = new ArrayList<>(histo.getBuckets()); for (int i = 0; i < numValueBuckets; ++i) { Histogram.Bucket bucket = buckets.get(i); @@ -390,7 +440,7 @@ public class HistogramIT extends ESIntegTestCase { histogram("histo") .field(SINGLE_VALUED_FIELD_NAME) .interval(interval) - .order(Histogram.Order.aggregation("sum", true)) + .order(BucketOrder.aggregation("sum", true)) .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) .execute().actionGet(); @@ -404,7 +454,6 @@ public class HistogramIT extends ESIntegTestCase { LongHashSet visited = new LongHashSet(); double previousSum = Double.NEGATIVE_INFINITY; - // TODO: use diamond once JI-9019884 is fixed List buckets = new ArrayList<>(histo.getBuckets()); for (int i = 0; i < numValueBuckets; ++i) { Histogram.Bucket bucket = buckets.get(i); @@ -434,7 +483,7 @@ public class HistogramIT extends ESIntegTestCase { histogram("histo") .field(SINGLE_VALUED_FIELD_NAME) .interval(interval) - .order(Histogram.Order.aggregation("sum", false)) + 
.order(BucketOrder.aggregation("sum", false)) .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) .execute().actionGet(); @@ -448,7 +497,6 @@ public class HistogramIT extends ESIntegTestCase { LongHashSet visited = new LongHashSet(); double previousSum = Double.POSITIVE_INFINITY; - // TODO: use diamond once JI-9019884 is fixed List buckets = new ArrayList<>(histo.getBuckets()); for (int i = 0; i < numValueBuckets; ++i) { Histogram.Bucket bucket = buckets.get(i); @@ -478,7 +526,7 @@ public class HistogramIT extends ESIntegTestCase { histogram("histo") .field(SINGLE_VALUED_FIELD_NAME) .interval(interval) - .order(Histogram.Order.aggregation("stats.sum", false)) + .order(BucketOrder.aggregation("stats.sum", false)) .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))) .execute().actionGet(); @@ -492,7 +540,7 @@ public class HistogramIT extends ESIntegTestCase { LongHashSet visited = new LongHashSet(); double previousSum = Double.POSITIVE_INFINITY; - // TODO: use diamond once JI-9019884 is fixed + List buckets = new ArrayList<>(histo.getBuckets()); for (int i = 0; i < numValueBuckets; ++i) { Histogram.Bucket bucket = buckets.get(i); @@ -523,7 +571,7 @@ public class HistogramIT extends ESIntegTestCase { histogram("histo") .field(SINGLE_VALUED_FIELD_NAME) .interval(interval) - .order(Histogram.Order.aggregation("filter>max", asc)) + .order(BucketOrder.aggregation("filter>max", asc)) .subAggregation(filter("filter", matchAllQuery()) .subAggregation(max("max").field(SINGLE_VALUED_FIELD_NAME)))) .execute().actionGet(); @@ -538,7 +586,6 @@ public class HistogramIT extends ESIntegTestCase { LongHashSet visited = new LongHashSet(); double prevMax = asc ? 
Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY; - // TODO: use diamond once JI-9019884 is fixed List buckets = new ArrayList<>(histo.getBuckets()); for (int i = 0; i < numValueBuckets; ++i) { Histogram.Bucket bucket = buckets.get(i); @@ -558,6 +605,62 @@ public class HistogramIT extends ESIntegTestCase { } } + public void testSingleValuedFieldOrderedByTieBreaker() throws Exception { + SearchResponse response = client().prepareSearch("idx") + .addAggregation(histogram("histo") + .field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .order(BucketOrder.aggregation("max_constant", randomBoolean())) + .subAggregation(max("max_constant").field("constant"))) + .execute().actionGet(); + + assertSearchResponse(response); + + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); + + List buckets = new ArrayList<>(histo.getBuckets()); + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); + assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); + } + } + + public void testSingleValuedFieldOrderedByIllegalAgg() throws Exception { + boolean asc = true; + try { + client() + .prepareSearch("idx") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .order(BucketOrder.aggregation("inner_histo>avg", asc)) + .subAggregation(histogram("inner_histo") + .interval(interval) + .field(MULTI_VALUED_FIELD_NAME) + .subAggregation(avg("avg").field("value")))) + .execute().actionGet(); + fail("Expected an exception"); + } catch (SearchPhaseExecutionException e) { + ElasticsearchException[] rootCauses = e.guessRootCauses(); + if (rootCauses.length == 1) { + ElasticsearchException rootCause = rootCauses[0]; + if (rootCause 
instanceof AggregationExecutionException) { + AggregationExecutionException aggException = (AggregationExecutionException) rootCause; + assertThat(aggException.getMessage(), Matchers.startsWith("Invalid aggregation order path")); + } else { + throw e; + } + } else { + throw e; + } + } + } + public void testSingleValuedFieldWithValueScript() throws Exception { SearchResponse response = client().prepareSearch("idx") .addAggregation( @@ -614,7 +717,7 @@ public class HistogramIT extends ESIntegTestCase { public void testMultiValuedFieldOrderedByKeyDesc() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(histogram("histo").field(MULTI_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.KEY_DESC)) + .addAggregation(histogram("histo").field(MULTI_VALUED_FIELD_NAME).interval(interval).order(BucketOrder.key(false))) .execute().actionGet(); assertSearchResponse(response); @@ -625,7 +728,6 @@ public class HistogramIT extends ESIntegTestCase { assertThat(histo.getName(), equalTo("histo")); assertThat(histo.getBuckets().size(), equalTo(numValuesBuckets)); - // TODO: use diamond once JI-9019884 is fixed List buckets = new ArrayList<>(histo.getBuckets()); for (int i = 0; i < numValuesBuckets; ++i) { Histogram.Bucket bucket = buckets.get(numValuesBuckets - i - 1); @@ -1036,4 +1138,74 @@ public class HistogramIT extends ESIntegTestCase { assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() .getMissCount(), equalTo(1L)); } + + public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAndKeyDesc() throws Exception { + long[] expectedKeys = new long[] { 1, 2, 4, 3, 7, 6, 5 }; + assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("avg_l", true), BucketOrder.key(false)); + } + + public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAndKeyAsc() throws Exception { + long[] expectedKeys = new long[] { 1, 2, 3, 4, 5, 6, 7 }; + 
assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("avg_l", true), BucketOrder.key(true)); + } + + public void testSingleValuedFieldOrderedBySingleValueSubAggregationDescAndKeyAsc() throws Exception { + long[] expectedKeys = new long[] { 5, 6, 7, 3, 4, 2, 1 }; + assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("avg_l", false), BucketOrder.key(true)); + } + + public void testSingleValuedFieldOrderedByCountAscAndSingleValueSubAggregationAsc() throws Exception { + long[] expectedKeys = new long[] { 6, 7, 3, 4, 5, 1, 2 }; + assertMultiSortResponse(expectedKeys, BucketOrder.count(true), BucketOrder.aggregation("avg_l", true)); + } + + public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscSingleValueSubAggregationAsc() throws Exception { + long[] expectedKeys = new long[] { 6, 7, 3, 5, 4, 1, 2 }; + assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("sum_d", true), BucketOrder.aggregation("avg_l", true)); + } + + public void testSingleValuedFieldOrderedByThreeCriteria() throws Exception { + long[] expectedKeys = new long[] { 2, 1, 4, 5, 3, 6, 7 }; + assertMultiSortResponse(expectedKeys, BucketOrder.count(false), BucketOrder.aggregation("sum_d", false), + BucketOrder.aggregation("avg_l", false)); + } + + public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAsCompound() throws Exception { + long[] expectedKeys = new long[] { 1, 2, 3, 4, 5, 6, 7 }; + assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("avg_l", true)); + } + + private void assertMultiSortResponse(long[] expectedKeys, BucketOrder... 
order) { + SearchResponse response = client() + .prepareSearch("sort_idx") + .setTypes("type") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1).order(BucketOrder.compound(order)) + .subAggregation(avg("avg_l").field("l")).subAggregation(sum("sum_d").field("d"))).execute().actionGet(); + + assertSearchResponse(response); + + Histogram histogram = response.getAggregations().get("histo"); + assertThat(histogram, notNullValue()); + assertThat(histogram.getName(), equalTo("histo")); + assertThat(histogram.getBuckets().size(), equalTo(expectedKeys.length)); + + int i = 0; + for (Histogram.Bucket bucket : histogram.getBuckets()) { + assertThat(bucket, notNullValue()); + assertThat(key(bucket), equalTo(expectedKeys[i])); + assertThat(bucket.getDocCount(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("_count"))); + Avg avg = bucket.getAggregations().get("avg_l"); + assertThat(avg, notNullValue()); + assertThat(avg.getValue(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("avg_l"))); + Sum sum = bucket.getAggregations().get("sum_d"); + assertThat(sum, notNullValue()); + assertThat(sum.getValue(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("sum_d"))); + i++; + } + } + + private long key(Histogram.Bucket bucket) { + return ((Number) bucket.getKey()).longValue(); + } } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramTests.java index ea61a8168ad..ee22b229177 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramTests.java @@ -21,7 +21,10 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; -import 
org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Order; +import org.elasticsearch.search.aggregations.BucketOrder; + +import java.util.ArrayList; +import java.util.List; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.startsWith; @@ -54,26 +57,11 @@ public class HistogramTests extends BaseAggregationTestCase order = randomOrder(); + if(order.size() == 1 && randomBoolean()) { + factory.order(order.get(0)); + } else { + factory.order(order); } } return factory; @@ -102,4 +90,31 @@ public class HistogramTests extends BaseAggregationTestCase randomOrder() { + List orders = new ArrayList<>(); + switch (randomInt(4)) { + case 0: + orders.add(BucketOrder.key(randomBoolean())); + break; + case 1: + orders.add(BucketOrder.count(randomBoolean())); + break; + case 2: + orders.add(BucketOrder.aggregation(randomAlphaOfLengthBetween(3, 20), randomBoolean())); + break; + case 3: + orders.add(BucketOrder.aggregation(randomAlphaOfLengthBetween(3, 20), randomAlphaOfLengthBetween(3, 20), randomBoolean())); + break; + case 4: + int numOrders = randomIntBetween(1, 3); + for (int i = 0; i < numOrders; i++) { + orders.addAll(randomOrder()); + } + break; + default: + fail(); + } + return orders; + } + } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java index a54dc3e2f5e..565cdaaa87e 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java @@ -40,6 +40,7 @@ import org.elasticsearch.search.aggregations.metrics.max.Max; import org.elasticsearch.search.aggregations.metrics.stats.Stats; import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats; import org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.BucketOrder; import 
org.elasticsearch.test.ESIntegTestCase; import org.hamcrest.Matchers; @@ -121,6 +122,7 @@ public class LongTermsIT extends AbstractTermsTestCase { .field(SINGLE_VALUED_FIELD_NAME, i) .startArray(MULTI_VALUED_FIELD_NAME).value(i).value(i + 1).endArray() .field("num_tag", i < lowCardBuilders.length / 2 + 1 ? 1 : 0) // used to test order by single-bucket sub agg + .field("constant", 1) .endObject()); } indexRandom(true, lowCardBuilders); @@ -392,7 +394,7 @@ public class LongTermsIT extends AbstractTermsTestCase { .field(SINGLE_VALUED_FIELD_NAME) .size(20) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.term(true))) // we need to sort by terms cause we're checking the first 20 values + .order(BucketOrder.key(true))) // we need to sort by terms cause we're checking the first 20 values .execute().actionGet(); assertSearchResponse(response); @@ -417,7 +419,7 @@ public class LongTermsIT extends AbstractTermsTestCase { .addAggregation(terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.term(true))) + .order(BucketOrder.key(true))) .execute().actionGet(); assertSearchResponse(response); @@ -441,7 +443,7 @@ public class LongTermsIT extends AbstractTermsTestCase { .addAggregation(terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.term(false))) + .order(BucketOrder.key(false))) .execute().actionGet(); assertSearchResponse(response); @@ -462,6 +464,31 @@ public class LongTermsIT extends AbstractTermsTestCase { } } + public void testSingleValueFieldOrderedByTieBreaker() throws Exception { + SearchResponse response = client().prepareSearch("idx").setTypes("type") + .addAggregation(terms("terms") + .field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("max_constant", randomBoolean())) + 
.subAggregation(max("max_constant").field("constant"))) + .execute().actionGet(); + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + int i = 0; + for (Terms.Bucket bucket : terms.getBuckets()) { + assertThat(bucket, notNullValue()); + assertThat(key(bucket), equalTo("" + i)); + assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + i++; + } + } + public void testSingleValuedFieldWithSubAggregation() throws Exception { SearchResponse response = client().prepareSearch("idx") .addAggregation(terms("terms") @@ -769,7 +796,7 @@ public class LongTermsIT extends AbstractTermsTestCase { .addAggregation(terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("avg_i", asc)) + .order(BucketOrder.aggregation("avg_i", asc)) .subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME)) ).execute().actionGet(); @@ -798,7 +825,7 @@ public class LongTermsIT extends AbstractTermsTestCase { .addAggregation(terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("avg_i", asc)) + .order(BucketOrder.aggregation("avg_i", asc)) .subAggregation( avg("avg_i").field(SINGLE_VALUED_FIELD_NAME)) .subAggregation( @@ -842,7 +869,7 @@ public class LongTermsIT extends AbstractTermsTestCase { .addAggregation(terms("num_tags") .field("num_tag") .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("filter", asc)) + .order(BucketOrder.aggregation("filter", asc)) .subAggregation(filter("filter", QueryBuilders.matchAllQuery())) ).get(); @@ -879,7 +906,7 @@ public class LongTermsIT extends AbstractTermsTestCase { .addAggregation(terms("tags") .field("num_tag") 
.collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("filter1>filter2>max", asc)) + .order(BucketOrder.aggregation("filter1>filter2>max", asc)) .subAggregation(filter("filter1", QueryBuilders.matchAllQuery()).subAggregation( filter("filter2", QueryBuilders.matchAllQuery()) .subAggregation(max("max").field(SINGLE_VALUED_FIELD_NAME)))) @@ -934,7 +961,7 @@ public class LongTermsIT extends AbstractTermsTestCase { .addAggregation(terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("avg_i", true)) + .order(BucketOrder.aggregation("avg_i", true)) ).execute().actionGet(); fail("Expected search to fail when trying to sort terms aggregation by sug-aggregation that doesn't exist"); @@ -952,7 +979,7 @@ public class LongTermsIT extends AbstractTermsTestCase { .addAggregation(terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("num_tags", true)) + .order(BucketOrder.aggregation("num_tags", true)) .subAggregation(terms("num_tags").field("num_tags") .collectMode(randomFrom(SubAggCollectionMode.values()))) ).execute().actionGet(); @@ -972,7 +999,7 @@ public class LongTermsIT extends AbstractTermsTestCase { .addAggregation(terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("stats.foo", true)) + .order(BucketOrder.aggregation("stats.foo", true)) .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) ).execute().actionGet(); @@ -992,7 +1019,7 @@ public class LongTermsIT extends AbstractTermsTestCase { .addAggregation(terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("stats", true)) + .order(BucketOrder.aggregation("stats", true)) .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) 
).execute().actionGet(); @@ -1011,7 +1038,7 @@ public class LongTermsIT extends AbstractTermsTestCase { .addAggregation(terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("avg_i", asc)) + .order(BucketOrder.aggregation("avg_i", asc)) .subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME)) ).execute().actionGet(); @@ -1043,7 +1070,7 @@ public class LongTermsIT extends AbstractTermsTestCase { .addAggregation(terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("stats.avg", asc)) + .order(BucketOrder.aggregation("stats.avg", asc)) .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) ).execute().actionGet(); @@ -1073,7 +1100,7 @@ public class LongTermsIT extends AbstractTermsTestCase { .addAggregation(terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("stats.avg", asc)) + .order(BucketOrder.aggregation("stats.avg", asc)) .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) ).execute().actionGet(); @@ -1103,7 +1130,7 @@ public class LongTermsIT extends AbstractTermsTestCase { .addAggregation(terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("stats.variance", asc)) + .order(BucketOrder.aggregation("stats.variance", asc)) .subAggregation(extendedStats("stats").field(SINGLE_VALUED_FIELD_NAME)) ).execute().actionGet(); @@ -1129,47 +1156,47 @@ public class LongTermsIT extends AbstractTermsTestCase { public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAndTermsDesc() throws Exception { long[] expectedKeys = new long[] { 1, 2, 4, 3, 7, 6, 5 }; - assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("avg_l", true), Terms.Order.term(false)); + assertMultiSortResponse(expectedKeys, 
BucketOrder.aggregation("avg_l", true), BucketOrder.key(false)); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAndTermsAsc() throws Exception { long[] expectedKeys = new long[] { 1, 2, 3, 4, 5, 6, 7 }; - assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("avg_l", true), Terms.Order.term(true)); + assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("avg_l", true), BucketOrder.key(true)); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationDescAndTermsAsc() throws Exception { long[] expectedKeys = new long[] { 5, 6, 7, 3, 4, 2, 1 }; - assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("avg_l", false), Terms.Order.term(true)); + assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("avg_l", false), BucketOrder.key(true)); } public void testSingleValuedFieldOrderedByCountAscAndSingleValueSubAggregationAsc() throws Exception { long[] expectedKeys = new long[] { 6, 7, 3, 4, 5, 1, 2 }; - assertMultiSortResponse(expectedKeys, Terms.Order.count(true), Terms.Order.aggregation("avg_l", true)); + assertMultiSortResponse(expectedKeys, BucketOrder.count(true), BucketOrder.aggregation("avg_l", true)); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscSingleValueSubAggregationAsc() throws Exception { long[] expectedKeys = new long[] { 6, 7, 3, 5, 4, 1, 2 }; - assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("sum_d", true), Terms.Order.aggregation("avg_l", true)); + assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("sum_d", true), BucketOrder.aggregation("avg_l", true)); } public void testSingleValuedFieldOrderedByThreeCriteria() throws Exception { long[] expectedKeys = new long[] { 2, 1, 4, 5, 3, 6, 7 }; - assertMultiSortResponse(expectedKeys, Terms.Order.count(false), - Terms.Order.aggregation("sum_d", false), - Terms.Order.aggregation("avg_l", false)); + assertMultiSortResponse(expectedKeys, BucketOrder.count(false), + BucketOrder.aggregation("sum_d", 
false), + BucketOrder.aggregation("avg_l", false)); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAsCompound() throws Exception { long[] expectedKeys = new long[] { 1, 2, 3, 4, 5, 6, 7 }; - assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("avg_l", true)); + assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("avg_l", true)); } - private void assertMultiSortResponse(long[] expectedKeys, Terms.Order... order) { + private void assertMultiSortResponse(long[] expectedKeys, BucketOrder... order) { SearchResponse response = client().prepareSearch("sort_idx").setTypes("multi_sort_type") .addAggregation(terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.compound(order)) + .order(BucketOrder.compound(order)) .subAggregation(avg("avg_l").field("l")) .subAggregation(sum("sum_d").field("d")) ).execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java index e1e8f1ba660..038227239cc 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java @@ -37,6 +37,7 @@ import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; @@ -190,122 +191,122 @@ public class MinDocCountIT extends AbstractTermsTestCase { } public void testStringTermAsc() throws Exception { - testMinDocCountOnTerms("s", 
Script.NO, Terms.Order.term(true)); + testMinDocCountOnTerms("s", Script.NO, BucketOrder.key(true)); } public void testStringScriptTermAsc() throws Exception { - testMinDocCountOnTerms("s", Script.YES, Terms.Order.term(true)); + testMinDocCountOnTerms("s", Script.YES, BucketOrder.key(true)); } public void testStringTermDesc() throws Exception { - testMinDocCountOnTerms("s", Script.NO, Terms.Order.term(false)); + testMinDocCountOnTerms("s", Script.NO, BucketOrder.key(false)); } public void testStringScriptTermDesc() throws Exception { - testMinDocCountOnTerms("s", Script.YES, Terms.Order.term(false)); + testMinDocCountOnTerms("s", Script.YES, BucketOrder.key(false)); } public void testStringCountAsc() throws Exception { - testMinDocCountOnTerms("s", Script.NO, Terms.Order.count(true)); + testMinDocCountOnTerms("s", Script.NO, BucketOrder.count(true)); } public void testStringScriptCountAsc() throws Exception { - testMinDocCountOnTerms("s", Script.YES, Terms.Order.count(true)); + testMinDocCountOnTerms("s", Script.YES, BucketOrder.count(true)); } public void testStringCountDesc() throws Exception { - testMinDocCountOnTerms("s", Script.NO, Terms.Order.count(false)); + testMinDocCountOnTerms("s", Script.NO, BucketOrder.count(false)); } public void testStringScriptCountDesc() throws Exception { - testMinDocCountOnTerms("s", Script.YES, Terms.Order.count(false)); + testMinDocCountOnTerms("s", Script.YES, BucketOrder.count(false)); } public void testStringCountAscWithInclude() throws Exception { - testMinDocCountOnTerms("s", Script.NO, Terms.Order.count(true), ".*a.*", true); + testMinDocCountOnTerms("s", Script.NO, BucketOrder.count(true), ".*a.*", true); } public void testStringScriptCountAscWithInclude() throws Exception { - testMinDocCountOnTerms("s", Script.YES, Terms.Order.count(true), ".*a.*", true); + testMinDocCountOnTerms("s", Script.YES, BucketOrder.count(true), ".*a.*", true); } public void testStringCountDescWithInclude() throws Exception { - 
testMinDocCountOnTerms("s", Script.NO, Terms.Order.count(false), ".*a.*", true); + testMinDocCountOnTerms("s", Script.NO, BucketOrder.count(false), ".*a.*", true); } public void testStringScriptCountDescWithInclude() throws Exception { - testMinDocCountOnTerms("s", Script.YES, Terms.Order.count(false), ".*a.*", true); + testMinDocCountOnTerms("s", Script.YES, BucketOrder.count(false), ".*a.*", true); } public void testLongTermAsc() throws Exception { - testMinDocCountOnTerms("l", Script.NO, Terms.Order.term(true)); + testMinDocCountOnTerms("l", Script.NO, BucketOrder.key(true)); } public void testLongScriptTermAsc() throws Exception { - testMinDocCountOnTerms("l", Script.YES, Terms.Order.term(true)); + testMinDocCountOnTerms("l", Script.YES, BucketOrder.key(true)); } public void testLongTermDesc() throws Exception { - testMinDocCountOnTerms("l", Script.NO, Terms.Order.term(false)); + testMinDocCountOnTerms("l", Script.NO, BucketOrder.key(false)); } public void testLongScriptTermDesc() throws Exception { - testMinDocCountOnTerms("l", Script.YES, Terms.Order.term(false)); + testMinDocCountOnTerms("l", Script.YES, BucketOrder.key(false)); } public void testLongCountAsc() throws Exception { - testMinDocCountOnTerms("l", Script.NO, Terms.Order.count(true)); + testMinDocCountOnTerms("l", Script.NO, BucketOrder.count(true)); } public void testLongScriptCountAsc() throws Exception { - testMinDocCountOnTerms("l", Script.YES, Terms.Order.count(true)); + testMinDocCountOnTerms("l", Script.YES, BucketOrder.count(true)); } public void testLongCountDesc() throws Exception { - testMinDocCountOnTerms("l", Script.NO, Terms.Order.count(false)); + testMinDocCountOnTerms("l", Script.NO, BucketOrder.count(false)); } public void testLongScriptCountDesc() throws Exception { - testMinDocCountOnTerms("l", Script.YES, Terms.Order.count(false)); + testMinDocCountOnTerms("l", Script.YES, BucketOrder.count(false)); } public void testDoubleTermAsc() throws Exception { - 
testMinDocCountOnTerms("d", Script.NO, Terms.Order.term(true)); + testMinDocCountOnTerms("d", Script.NO, BucketOrder.key(true)); } public void testDoubleScriptTermAsc() throws Exception { - testMinDocCountOnTerms("d", Script.YES, Terms.Order.term(true)); + testMinDocCountOnTerms("d", Script.YES, BucketOrder.key(true)); } public void testDoubleTermDesc() throws Exception { - testMinDocCountOnTerms("d", Script.NO, Terms.Order.term(false)); + testMinDocCountOnTerms("d", Script.NO, BucketOrder.key(false)); } public void testDoubleScriptTermDesc() throws Exception { - testMinDocCountOnTerms("d", Script.YES, Terms.Order.term(false)); + testMinDocCountOnTerms("d", Script.YES, BucketOrder.key(false)); } public void testDoubleCountAsc() throws Exception { - testMinDocCountOnTerms("d", Script.NO, Terms.Order.count(true)); + testMinDocCountOnTerms("d", Script.NO, BucketOrder.count(true)); } public void testDoubleScriptCountAsc() throws Exception { - testMinDocCountOnTerms("d", Script.YES, Terms.Order.count(true)); + testMinDocCountOnTerms("d", Script.YES, BucketOrder.count(true)); } public void testDoubleCountDesc() throws Exception { - testMinDocCountOnTerms("d", Script.NO, Terms.Order.count(false)); + testMinDocCountOnTerms("d", Script.NO, BucketOrder.count(false)); } public void testDoubleScriptCountDesc() throws Exception { - testMinDocCountOnTerms("d", Script.YES, Terms.Order.count(false)); + testMinDocCountOnTerms("d", Script.YES, BucketOrder.count(false)); } - private void testMinDocCountOnTerms(String field, Script script, Terms.Order order) throws Exception { + private void testMinDocCountOnTerms(String field, Script script, BucketOrder order) throws Exception { testMinDocCountOnTerms(field, script, order, null, true); } - private void testMinDocCountOnTerms(String field, Script script, Terms.Order order, String include, boolean retry) throws Exception { + private void testMinDocCountOnTerms(String field, Script script, BucketOrder order, String include, boolean 
retry) throws Exception { // all terms final SearchResponse allTermsResponse = client().prepareSearch("idx").setTypes("type") .setSize(0) @@ -342,38 +343,38 @@ public class MinDocCountIT extends AbstractTermsTestCase { } public void testHistogramCountAsc() throws Exception { - testMinDocCountOnHistogram(Histogram.Order.COUNT_ASC); + testMinDocCountOnHistogram(BucketOrder.count(true)); } public void testHistogramCountDesc() throws Exception { - testMinDocCountOnHistogram(Histogram.Order.COUNT_DESC); + testMinDocCountOnHistogram(BucketOrder.count(false)); } public void testHistogramKeyAsc() throws Exception { - testMinDocCountOnHistogram(Histogram.Order.KEY_ASC); + testMinDocCountOnHistogram(BucketOrder.key(true)); } public void testHistogramKeyDesc() throws Exception { - testMinDocCountOnHistogram(Histogram.Order.KEY_DESC); + testMinDocCountOnHistogram(BucketOrder.key(false)); } public void testDateHistogramCountAsc() throws Exception { - testMinDocCountOnDateHistogram(Histogram.Order.COUNT_ASC); + testMinDocCountOnDateHistogram(BucketOrder.count(true)); } public void testDateHistogramCountDesc() throws Exception { - testMinDocCountOnDateHistogram(Histogram.Order.COUNT_DESC); + testMinDocCountOnDateHistogram(BucketOrder.count(false)); } public void testDateHistogramKeyAsc() throws Exception { - testMinDocCountOnDateHistogram(Histogram.Order.KEY_ASC); + testMinDocCountOnDateHistogram(BucketOrder.key(true)); } public void testDateHistogramKeyDesc() throws Exception { - testMinDocCountOnDateHistogram(Histogram.Order.KEY_DESC); + testMinDocCountOnDateHistogram(BucketOrder.key(false)); } - private void testMinDocCountOnHistogram(Histogram.Order order) throws Exception { + private void testMinDocCountOnHistogram(BucketOrder order) throws Exception { final int interval = randomIntBetween(1, 3); final SearchResponse allResponse = client().prepareSearch("idx").setTypes("type") .setSize(0) @@ -393,7 +394,7 @@ public class MinDocCountIT extends AbstractTermsTestCase { } } - 
private void testMinDocCountOnDateHistogram(Histogram.Order order) throws Exception { + private void testMinDocCountOnDateHistogram(BucketOrder order) throws Exception { final SearchResponse allResponse = client().prepareSearch("idx").setTypes("type") .setSize(0) .setQuery(QUERY) diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NaNSortingIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NaNSortingIT.java index f6db12a7f6d..5b8c3b878c1 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NaNSortingIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NaNSortingIT.java @@ -30,6 +30,7 @@ import org.elasticsearch.search.aggregations.metrics.avg.Avg; import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats; import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStatsAggregationBuilder; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.test.ESIntegTestCase; @@ -150,7 +151,7 @@ public class NaNSortingIT extends ESIntegTestCase { final boolean asc = randomBoolean(); SubAggregation agg = randomFrom(SubAggregation.values()); SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms").field(fieldName).collectMode(randomFrom(SubAggCollectionMode.values())).subAggregation(agg.builder()).order(Terms.Order.aggregation(agg.sortKey(), asc))) + .addAggregation(terms("terms").field(fieldName).collectMode(randomFrom(SubAggCollectionMode.values())).subAggregation(agg.builder()).order(BucketOrder.aggregation(agg.sortKey(), asc))) .execute().actionGet(); assertSearchResponse(response); @@ -175,7 +176,7 @@ public class NaNSortingIT extends ESIntegTestCase 
{ SubAggregation agg = randomFrom(SubAggregation.values()); SearchResponse response = client().prepareSearch("idx") .addAggregation(histogram("histo") - .field("long_value").interval(randomIntBetween(1, 2)).subAggregation(agg.builder()).order(Histogram.Order.aggregation(agg.sortKey(), asc))) + .field("long_value").interval(randomIntBetween(1, 2)).subAggregation(agg.builder()).order(BucketOrder.aggregation(agg.sortKey(), asc))) .execute().actionGet(); assertSearchResponse(response); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ReverseNestedIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ReverseNestedIT.java index 6118cb69ee7..aaf366c7c7b 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ReverseNestedIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ReverseNestedIT.java @@ -29,6 +29,7 @@ import org.elasticsearch.search.aggregations.bucket.nested.Nested; import org.elasticsearch.search.aggregations.bucket.nested.ReverseNested; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCount; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import java.util.ArrayList; @@ -349,13 +350,13 @@ public class ReverseNestedIT extends ESIntegTestCase { SearchResponse response = client().prepareSearch("idx2") .addAggregation(nested("nested1", "nested1.nested2") .subAggregation( - terms("field2").field("nested1.nested2.field2").order(Terms.Order.term(true)) + terms("field2").field("nested1.nested2.field2").order(BucketOrder.key(true)) .collectMode(randomFrom(SubAggCollectionMode.values())) .size(10000) .subAggregation( reverseNested("nested1_to_field1").path("nested1") .subAggregation( - terms("field1").field("nested1.field1").order(Terms.Order.term(true)) + terms("field1").field("nested1.field1").order(BucketOrder.key(true)) 
.collectMode(randomFrom(SubAggCollectionMode.values())) ) ) diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java index ebd078de674..328ce538feb 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java @@ -28,6 +28,7 @@ import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregationBu import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket; import org.elasticsearch.search.aggregations.metrics.max.Max; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import java.util.Collection; @@ -99,7 +100,7 @@ public class SamplerIT extends ESIntegTestCase { SearchResponse response = client().prepareSearch("test").setTypes("book").setSearchType(SearchType.QUERY_THEN_FETCH) .addAggregation(terms("genres") .field("genre") - .order(Terms.Order.aggregation("sample>max_price.value", asc)) + .order(BucketOrder.aggregation("sample>max_price.value", asc)) .subAggregation(sampler("sample").shardSize(100) .subAggregation(max("max_price").field("price"))) ).execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsIT.java index 748c5f886f6..4c03ca9a84e 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsIT.java @@ -21,6 +21,7 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import 
org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.search.aggregations.BucketOrder; import java.util.HashMap; import java.util.List; @@ -39,7 +40,7 @@ public class ShardSizeTermsIT extends ShardSizeTestCase { SearchResponse response = client().prepareSearch("idx").setTypes("type") .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.count(false))) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.count(false))) .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); @@ -62,7 +63,7 @@ public class ShardSizeTermsIT extends ShardSizeTestCase { SearchResponse response = client().prepareSearch("idx").setTypes("type") .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3).shardSize(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.count(false))) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.count(false))) .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); @@ -86,7 +87,7 @@ public class ShardSizeTermsIT extends ShardSizeTestCase { SearchResponse response = client().prepareSearch("idx").setTypes("type") .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(Terms.Order.count(false))) + .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(BucketOrder.count(false))) .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); @@ -110,7 +111,7 @@ public class ShardSizeTermsIT extends ShardSizeTestCase { SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting(routing1) .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3) - 
.collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(Terms.Order.count(false))) + .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(BucketOrder.count(false))) .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); @@ -133,7 +134,7 @@ public class ShardSizeTermsIT extends ShardSizeTestCase { SearchResponse response = client().prepareSearch("idx").setTypes("type") .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.term(true))) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.key(true))) .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); @@ -156,7 +157,7 @@ public class ShardSizeTermsIT extends ShardSizeTestCase { SearchResponse response = client().prepareSearch("idx").setTypes("type") .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.count(false))) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.count(false))) .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); @@ -179,7 +180,7 @@ public class ShardSizeTermsIT extends ShardSizeTestCase { SearchResponse response = client().prepareSearch("idx").setTypes("type") .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3).shardSize(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.count(false))) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.count(false))) .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); @@ -202,7 +203,7 @@ public class ShardSizeTermsIT extends ShardSizeTestCase { SearchResponse response = client().prepareSearch("idx").setTypes("type") .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3) - 
.collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(Terms.Order.count(false))) + .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(BucketOrder.count(false))) .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); @@ -226,7 +227,7 @@ public class ShardSizeTermsIT extends ShardSizeTestCase { SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting(routing1) .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(Terms.Order.count(false))) + .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(BucketOrder.count(false))) .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); @@ -249,7 +250,7 @@ public class ShardSizeTermsIT extends ShardSizeTestCase { SearchResponse response = client().prepareSearch("idx").setTypes("type") .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.term(true))) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.key(true))) .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); @@ -272,7 +273,7 @@ public class ShardSizeTermsIT extends ShardSizeTestCase { SearchResponse response = client().prepareSearch("idx").setTypes("type") .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.count(false))) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.count(false))) .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); @@ -295,7 +296,7 @@ public class ShardSizeTermsIT extends ShardSizeTestCase { SearchResponse response = client().prepareSearch("idx").setTypes("type") .setQuery(matchAllQuery()) 
.addAggregation(terms("keys").field("key").size(3).shardSize(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.count(false))) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.count(false))) .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); @@ -318,7 +319,7 @@ public class ShardSizeTermsIT extends ShardSizeTestCase { SearchResponse response = client().prepareSearch("idx").setTypes("type") .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(Terms.Order.count(false))) + .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(BucketOrder.count(false))) .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); @@ -341,7 +342,7 @@ public class ShardSizeTermsIT extends ShardSizeTestCase { SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting(routing1) .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(Terms.Order.count(false))) + .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(BucketOrder.count(false))) .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); @@ -364,7 +365,7 @@ public class ShardSizeTermsIT extends ShardSizeTestCase { SearchResponse response = client().prepareSearch("idx").setTypes("type") .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.term(true))) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.key(true))) .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/StringTermsIT.java 
b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/StringTermsIT.java index df69cfcfa93..0c93ff2f6bb 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/StringTermsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/StringTermsIT.java @@ -42,10 +42,12 @@ import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; import org.elasticsearch.search.aggregations.metrics.avg.Avg; +import org.elasticsearch.search.aggregations.metrics.max.Max; import org.elasticsearch.search.aggregations.metrics.stats.Stats; import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats; import org.elasticsearch.search.aggregations.metrics.sum.Sum; import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCount; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import org.hamcrest.Matchers; @@ -72,6 +74,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.count; import static org.elasticsearch.search.aggregations.AggregationBuilders.extendedStats; import static org.elasticsearch.search.aggregations.AggregationBuilders.filter; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; +import static org.elasticsearch.search.aggregations.AggregationBuilders.max; import static org.elasticsearch.search.aggregations.AggregationBuilders.stats; import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; @@ -129,9 +132,15 @@ public class StringTermsIT extends AbstractTermsTestCase { List builders = new ArrayList<>(); for (int i = 0; i < 5; i++) { builders.add(client().prepareIndex("idx", "type").setSource( - 
jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val" + i).field("i", i) - .field("tag", i < 5 / 2 + 1 ? "more" : "less").startArray(MULTI_VALUED_FIELD_NAME).value("val" + i) - .value("val" + (i + 1)).endArray().endObject())); + jsonBuilder().startObject() + .field(SINGLE_VALUED_FIELD_NAME, "val" + i) + .field("i", i) + .field("constant", 1) + .field("tag", i < 5 / 2 + 1 ? "more" : "less") + .startArray(MULTI_VALUED_FIELD_NAME) + .value("val" + i) + .value("val" + (i + 1)) + .endArray().endObject())); } getMultiSortDocs(builders); @@ -456,15 +465,15 @@ public class StringTermsIT extends AbstractTermsTestCase { } } - + public void testSingleValueFieldWithPartitionedFiltering() throws Exception { runTestFieldWithPartitionedFiltering(SINGLE_VALUED_FIELD_NAME); } - + public void testMultiValueFieldWithPartitionedFiltering() throws Exception { runTestFieldWithPartitionedFiltering(MULTI_VALUED_FIELD_NAME); } - + private void runTestFieldWithPartitionedFiltering(String field) throws Exception { // Find total number of unique terms SearchResponse allResponse = client().prepareSearch("idx").setTypes("type") @@ -492,8 +501,8 @@ public class StringTermsIT extends AbstractTermsTestCase { } } assertEquals(expectedCardinality, foundTerms.size()); - } - + } + public void testSingleValueFieldWithMaxSize() throws Exception { SearchResponse response = client() @@ -503,7 +512,7 @@ public class StringTermsIT extends AbstractTermsTestCase { .executionHint(randomExecutionHint()) .field(SINGLE_VALUED_FIELD_NAME).size(20) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.term(true))) // we need to sort by terms cause we're checking the first 20 values + .order(BucketOrder.key(true))) // we need to sort by terms cause we're checking the first 20 values .execute().actionGet(); assertSearchResponse(response); @@ -527,7 +536,7 @@ public class StringTermsIT extends AbstractTermsTestCase { .setTypes("type") .addAggregation( 
terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.term(true))).execute() + .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.key(true))).execute() .actionGet(); assertSearchResponse(response); @@ -552,7 +561,7 @@ public class StringTermsIT extends AbstractTermsTestCase { .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.term(false))).execute() + .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.key(false))).execute() .actionGet(); assertSearchResponse(response); @@ -944,7 +953,7 @@ public class StringTermsIT extends AbstractTermsTestCase { .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.aggregation("avg_i", asc)) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.aggregation("avg_i", asc)) .subAggregation(avg("avg_i").field("i"))).execute().actionGet(); assertSearchResponse(response); @@ -966,6 +975,34 @@ public class StringTermsIT extends AbstractTermsTestCase { } } + public void testSingleValuedFieldOrderedByTieBreaker() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .setTypes("type") + .addAggregation( + terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.aggregation("max_constant", randomBoolean())) + .subAggregation(max("max_constant").field("constant"))).execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), 
equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + int i = 0; + for (Terms.Bucket bucket : terms.getBuckets()) { + assertThat(bucket, notNullValue()); + assertThat(key(bucket), equalTo("val" + i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + Max max = bucket.getAggregations().get("max_constant"); + assertThat(max, notNullValue()); + assertThat(max.getValue(), equalTo((double) 1)); + i++; + } + } + public void testSingleValuedFieldOrderedByIllegalAgg() throws Exception { boolean asc = true; try { @@ -975,7 +1012,7 @@ public class StringTermsIT extends AbstractTermsTestCase { .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("inner_terms>avg", asc)) + .order(BucketOrder.aggregation("inner_terms>avg", asc)) .subAggregation(terms("inner_terms").field(MULTI_VALUED_FIELD_NAME).subAggregation(avg("avg").field("i")))) .execute().actionGet(); fail("Expected an exception"); @@ -985,7 +1022,7 @@ public class StringTermsIT extends AbstractTermsTestCase { ElasticsearchException rootCause = rootCauses[0]; if (rootCause instanceof AggregationExecutionException) { AggregationExecutionException aggException = (AggregationExecutionException) rootCause; - assertThat(aggException.getMessage(), Matchers.startsWith("Invalid terms aggregation order path")); + assertThat(aggException.getMessage(), Matchers.startsWith("Invalid aggregation order path")); } else { throw e; } @@ -1002,7 +1039,7 @@ public class StringTermsIT extends AbstractTermsTestCase { .setTypes("type") .addAggregation( terms("tags").executionHint(randomExecutionHint()).field("tag") - .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.aggregation("filter", asc)) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.aggregation("filter", asc)) .subAggregation(filter("filter", 
QueryBuilders.matchAllQuery()))).execute().actionGet(); assertSearchResponse(response); @@ -1041,7 +1078,7 @@ public class StringTermsIT extends AbstractTermsTestCase { .executionHint(randomExecutionHint()) .field("tag") .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("filter1>filter2>stats.max", asc)) + .order(BucketOrder.aggregation("filter1>filter2>stats.max", asc)) .subAggregation( filter("filter1", QueryBuilders.matchAllQuery()).subAggregation( filter("filter2", QueryBuilders.matchAllQuery()).subAggregation( @@ -1104,7 +1141,7 @@ public class StringTermsIT extends AbstractTermsTestCase { .executionHint(randomExecutionHint()) .field("tag") .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("filter1>" + filter2Name + ">" + statsName + ".max", asc)) + .order(BucketOrder.aggregation("filter1>" + filter2Name + ">" + statsName + ".max", asc)) .subAggregation( filter("filter1", QueryBuilders.matchAllQuery()).subAggregation( filter(filter2Name, QueryBuilders.matchAllQuery()).subAggregation( @@ -1167,7 +1204,7 @@ public class StringTermsIT extends AbstractTermsTestCase { .executionHint(randomExecutionHint()) .field("tag") .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("filter1>" + filter2Name + ">" + statsName + "[max]", asc)) + .order(BucketOrder.aggregation("filter1>" + filter2Name + ">" + statsName + "[max]", asc)) .subAggregation( filter("filter1", QueryBuilders.matchAllQuery()).subAggregation( filter(filter2Name, QueryBuilders.matchAllQuery()).subAggregation( @@ -1222,7 +1259,7 @@ public class StringTermsIT extends AbstractTermsTestCase { .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("avg_i", true))).execute().actionGet(); + .order(BucketOrder.aggregation("avg_i", true))).execute().actionGet(); 
fail("Expected search to fail when trying to sort terms aggregation by sug-aggregation that doesn't exist"); @@ -1240,7 +1277,7 @@ public class StringTermsIT extends AbstractTermsTestCase { .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("values", true)) + .order(BucketOrder.aggregation("values", true)) .subAggregation(terms("values").field("i").collectMode(randomFrom(SubAggCollectionMode.values())))) .execute().actionGet(); @@ -1262,7 +1299,7 @@ public class StringTermsIT extends AbstractTermsTestCase { .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("stats.foo", true)).subAggregation(stats("stats").field("i"))) + .order(BucketOrder.aggregation("stats.foo", true)).subAggregation(stats("stats").field("i"))) .execute().actionGet(); fail("Expected search to fail when trying to sort terms aggregation by multi-valued sug-aggregation " + "with an unknown specified metric to order by. 
response had " + response.getFailedShards() + " failed shards."); @@ -1281,7 +1318,7 @@ public class StringTermsIT extends AbstractTermsTestCase { .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("stats", true)).subAggregation(stats("stats").field("i"))).execute() + .order(BucketOrder.aggregation("stats", true)).subAggregation(stats("stats").field("i"))).execute() .actionGet(); fail("Expected search to fail when trying to sort terms aggregation by multi-valued sug-aggregation " @@ -1300,7 +1337,7 @@ public class StringTermsIT extends AbstractTermsTestCase { .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.aggregation("avg_i", asc)) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.aggregation("avg_i", asc)) .subAggregation(avg("avg_i").field("i"))).execute().actionGet(); assertSearchResponse(response); @@ -1331,7 +1368,7 @@ public class StringTermsIT extends AbstractTermsTestCase { .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.aggregation("stats.avg", asc)) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.aggregation("stats.avg", asc)) .subAggregation(stats("stats").field("i"))).execute().actionGet(); assertSearchResponse(response); @@ -1361,7 +1398,7 @@ public class StringTermsIT extends AbstractTermsTestCase { .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.aggregation("stats.avg", asc)) + 
.collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.aggregation("stats.avg", asc)) .subAggregation(stats("stats").field("i"))).execute().actionGet(); assertSearchResponse(response); @@ -1393,7 +1430,7 @@ public class StringTermsIT extends AbstractTermsTestCase { .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("stats.sum_of_squares", asc)) + .order(BucketOrder.aggregation("stats.sum_of_squares", asc)) .subAggregation(extendedStats("stats").field("i"))).execute().actionGet(); assertSearchResponse(response); @@ -1425,7 +1462,7 @@ public class StringTermsIT extends AbstractTermsTestCase { .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("stats.sum_of_squares", asc)) + .order(BucketOrder.aggregation("stats.sum_of_squares", asc)) .subAggregation(extendedStats("stats").field("i")) .subAggregation(terms("subTerms").field("s_values").collectMode(randomFrom(SubAggCollectionMode.values())))) .execute().actionGet(); @@ -1464,46 +1501,46 @@ public class StringTermsIT extends AbstractTermsTestCase { public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAndTermsDesc() throws Exception { String[] expectedKeys = new String[] { "val1", "val2", "val4", "val3", "val7", "val6", "val5" }; - assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("avg_l", true), Terms.Order.term(false)); + assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("avg_l", true), BucketOrder.key(false)); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAndTermsAsc() throws Exception { String[] expectedKeys = new String[] { "val1", "val2", "val3", "val4", "val5", "val6", "val7" }; - assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("avg_l", true), 
Terms.Order.term(true)); + assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("avg_l", true), BucketOrder.key(true)); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationDescAndTermsAsc() throws Exception { String[] expectedKeys = new String[] { "val5", "val6", "val7", "val3", "val4", "val2", "val1" }; - assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("avg_l", false), Terms.Order.term(true)); + assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("avg_l", false), BucketOrder.key(true)); } public void testSingleValuedFieldOrderedByCountAscAndSingleValueSubAggregationAsc() throws Exception { String[] expectedKeys = new String[] { "val6", "val7", "val3", "val4", "val5", "val1", "val2" }; - assertMultiSortResponse(expectedKeys, Terms.Order.count(true), Terms.Order.aggregation("avg_l", true)); + assertMultiSortResponse(expectedKeys, BucketOrder.count(true), BucketOrder.aggregation("avg_l", true)); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscSingleValueSubAggregationAsc() throws Exception { String[] expectedKeys = new String[] { "val6", "val7", "val3", "val5", "val4", "val1", "val2" }; - assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("sum_d", true), Terms.Order.aggregation("avg_l", true)); + assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("sum_d", true), BucketOrder.aggregation("avg_l", true)); } public void testSingleValuedFieldOrderedByThreeCriteria() throws Exception { String[] expectedKeys = new String[] { "val2", "val1", "val4", "val5", "val3", "val6", "val7" }; - assertMultiSortResponse(expectedKeys, Terms.Order.count(false), Terms.Order.aggregation("sum_d", false), - Terms.Order.aggregation("avg_l", false)); + assertMultiSortResponse(expectedKeys, BucketOrder.count(false), BucketOrder.aggregation("sum_d", false), + BucketOrder.aggregation("avg_l", false)); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAsCompound() throws Exception { 
String[] expectedKeys = new String[] { "val1", "val2", "val3", "val4", "val5", "val6", "val7" }; - assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("avg_l", true)); + assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("avg_l", true)); } - private void assertMultiSortResponse(String[] expectedKeys, Terms.Order... order) { + private void assertMultiSortResponse(String[] expectedKeys, BucketOrder... order) { SearchResponse response = client() .prepareSearch("sort_idx") .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.compound(order)) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.compound(order)) .subAggregation(avg("avg_l").field("l")).subAggregation(sum("sum_d").field("d"))).execute().actionGet(); assertSearchResponse(response); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java index 9ed32ca2e7b..204de33440d 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java @@ -27,8 +27,8 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket; -import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import java.util.ArrayList; @@ -347,7 +347,7 @@ public class TermsDocCountErrorIT extends 
ESIntegTestCase { .field(STRING_FIELD_NAME) .showTermDocCountError(true) .size(10000).shardSize(10000) - .order(Order.count(true)) + .order(BucketOrder.count(true)) .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); @@ -360,7 +360,7 @@ public class TermsDocCountErrorIT extends ESIntegTestCase { .showTermDocCountError(true) .size(size) .shardSize(shardSize) - .order(Order.count(true)) + .order(BucketOrder.count(true)) .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); @@ -378,7 +378,7 @@ public class TermsDocCountErrorIT extends ESIntegTestCase { .field(STRING_FIELD_NAME) .showTermDocCountError(true) .size(10000).shardSize(10000) - .order(Order.term(true)) + .order(BucketOrder.key(true)) .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); @@ -391,7 +391,7 @@ public class TermsDocCountErrorIT extends ESIntegTestCase { .showTermDocCountError(true) .size(size) .shardSize(shardSize) - .order(Order.term(true)) + .order(BucketOrder.key(true)) .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); @@ -409,7 +409,7 @@ public class TermsDocCountErrorIT extends ESIntegTestCase { .field(STRING_FIELD_NAME) .showTermDocCountError(true) .size(10000).shardSize(10000) - .order(Order.term(false)) + .order(BucketOrder.key(false)) .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); @@ -422,7 +422,7 @@ public class TermsDocCountErrorIT extends ESIntegTestCase { .showTermDocCountError(true) .size(size) .shardSize(shardSize) - .order(Order.term(false)) + .order(BucketOrder.key(false)) .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); @@ -440,7 +440,7 @@ public class TermsDocCountErrorIT extends ESIntegTestCase { .field(STRING_FIELD_NAME) .showTermDocCountError(true) .size(10000).shardSize(10000) - .order(Order.aggregation("sortAgg", true)) + .order(BucketOrder.aggregation("sortAgg", true)) 
.collectMode(randomFrom(SubAggCollectionMode.values())) .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME))) .execute().actionGet(); @@ -454,7 +454,7 @@ public class TermsDocCountErrorIT extends ESIntegTestCase { .showTermDocCountError(true) .size(size) .shardSize(shardSize) - .order(Order.aggregation("sortAgg", true)) + .order(BucketOrder.aggregation("sortAgg", true)) .collectMode(randomFrom(SubAggCollectionMode.values())) .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME))) .execute().actionGet(); @@ -473,7 +473,7 @@ public class TermsDocCountErrorIT extends ESIntegTestCase { .field(STRING_FIELD_NAME) .showTermDocCountError(true) .size(10000).shardSize(10000) - .order(Order.aggregation("sortAgg", false)) + .order(BucketOrder.aggregation("sortAgg", false)) .collectMode(randomFrom(SubAggCollectionMode.values())) .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME))) .execute().actionGet(); @@ -487,7 +487,7 @@ public class TermsDocCountErrorIT extends ESIntegTestCase { .showTermDocCountError(true) .size(size) .shardSize(shardSize) - .order(Order.aggregation("sortAgg", false)) + .order(BucketOrder.aggregation("sortAgg", false)) .collectMode(randomFrom(SubAggCollectionMode.values())) .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME))) .execute().actionGet(); @@ -583,7 +583,7 @@ public class TermsDocCountErrorIT extends ESIntegTestCase { .field(LONG_FIELD_NAME) .showTermDocCountError(true) .size(10000).shardSize(10000) - .order(Order.count(true)) + .order(BucketOrder.count(true)) .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); @@ -596,7 +596,7 @@ public class TermsDocCountErrorIT extends ESIntegTestCase { .showTermDocCountError(true) .size(size) .shardSize(shardSize) - .order(Order.count(true)) + .order(BucketOrder.count(true)) .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); @@ -614,7 +614,7 @@ public class TermsDocCountErrorIT extends ESIntegTestCase { .field(LONG_FIELD_NAME) 
.showTermDocCountError(true) .size(10000).shardSize(10000) - .order(Order.term(true)) + .order(BucketOrder.key(true)) .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); @@ -627,7 +627,7 @@ public class TermsDocCountErrorIT extends ESIntegTestCase { .showTermDocCountError(true) .size(size) .shardSize(shardSize) - .order(Order.term(true)) + .order(BucketOrder.key(true)) .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); @@ -645,7 +645,7 @@ public class TermsDocCountErrorIT extends ESIntegTestCase { .field(LONG_FIELD_NAME) .showTermDocCountError(true) .size(10000).shardSize(10000) - .order(Order.term(false)) + .order(BucketOrder.key(false)) .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); @@ -658,7 +658,7 @@ public class TermsDocCountErrorIT extends ESIntegTestCase { .showTermDocCountError(true) .size(size) .shardSize(shardSize) - .order(Order.term(false)) + .order(BucketOrder.key(false)) .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); @@ -676,7 +676,7 @@ public class TermsDocCountErrorIT extends ESIntegTestCase { .field(LONG_FIELD_NAME) .showTermDocCountError(true) .size(10000).shardSize(10000) - .order(Order.aggregation("sortAgg", true)) + .order(BucketOrder.aggregation("sortAgg", true)) .collectMode(randomFrom(SubAggCollectionMode.values())) .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME))) .execute().actionGet(); @@ -690,7 +690,7 @@ public class TermsDocCountErrorIT extends ESIntegTestCase { .showTermDocCountError(true) .size(size) .shardSize(shardSize) - .order(Order.aggregation("sortAgg", true)) + .order(BucketOrder.aggregation("sortAgg", true)) .collectMode(randomFrom(SubAggCollectionMode.values())) .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME))) .execute().actionGet(); @@ -709,7 +709,7 @@ public class TermsDocCountErrorIT extends ESIntegTestCase { .field(LONG_FIELD_NAME) .showTermDocCountError(true) 
.size(10000).shardSize(10000) - .order(Order.aggregation("sortAgg", false)) + .order(BucketOrder.aggregation("sortAgg", false)) .collectMode(randomFrom(SubAggCollectionMode.values())) .subAggregation(sum("sortAgg").field(DOUBLE_FIELD_NAME))) .execute().actionGet(); @@ -723,7 +723,7 @@ public class TermsDocCountErrorIT extends ESIntegTestCase { .showTermDocCountError(true) .size(size) .shardSize(shardSize) - .order(Order.aggregation("sortAgg", false)) + .order(BucketOrder.aggregation("sortAgg", false)) .collectMode(randomFrom(SubAggCollectionMode.values())) .subAggregation(sum("sortAgg").field(DOUBLE_FIELD_NAME))) .execute().actionGet(); @@ -819,7 +819,7 @@ public class TermsDocCountErrorIT extends ESIntegTestCase { .field(DOUBLE_FIELD_NAME) .showTermDocCountError(true) .size(10000).shardSize(10000) - .order(Order.count(true)) + .order(BucketOrder.count(true)) .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); @@ -832,7 +832,7 @@ public class TermsDocCountErrorIT extends ESIntegTestCase { .showTermDocCountError(true) .size(size) .shardSize(shardSize) - .order(Order.count(true)) + .order(BucketOrder.count(true)) .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); @@ -850,7 +850,7 @@ public class TermsDocCountErrorIT extends ESIntegTestCase { .field(DOUBLE_FIELD_NAME) .showTermDocCountError(true) .size(10000).shardSize(10000) - .order(Order.term(true)) + .order(BucketOrder.key(true)) .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); @@ -863,7 +863,7 @@ public class TermsDocCountErrorIT extends ESIntegTestCase { .showTermDocCountError(true) .size(size) .shardSize(shardSize) - .order(Order.term(true)) + .order(BucketOrder.key(true)) .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); @@ -881,7 +881,7 @@ public class TermsDocCountErrorIT extends ESIntegTestCase { .field(DOUBLE_FIELD_NAME) .showTermDocCountError(true) .size(10000).shardSize(10000) - 
.order(Order.term(false)) + .order(BucketOrder.key(false)) .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); @@ -894,7 +894,7 @@ public class TermsDocCountErrorIT extends ESIntegTestCase { .showTermDocCountError(true) .size(size) .shardSize(shardSize) - .order(Order.term(false)) + .order(BucketOrder.key(false)) .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); @@ -912,7 +912,7 @@ public class TermsDocCountErrorIT extends ESIntegTestCase { .field(DOUBLE_FIELD_NAME) .showTermDocCountError(true) .size(10000).shardSize(10000) - .order(Order.aggregation("sortAgg", true)) + .order(BucketOrder.aggregation("sortAgg", true)) .collectMode(randomFrom(SubAggCollectionMode.values())) .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME))) .execute().actionGet(); @@ -926,7 +926,7 @@ public class TermsDocCountErrorIT extends ESIntegTestCase { .showTermDocCountError(true) .size(size) .shardSize(shardSize) - .order(Order.aggregation("sortAgg", true)) + .order(BucketOrder.aggregation("sortAgg", true)) .collectMode(randomFrom(SubAggCollectionMode.values())) .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME))) .execute().actionGet(); @@ -945,7 +945,7 @@ public class TermsDocCountErrorIT extends ESIntegTestCase { .field(DOUBLE_FIELD_NAME) .showTermDocCountError(true) .size(10000).shardSize(10000) - .order(Order.aggregation("sortAgg", false)) + .order(BucketOrder.aggregation("sortAgg", false)) .collectMode(randomFrom(SubAggCollectionMode.values())) .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME))) .execute().actionGet(); @@ -959,7 +959,7 @@ public class TermsDocCountErrorIT extends ESIntegTestCase { .showTermDocCountError(true) .size(size) .shardSize(shardSize) - .order(Order.aggregation("sortAgg", false)) + .order(BucketOrder.aggregation("sortAgg", false)) .collectMode(randomFrom(SubAggCollectionMode.values())) .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME))) .execute().actionGet(); @@ -968,7 +968,7 @@ public 
class TermsDocCountErrorIT extends ESIntegTestCase { assertUnboundedDocCountError(size, accurateResponse, testResponse); } - + /** * Test a case where we know exactly how many of each term is on each shard * so we know the exact error value for each term. To do this we search over @@ -984,39 +984,39 @@ public class TermsDocCountErrorIT extends ESIntegTestCase { .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); assertSearchResponse(response); - + Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getDocCountError(), equalTo(46L)); List buckets = terms.getBuckets(); assertThat(buckets, notNullValue()); assertThat(buckets.size(), equalTo(5)); - + Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKey(), equalTo("A")); assertThat(bucket.getDocCount(), equalTo(100L)); assertThat(bucket.getDocCountError(), equalTo(0L)); - + bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat(bucket.getKey(), equalTo("Z")); assertThat(bucket.getDocCount(), equalTo(52L)); assertThat(bucket.getDocCountError(), equalTo(2L)); - + bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat(bucket.getKey(), equalTo("C")); assertThat(bucket.getDocCount(), equalTo(50L)); assertThat(bucket.getDocCountError(), equalTo(15L)); - - + + bucket = buckets.get(3); assertThat(bucket, notNullValue()); assertThat(bucket.getKey(), equalTo("G")); assertThat(bucket.getDocCount(), equalTo(45L)); assertThat(bucket.getDocCountError(), equalTo(2L)); - + bucket = buckets.get(4); assertThat(bucket, notNullValue()); assertThat(bucket.getKey(), equalTo("B")); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsShardMinDocCountIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsShardMinDocCountIT.java index f7e3d9a61b5..c094c245dac 100644 --- 
a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsShardMinDocCountIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsShardMinDocCountIT.java @@ -26,6 +26,7 @@ import org.elasticsearch.search.aggregations.bucket.filter.InternalFilter; import org.elasticsearch.search.aggregations.bucket.significant.SignificantTerms; import org.elasticsearch.search.aggregations.bucket.significant.SignificantTermsAggregatorFactory; import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import java.util.ArrayList; @@ -129,7 +130,7 @@ public class TermsShardMinDocCountIT extends ESIntegTestCase { // first, check that indeed when not setting the shardMinDocCount parameter 0 terms are returned SearchResponse response = client().prepareSearch(index) .addAggregation( - terms("myTerms").field("text").minDocCount(2).size(2).executionHint(randomExecutionHint()).order(Terms.Order.term(true)) + terms("myTerms").field("text").minDocCount(2).size(2).executionHint(randomExecutionHint()).order(BucketOrder.key(true)) ) .execute() .actionGet(); @@ -140,7 +141,7 @@ public class TermsShardMinDocCountIT extends ESIntegTestCase { response = client().prepareSearch(index) .addAggregation( - terms("myTerms").field("text").minDocCount(2).shardMinDocCount(2).size(2).executionHint(randomExecutionHint()).order(Terms.Order.term(true)) + terms("myTerms").field("text").minDocCount(2).shardMinDocCount(2).size(2).executionHint(randomExecutionHint()).order(BucketOrder.key(true)) ) .execute() .actionGet(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsTests.java index 42f6ef78f4b..73c275cfd23 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsTests.java +++ 
b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsTests.java @@ -23,10 +23,10 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.automaton.RegExp; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; -import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; +import org.elasticsearch.search.aggregations.BucketOrder; import java.util.ArrayList; import java.util.List; @@ -155,8 +155,12 @@ public class TermsTests extends BaseAggregationTestCase factory.includeExclude(incExc); } if (randomBoolean()) { - List order = randomOrder(); - factory.order(order); + List order = randomOrder(); + if(order.size() == 1 && randomBoolean()) { + factory.order(order.get(0)); + } else { + factory.order(order); + } } if (randomBoolean()) { factory.showTermDocCountError(randomBoolean()); @@ -164,20 +168,20 @@ public class TermsTests extends BaseAggregationTestCase return factory; } - private List randomOrder() { - List orders = new ArrayList<>(); + private List randomOrder() { + List orders = new ArrayList<>(); switch (randomInt(4)) { case 0: - orders.add(Terms.Order.term(randomBoolean())); + orders.add(BucketOrder.key(randomBoolean())); break; case 1: - orders.add(Terms.Order.count(randomBoolean())); + orders.add(BucketOrder.count(randomBoolean())); break; case 2: - orders.add(Terms.Order.aggregation(randomAlphaOfLengthBetween(3, 20), randomBoolean())); + orders.add(BucketOrder.aggregation(randomAlphaOfLengthBetween(3, 20), randomBoolean())); break; case 3: - orders.add(Terms.Order.aggregation(randomAlphaOfLengthBetween(3, 20), randomAlphaOfLengthBetween(3, 20), randomBoolean())); + 
orders.add(BucketOrder.aggregation(randomAlphaOfLengthBetween(3, 20), randomAlphaOfLengthBetween(3, 20), randomBoolean())); break; case 4: int numOrders = randomIntBetween(1, 3); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogramTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogramTests.java index 50cb4530493..c34b6093e2d 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogramTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogramTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregationTestCase; import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.InternalAggregationTestCase; import org.joda.time.DateTime; @@ -66,7 +67,7 @@ public class InternalDateHistogramTests extends InternalMultiBucketAggregationTe buckets.add(i, new InternalDateHistogram.Bucket(key, randomIntBetween(1, 100), keyed, format, aggregations)); } - InternalOrder order = (InternalOrder) randomFrom(InternalHistogram.Order.KEY_ASC, InternalHistogram.Order.KEY_DESC); + BucketOrder order = randomFrom(BucketOrder.key(true), BucketOrder.key(false)); return new InternalDateHistogram(name, buckets, order, 1, 0L, null, format, keyed, pipelineAggregators, metaData); } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogramTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogramTests.java index b4ecf828e7d..cb37dd9a373 100644 --- 
a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogramTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogramTests.java @@ -22,11 +22,11 @@ package org.elasticsearch.search.aggregations.bucket.histogram; import org.apache.lucene.util.TestUtil; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregationTestCase; import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.test.InternalAggregationTestCase; import java.util.ArrayList; import java.util.List; @@ -58,7 +58,7 @@ public class InternalHistogramTests extends InternalMultiBucketAggregationTestCa final int docCount = TestUtil.nextInt(random(), 1, 50); buckets.add(new InternalHistogram.Bucket(base + i * interval, docCount, keyed, format, aggregations)); } - InternalOrder order = (InternalOrder) randomFrom(InternalHistogram.Order.KEY_ASC, InternalHistogram.Order.KEY_DESC); + BucketOrder order = BucketOrder.key(randomBoolean()); return new InternalHistogram(name, buckets, order, 1, null, format, keyed, pipelineAggregators, metaData); } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTermsTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTermsTests.java index 45531e27dde..c2a0b726b86 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTermsTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTermsTests.java @@ -21,6 +21,7 @@ package org.elasticsearch.search.aggregations.bucket.terms; import 
org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; @@ -40,7 +41,7 @@ public class DoubleTermsTests extends InternalTermsTestCase { InternalAggregations aggregations, boolean showTermDocCountError, long docCountError) { - Terms.Order order = Terms.Order.count(false); + BucketOrder order = BucketOrder.count(false); long minDocCount = 1; int requiredSize = 3; int shardSize = requiredSize + 2; diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsTests.java index cc97e4989a9..941997d3372 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsTests.java @@ -21,6 +21,7 @@ package org.elasticsearch.search.aggregations.bucket.terms; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; @@ -40,7 +41,7 @@ public class LongTermsTests extends InternalTermsTestCase { InternalAggregations aggregations, boolean showTermDocCountError, long docCountError) { - Terms.Order order = Terms.Order.count(false); + BucketOrder order = BucketOrder.count(false); long minDocCount = 1; int requiredSize = 3; int shardSize = requiredSize + 2; diff --git 
a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsTests.java index e909358be5e..bdafb139d78 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsTests.java @@ -22,6 +22,7 @@ package org.elasticsearch.search.aggregations.bucket.terms; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; @@ -41,7 +42,7 @@ public class StringTermsTests extends InternalTermsTestCase { InternalAggregations aggregations, boolean showTermDocCountError, long docCountError) { - Terms.Order order = Terms.Order.count(false); + BucketOrder order = BucketOrder.count(false); long minDocCount = 1; int requiredSize = 3; int shardSize = requiredSize + 2; diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java index 1648d8ede9f..7b93653fff8 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java @@ -37,6 +37,7 @@ import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.search.aggregations.AggregatorTestCase; import org.elasticsearch.search.aggregations.InternalAggregation; +import 
org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.support.ValueType; import java.io.IOException; @@ -70,7 +71,7 @@ public class TermsAggregatorTests extends AggregatorTestCase { TermsAggregationBuilder aggregationBuilder = new TermsAggregationBuilder("_name", ValueType.STRING) .executionHint(executionMode.toString()) .field("string") - .order(Terms.Order.term(true)); + .order(BucketOrder.key(true)); MappedFieldType fieldType = new KeywordFieldMapper.KeywordFieldType(); fieldType.setName("string"); fieldType.setHasDocValues(true ); @@ -99,7 +100,7 @@ public class TermsAggregatorTests extends AggregatorTestCase { TermsAggregationBuilder aggregationBuilder = new TermsAggregationBuilder("_name", ValueType.LONG) .executionHint(executionMode.toString()) .field("number") - .order(Terms.Order.term(true)); + .order(BucketOrder.key(true)); List aggs = new ArrayList<> (); int numLongs = randomIntBetween(1, 3); for (int i = 0; i < numLongs; i++) { diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java index c952f43eb30..c51c0aec4bb 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java @@ -36,8 +36,8 @@ import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; import org.elasticsearch.search.aggregations.metrics.avg.Avg; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.lookup.LeafSearchLookup; import org.elasticsearch.search.lookup.SearchLookup; @@ -326,7 +326,7 @@ public class AvgIT 
extends AbstractNumericTestCase { @Override public void testOrderByEmptyAggregation() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(terms("terms").field("value").order(Order.compound(Order.aggregation("filter>avg", true))) + .addAggregation(terms("terms").field("value").order(BucketOrder.compound(BucketOrder.aggregation("filter>avg", true))) .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(avg("avg").field("value")))) .get(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java index 3903dd8b0bc..7de333e8127 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java @@ -30,9 +30,9 @@ import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.missing.Missing; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats; import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats.Bounds; +import org.elasticsearch.search.aggregations.BucketOrder; import java.util.Collection; import java.util.Collections; @@ -595,7 +595,8 @@ public class ExtendedStatsIT extends AbstractNumericTestCase { @Override public void testOrderByEmptyAggregation() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(terms("terms").field("value").order(Order.compound(Order.aggregation("filter>extendedStats.avg", true))) + 
.addAggregation(terms("terms").field("value") + .order(BucketOrder.compound(BucketOrder.aggregation("filter>extendedStats.avg", true))) .subAggregation( filter("filter", termQuery("value", 100)).subAggregation(extendedStats("extendedStats").field("value")))) .get(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java index 5b56e6b7efb..586af22755c 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java @@ -29,11 +29,11 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Order; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanks; import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesMethod; +import org.elasticsearch.search.aggregations.BucketOrder; import java.util.Arrays; import java.util.Collection; @@ -485,7 +485,7 @@ public class HDRPercentileRanksIT extends AbstractNumericTestCase { .subAggregation( percentileRanks("percentile_ranks").field("value").method(PercentilesMethod.HDR) .numberOfSignificantValueDigits(sigDigits).values(99)) - .order(Order.aggregation("percentile_ranks", "99", asc))).execute().actionGet(); + .order(BucketOrder.aggregation("percentile_ranks", "99", asc))).execute().actionGet(); assertHitCount(searchResponse, 10); @@ -506,7 +506,7 @@ public class HDRPercentileRanksIT 
extends AbstractNumericTestCase { @Override public void testOrderByEmptyAggregation() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(terms("terms").field("value").order(Terms.Order.compound(Terms.Order.aggregation("filter>ranks.99", true))) + .addAggregation(terms("terms").field("value").order(BucketOrder.compound(BucketOrder.aggregation("filter>ranks.99", true))) .subAggregation(filter("filter", termQuery("value", 100)) .subAggregation(percentileRanks("ranks").method(PercentilesMethod.HDR).values(99).field("value")))) .get(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java index 56fb14402ad..ae745e1f1ad 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java @@ -30,11 +30,11 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Order; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; import org.elasticsearch.search.aggregations.metrics.percentiles.Percentiles; import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesMethod; +import org.elasticsearch.search.aggregations.BucketOrder; import java.util.Arrays; import java.util.Collection; @@ -474,7 +474,7 @@ public class HDRPercentilesIT extends AbstractNumericTestCase { .method(PercentilesMethod.HDR) .numberOfSignificantValueDigits(sigDigits) .percentiles(99)) - 
.order(Order.aggregation("percentiles", "99", asc))).execute().actionGet(); + .order(BucketOrder.aggregation("percentiles", "99", asc))).execute().actionGet(); assertHitCount(searchResponse, 10); @@ -497,7 +497,7 @@ public class HDRPercentilesIT extends AbstractNumericTestCase { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation( - terms("terms").field("value").order(Terms.Order.compound(Terms.Order.aggregation("filter>percentiles.99", true))) + terms("terms").field("value").order(BucketOrder.compound(BucketOrder.aggregation("filter>percentiles.99", true))) .subAggregation(filter("filter", termQuery("value", 100)) .subAggregation(percentiles("percentiles").method(PercentilesMethod.HDR).field("value")))) .get(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxIT.java index 03eb9a09237..a192b3c4a12 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxIT.java @@ -29,8 +29,8 @@ import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; import org.elasticsearch.search.aggregations.metrics.max.Max; +import org.elasticsearch.search.aggregations.BucketOrder; import java.util.Collection; import java.util.Collections; @@ -328,7 +328,7 @@ public class MaxIT extends AbstractNumericTestCase { @Override public void testOrderByEmptyAggregation() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - 
.addAggregation(terms("terms").field("value").order(Order.compound(Order.aggregation("filter>max", true))) + .addAggregation(terms("terms").field("value").order(BucketOrder.compound(BucketOrder.aggregation("filter>max", true))) .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(max("max").field("value")))) .get(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MinIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MinIT.java index cba2ba9eb97..7f2522c04bb 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MinIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MinIT.java @@ -29,8 +29,8 @@ import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; import org.elasticsearch.search.aggregations.metrics.min.Min; +import org.elasticsearch.search.aggregations.BucketOrder; import java.util.Collection; import java.util.Collections; @@ -340,7 +340,7 @@ public class MinIT extends AbstractNumericTestCase { @Override public void testOrderByEmptyAggregation() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(terms("terms").field("value").order(Order.compound(Order.aggregation("filter>min", true))) + .addAggregation(terms("terms").field("value").order(BucketOrder.compound(BucketOrder.aggregation("filter>min", true))) .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(min("min").field("value")))) .get(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java index 
9231f093963..0fcf794ee1d 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java @@ -32,8 +32,8 @@ import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; import org.elasticsearch.search.aggregations.metrics.stats.Stats; +import org.elasticsearch.search.aggregations.BucketOrder; import java.util.Collection; import java.util.Collections; @@ -447,7 +447,7 @@ public class StatsIT extends AbstractNumericTestCase { @Override public void testOrderByEmptyAggregation() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(terms("terms").field("value").order(Order.compound(Order.aggregation("filter>stats.avg", true))) + .addAggregation(terms("terms").field("value").order(BucketOrder.compound(BucketOrder.aggregation("filter>stats.avg", true))) .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(stats("stats").field("value")))) .get(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumIT.java index 227ffc7251b..86f59659ebc 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumIT.java @@ -36,8 +36,8 @@ import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.terms.Terms; 
-import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; import org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.lookup.LeafSearchLookup; import org.elasticsearch.search.lookup.SearchLookup; @@ -325,7 +325,7 @@ public class SumIT extends AbstractNumericTestCase { @Override public void testOrderByEmptyAggregation() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(terms("terms").field("value").order(Order.compound(Order.aggregation("filter>sum", true))) + .addAggregation(terms("terms").field("value").order(BucketOrder.compound(BucketOrder.aggregation("filter>sum", true))) .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(sum("sum").field("value")))) .get(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java index f1943747ceb..11ff1edbc53 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java @@ -30,12 +30,12 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Order; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanks; import 
org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanksAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesMethod; +import org.elasticsearch.search.aggregations.BucketOrder; import java.util.Arrays; import java.util.Collection; @@ -435,7 +435,7 @@ public class TDigestPercentileRanksIT extends AbstractNumericTestCase { .addAggregation( histogram("histo").field("value").interval(2L) .subAggregation(randomCompression(percentileRanks("percentile_ranks").field("value").values(99))) - .order(Order.aggregation("percentile_ranks", "99", asc))) + .order(BucketOrder.aggregation("percentile_ranks", "99", asc))) .execute().actionGet(); assertHitCount(searchResponse, 10); @@ -457,7 +457,7 @@ public class TDigestPercentileRanksIT extends AbstractNumericTestCase { @Override public void testOrderByEmptyAggregation() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(terms("terms").field("value").order(Terms.Order.compound(Terms.Order.aggregation("filter>ranks.99", true))) + .addAggregation(terms("terms").field("value").order(BucketOrder.compound(BucketOrder.aggregation("filter>ranks.99", true))) .subAggregation(filter("filter", termQuery("value", 100)) .subAggregation(percentileRanks("ranks").method(PercentilesMethod.TDIGEST).values(99).field("value")))) .get(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java index 2589e9977a6..89c7d12c746 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java @@ -30,12 +30,12 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.bucket.filter.Filter; import 
org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Order; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; import org.elasticsearch.search.aggregations.metrics.percentiles.Percentiles; import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesMethod; +import org.elasticsearch.search.aggregations.BucketOrder; import java.util.Arrays; import java.util.Collection; @@ -419,7 +419,7 @@ public class TDigestPercentilesIT extends AbstractNumericTestCase { .addAggregation( histogram("histo").field("value").interval(2L) .subAggregation(randomCompression(percentiles("percentiles").field("value").percentiles(99))) - .order(Order.aggregation("percentiles", "99", asc))) + .order(BucketOrder.aggregation("percentiles", "99", asc))) .execute().actionGet(); assertHitCount(searchResponse, 10); @@ -442,7 +442,7 @@ public class TDigestPercentilesIT extends AbstractNumericTestCase { public void testOrderByEmptyAggregation() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) .addAggregation( - terms("terms").field("value").order(Terms.Order.compound(Terms.Order.aggregation("filter>percentiles.99", true))) + terms("terms").field("value").order(BucketOrder.compound(BucketOrder.aggregation("filter>percentiles.99", true))) .subAggregation(filter("filter", termQuery("value", 100)) .subAggregation(percentiles("percentiles").method(PercentilesMethod.TDIGEST).field("value")))) .get(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java index 
6819fddf3e3..563fac1ba7d 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java @@ -47,6 +47,7 @@ import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode; import org.elasticsearch.search.aggregations.metrics.max.Max; import org.elasticsearch.search.aggregations.metrics.tophits.TopHits; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; import org.elasticsearch.search.fetch.subphase.highlight.HighlightField; import org.elasticsearch.search.rescore.RescoreBuilder; @@ -398,7 +399,7 @@ public class TopHitsIT extends ESIntegTestCase { .executionHint(randomExecutionHint()) .collectMode(SubAggCollectionMode.BREADTH_FIRST) .field(TERMS_AGGS_FIELD) - .order(Terms.Order.aggregation("max", false)) + .order(BucketOrder.aggregation("max", false)) .subAggregation(max("max").field(SORT_FIELD)) .subAggregation(topHits("hits").size(3)) ).get(); @@ -494,7 +495,7 @@ public class TopHitsIT extends ESIntegTestCase { .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(TERMS_AGGS_FIELD) - .order(Terms.Order.aggregation("max_sort", false)) + .order(BucketOrder.aggregation("max_sort", false)) .subAggregation( topHits("hits").sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC)).trackScores(true) ) @@ -535,7 +536,7 @@ public class TopHitsIT extends ESIntegTestCase { .setQuery(matchQuery("text", "term rare")) .addAggregation( terms("terms").executionHint(randomExecutionHint()).field("group") - .order(Terms.Order.aggregation("max_score", false)).subAggregation(topHits("hits").size(1)) + .order(BucketOrder.aggregation("max_score", false)).subAggregation(topHits("hits").size(1)) .subAggregation(max("max_score").field("value"))).get(); 
assertSearchResponse(response); @@ -908,7 +909,6 @@ public class TopHitsIT extends ESIntegTestCase { histogram("dates") .field("date") .interval(5) - .order(Histogram.Order.aggregation("to-comments", true)) .subAggregation( nested("to-comments", "comments") .subAggregation(topHits("comments") diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketIT.java index 4f6ff0e32ed..1d29518a300 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketIT.java @@ -24,10 +24,10 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; import org.elasticsearch.search.aggregations.metrics.sum.Sum; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import java.util.ArrayList; @@ -129,7 +129,7 @@ public class AvgBucketIT extends ESIntegTestCase { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue)) @@ -211,7 +211,7 @@ public class AvgBucketIT extends ESIntegTestCase { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) 
.extendedBounds(minRandomValue, maxRandomValue) @@ -264,7 +264,7 @@ public class AvgBucketIT extends ESIntegTestCase { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue) @@ -337,7 +337,7 @@ public class AvgBucketIT extends ESIntegTestCase { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue)) diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java index 607124ecb15..fea143bddcc 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java @@ -26,12 +26,12 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats.Bounds; import org.elasticsearch.search.aggregations.metrics.sum.Sum; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.extended.ExtendedStatsBucket; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; 
import java.util.ArrayList; @@ -200,7 +200,7 @@ public class ExtendedStatsBucketIT extends ESIntegTestCase { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue)) @@ -300,7 +300,7 @@ public class ExtendedStatsBucketIT extends ESIntegTestCase { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue) @@ -362,7 +362,7 @@ public class ExtendedStatsBucketIT extends ESIntegTestCase { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue) @@ -445,7 +445,7 @@ public class ExtendedStatsBucketIT extends ESIntegTestCase { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue) @@ -475,7 +475,7 @@ public class ExtendedStatsBucketIT extends ESIntegTestCase { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue)) diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketIT.java index 632f11f7ec7..50b512ee194 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketIT.java +++ 
b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketIT.java @@ -25,11 +25,11 @@ import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; import org.elasticsearch.search.aggregations.metrics.sum.Sum; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.InternalBucketMetricValue; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import java.util.ArrayList; @@ -138,7 +138,7 @@ public class MaxBucketIT extends ESIntegTestCase { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue)) @@ -230,7 +230,7 @@ public class MaxBucketIT extends ESIntegTestCase { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue) @@ -335,7 +335,7 @@ public class MaxBucketIT extends ESIntegTestCase { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue) @@ -413,7 +413,7 @@ public class MaxBucketIT extends ESIntegTestCase { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + 
.order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue)) diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/MinBucketIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/MinBucketIT.java index 04fdd0c3133..33678f146db 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/MinBucketIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/MinBucketIT.java @@ -24,11 +24,11 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; import org.elasticsearch.search.aggregations.metrics.sum.Sum; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.InternalBucketMetricValue; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import java.util.ArrayList; @@ -135,7 +135,7 @@ public class MinBucketIT extends ESIntegTestCase { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue)) @@ -227,7 +227,7 @@ public class MinBucketIT extends ESIntegTestCase { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, 
maxRandomValue) @@ -285,7 +285,7 @@ public class MinBucketIT extends ESIntegTestCase { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue) @@ -363,7 +363,7 @@ public class MinBucketIT extends ESIntegTestCase { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue)) diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketIT.java index e23e5441431..62f9ad462e9 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketIT.java @@ -29,6 +29,7 @@ import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; import org.elasticsearch.search.aggregations.metrics.sum.Sum; import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.percentile.PercentilesBucket; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import java.util.ArrayList; @@ -133,7 +134,7 @@ public class PercentilesBucketIT extends ESIntegTestCase { .addAggregation( terms("terms") .field("tag") - .order(Terms.Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue)) @@ -248,7 +249,7 @@ public class PercentilesBucketIT extends ESIntegTestCase { .addAggregation( terms("terms") .field("tag") - 
.order(Terms.Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue) @@ -301,7 +302,7 @@ public class PercentilesBucketIT extends ESIntegTestCase { .addAggregation( terms("terms") .field("tag") - .order(Terms.Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue) @@ -435,7 +436,7 @@ public class PercentilesBucketIT extends ESIntegTestCase { .addAggregation( terms("terms") .field("tag") - .order(Terms.Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue)) @@ -466,7 +467,7 @@ public class PercentilesBucketIT extends ESIntegTestCase { .addAggregation( terms("terms") .field("tag") - .order(Terms.Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue)) @@ -525,7 +526,7 @@ public class PercentilesBucketIT extends ESIntegTestCase { .addAggregation( terms("terms") .field("tag") - .order(Terms.Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue)) diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketIT.java index 231005f1b5b..ff5a85f198e 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketIT.java @@ -24,11 +24,11 @@ import org.elasticsearch.action.search.SearchResponse; import 
org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; import org.elasticsearch.search.aggregations.metrics.sum.Sum; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.StatsBucket; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import java.util.ArrayList; @@ -136,7 +136,7 @@ public class StatsBucketIT extends ESIntegTestCase { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue)) @@ -230,7 +230,7 @@ public class StatsBucketIT extends ESIntegTestCase { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue) @@ -289,7 +289,7 @@ public class StatsBucketIT extends ESIntegTestCase { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue) @@ -368,7 +368,7 @@ public class StatsBucketIT extends ESIntegTestCase { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue)) diff --git 
a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/SumBucketIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/SumBucketIT.java index 048dfac8648..69451435d58 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/SumBucketIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/SumBucketIT.java @@ -24,10 +24,10 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; import org.elasticsearch.search.aggregations.metrics.sum.Sum; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import java.util.ArrayList; @@ -126,7 +126,7 @@ public class SumBucketIT extends ESIntegTestCase { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue)) @@ -202,7 +202,7 @@ public class SumBucketIT extends ESIntegTestCase { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue) @@ -252,7 +252,7 @@ public class SumBucketIT extends ESIntegTestCase { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) 
.extendedBounds(minRandomValue, maxRandomValue) @@ -322,7 +322,7 @@ public class SumBucketIT extends ESIntegTestCase { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue)) diff --git a/docs/java-api/aggregations/bucket/datehistogram-aggregation.asciidoc b/docs/java-api/aggregations/bucket/datehistogram-aggregation.asciidoc index 99b871730e6..1fe945077fd 100644 --- a/docs/java-api/aggregations/bucket/datehistogram-aggregation.asciidoc +++ b/docs/java-api/aggregations/bucket/datehistogram-aggregation.asciidoc @@ -67,3 +67,7 @@ key [2005-01-01T00:00:00.000Z], date [2005], doc_count [1] key [2007-01-01T00:00:00.000Z], date [2007], doc_count [2] key [2008-01-01T00:00:00.000Z], date [2008], doc_count [3] -------------------------------------------------- + +===== Order + +Supports the same order functionality as the <>. diff --git a/docs/java-api/aggregations/bucket/histogram-aggregation.asciidoc b/docs/java-api/aggregations/bucket/histogram-aggregation.asciidoc index 28e9cd3ecd0..59bb555401c 100644 --- a/docs/java-api/aggregations/bucket/histogram-aggregation.asciidoc +++ b/docs/java-api/aggregations/bucket/histogram-aggregation.asciidoc @@ -42,3 +42,7 @@ for (Histogram.Bucket entry : agg.getBuckets()) { logger.info("key [{}], doc_count [{}]", key, docCount); } -------------------------------------------------- + +===== Order + +Supports the same order functionality as the <>. 
diff --git a/docs/java-api/aggregations/bucket/terms-aggregation.asciidoc b/docs/java-api/aggregations/bucket/terms-aggregation.asciidoc index ad83faccd31..db584fd4ced 100644 --- a/docs/java-api/aggregations/bucket/terms-aggregation.asciidoc +++ b/docs/java-api/aggregations/bucket/terms-aggregation.asciidoc @@ -39,7 +39,14 @@ for (Terms.Bucket entry : genders.getBuckets()) { } -------------------------------------------------- -==== Order +===== Order + +Import bucket ordering strategy classes: + +[source,java] +-------------------------------------------------- +import org.elasticsearch.search.aggregations.BucketOrder; +-------------------------------------------------- Ordering the buckets by their `doc_count` in an ascending manner: @@ -48,7 +55,7 @@ Ordering the buckets by their `doc_count` in an ascending manner: AggregationBuilders .terms("genders") .field("gender") - .order(Terms.Order.count(true)) + .order(BucketOrder.count(true)) -------------------------------------------------- Ordering the buckets alphabetically by their terms in an ascending manner: @@ -58,7 +65,7 @@ Ordering the buckets alphabetically by their terms in an ascending manner: AggregationBuilders .terms("genders") .field("gender") - .order(Terms.Order.term(true)) + .order(BucketOrder.key(true)) -------------------------------------------------- Ordering the buckets by single value metrics sub-aggregation (identified by the aggregation name): @@ -68,7 +75,22 @@ Ordering the buckets by single value metrics sub-aggregation (identified by the AggregationBuilders .terms("genders") .field("gender") - .order(Terms.Order.aggregation("avg_height", false)) + .order(BucketOrder.aggregation("avg_height", false)) + .subAggregation( + AggregationBuilders.avg("avg_height").field("height") + ) +-------------------------------------------------- + +Ordering the buckets by multiple criteria: + +[source,java] +-------------------------------------------------- +AggregationBuilders + .terms("genders") + 
.field("gender") + .order(BucketOrder.compound( // in order of priority: + BucketOrder.aggregation("avg_height", false), // sort by sub-aggregation first + BucketOrder.count(true))) // then bucket count as a tie-breaker .subAggregation( AggregationBuilders.avg("avg_height").field("height") ) diff --git a/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc b/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc index b7619b175df..47265a0b224 100644 --- a/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc @@ -390,3 +390,10 @@ POST /sales/_search?size=0 // TEST[setup:sales] <1> Documents without a value in the `publish_date` field will fall into the same bucket as documents that have the value `2000-01-01`. + +==== Order + +By default the returned buckets are sorted by their `key` ascending, though the order behaviour can be controlled using +the `order` setting. Supports the same `order` functionality as the <>. + +deprecated[6.0.0, Use `_key` instead of `_time` to order buckets by their dates/keys] diff --git a/docs/reference/aggregations/bucket/histogram-aggregation.asciidoc b/docs/reference/aggregations/bucket/histogram-aggregation.asciidoc index de828c62aa9..380d06258da 100644 --- a/docs/reference/aggregations/bucket/histogram-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/histogram-aggregation.asciidoc @@ -179,120 +179,8 @@ POST /sales/_search?size=0 ==== Order -By default the returned buckets are sorted by their `key` ascending, though the order behaviour can be controlled -using the `order` setting. 
- -Ordering the buckets by their key - descending: - -[source,js] --------------------------------------------------- -POST /sales/_search?size=0 -{ - "aggs" : { - "prices" : { - "histogram" : { - "field" : "price", - "interval" : 50, - "order" : { "_key" : "desc" } - } - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[setup:sales] - -Ordering the buckets by their `doc_count` - ascending: - -[source,js] --------------------------------------------------- -POST /sales/_search?size=0 -{ - "aggs" : { - "prices" : { - "histogram" : { - "field" : "price", - "interval" : 50, - "order" : { "_count" : "asc" } - } - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[setup:sales] - -If the histogram aggregation has a direct metrics sub-aggregation, the latter can determine the order of the buckets: - -[source,js] --------------------------------------------------- -POST /sales/_search?size=0 -{ - "aggs" : { - "prices" : { - "histogram" : { - "field" : "price", - "interval" : 50, - "order" : { "price_stats.min" : "asc" } <1> - }, - "aggs" : { - "price_stats" : { "stats" : {"field" : "price"} } - } - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[setup:sales] - -<1> The `{ "price_stats.min" : asc" }` will sort the buckets based on `min` value of their `price_stats` sub-aggregation. - -It is also possible to order the buckets based on a "deeper" aggregation in the hierarchy. This is supported as long -as the aggregations path are of a single-bucket type, where the last aggregation in the path may either by a single-bucket -one or a metrics one. If it's a single-bucket type, the order will be defined by the number of docs in the bucket (i.e. 
`doc_count`), -in case it's a metrics one, the same rules as above apply (where the path must indicate the metric name to sort by in case of -a multi-value metrics aggregation, and in case of a single-value metrics aggregation the sort will be applied on that value). - -The path must be defined in the following form: - -// https://en.wikipedia.org/wiki/Extended_Backus%E2%80%93Naur_Form -[source,ebnf] --------------------------------------------------- -AGG_SEPARATOR = '>' ; -METRIC_SEPARATOR = '.' ; -AGG_NAME = ; -METRIC = ; -PATH = [ , ]* [ , ] ; --------------------------------------------------- - -[source,js] --------------------------------------------------- -POST /sales/_search?size=0 -{ - "aggs" : { - "prices" : { - "histogram" : { - "field" : "price", - "interval" : 50, - "order" : { "promoted_products>rating_stats.avg" : "desc" } <1> - }, - "aggs" : { - "promoted_products" : { - "filter" : { "term" : { "promoted" : true }}, - "aggs" : { - "rating_stats" : { "stats" : { "field" : "rating" }} - } - } - } - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[setup:sales] - -The above will sort the buckets based on the avg rating among the promoted products - +By default the returned buckets are sorted by their `key` ascending, though the order behaviour can be controlled using +the `order` setting. Supports the same `order` functionality as the <>. 
==== Offset diff --git a/docs/reference/aggregations/bucket/terms-aggregation.asciidoc b/docs/reference/aggregations/bucket/terms-aggregation.asciidoc index 0b028c1a940..90a5586d9e4 100644 --- a/docs/reference/aggregations/bucket/terms-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/terms-aggregation.asciidoc @@ -280,13 +280,14 @@ Ordering the buckets alphabetically by their terms in an ascending manner: "genres" : { "terms" : { "field" : "genre", - "order" : { "_term" : "asc" } + "order" : { "_key" : "asc" } } } } } -------------------------------------------------- +deprecated[6.0.0, Use `_key` instead of `_term` to order buckets by their term] Ordering the buckets by single value metrics sub-aggregation (identified by the aggregation name): diff --git a/docs/reference/migration/migrate_6_0/java.asciidoc b/docs/reference/migration/migrate_6_0/java.asciidoc index 5693d508526..43feb15f84b 100644 --- a/docs/reference/migration/migrate_6_0/java.asciidoc +++ b/docs/reference/migration/migrate_6_0/java.asciidoc @@ -26,4 +26,12 @@ When sending a request through the request builders e.g. client.prepareSearch(). be possible to call `addListener` against the returned `ListenableActionFuture`. With this change an `ActionFuture` is returned instead, which is consistent with what the `Client` methods return, hence it is not possible to associate the future with listeners. The `execute` method that accept a listener -as an argument can be used instead. \ No newline at end of file +as an argument can be used instead. + +==== `Terms.Order` and `Histogram.Order` classes replace by `BucketOrder` + +The `terms`, `histogram`, and `date_histogram` aggregation code has been refactored to use common +code for ordering buckets. The `BucketOrder` class must be used instead of `Terms.Order` and +`Histogram.Order`. The `static` methods in the `BucketOrder` class must be called instead of directly +accessing internal order instances, e.g. 
`BucketOrder.count(boolean)` and `BucketOrder.aggregation(String, boolean)`. +Use `BucketOrder.key(boolean)` to order the `terms` aggregation buckets by `_term`. diff --git a/docs/reference/migration/migrate_6_0/search.asciidoc b/docs/reference/migration/migrate_6_0/search.asciidoc index 80d67eae72d..82c2ba8f717 100644 --- a/docs/reference/migration/migrate_6_0/search.asciidoc +++ b/docs/reference/migration/migrate_6_0/search.asciidoc @@ -51,6 +51,8 @@ * The `disable_coord` parameter of the `bool` and `common_terms` queries has been removed. If provided, it will be ignored and issue a deprecation warning. +* The `template` query has been removed. This query was deprecated since 5.0 + ==== Search shards API The search shards API no longer accepts the `type` url parameter, which didn't diff --git a/docs/reference/modules/scripting.asciidoc b/docs/reference/modules/scripting.asciidoc index 460b3be461d..c6a8a252b93 100644 --- a/docs/reference/modules/scripting.asciidoc +++ b/docs/reference/modules/scripting.asciidoc @@ -51,7 +51,7 @@ certain tasks. |built-in |templates -|<> +|<> |n/a |you write it! |expert API @@ -83,6 +83,4 @@ include::scripting/painless-debugging.asciidoc[] include::scripting/expression.asciidoc[] -include::scripting/native.asciidoc[] - -include::scripting/advanced-scripting.asciidoc[] +include::scripting/engine.asciidoc[] diff --git a/docs/reference/modules/scripting/advanced-scripting.asciidoc b/docs/reference/modules/scripting/advanced-scripting.asciidoc deleted file mode 100644 index a5fcc12777d..00000000000 --- a/docs/reference/modules/scripting/advanced-scripting.asciidoc +++ /dev/null @@ -1,189 +0,0 @@ -[[modules-advanced-scripting]] -=== Advanced text scoring in scripts - -experimental[The functionality described on this page is considered experimental and may be changed or removed in a future release] - -Text features, such as term or document frequency for a specific term can be -accessed in scripts with the `_index` variable. 
This can be useful if, for -example, you want to implement your own scoring model using for example a -script inside a <>. -Statistics over the document collection are computed *per shard*, not per -index. - -It should be noted that the `_index` variable is not supported in the painless language, but `_index` is defined when using the groovy language. - -[float] -=== Nomenclature: - - -[horizontal] -`df`:: - - document frequency. The number of documents a term appears in. Computed - per field. - - -`tf`:: - - term frequency. The number times a term appears in a field in one specific - document. - -`ttf`:: - - total term frequency. The number of times this term appears in all - documents, that is, the sum of `tf` over all documents. Computed per - field. - -`df` and `ttf` are computed per shard and therefore these numbers can vary -depending on the shard the current document resides in. - - -[float] -=== Shard statistics: - -`_index.numDocs()`:: - - Number of documents in shard. - -`_index.maxDoc()`:: - - Maximal document number in shard. - -`_index.numDeletedDocs()`:: - - Number of deleted documents in shard. - - -[float] -=== Field statistics: - -Field statistics can be accessed with a subscript operator like this: -`_index['FIELD']`. - - -`_index['FIELD'].docCount()`:: - - Number of documents containing the field `FIELD`. Does not take deleted documents into account. - -`_index['FIELD'].sumttf()`:: - - Sum of `ttf` over all terms that appear in field `FIELD` in all documents. - -`_index['FIELD'].sumdf()`:: - - The sum of `df` s over all terms that appear in field `FIELD` in all - documents. - - -Field statistics are computed per shard and therefore these numbers can vary -depending on the shard the current document resides in. -The number of terms in a field cannot be accessed using the `_index` variable. See <> for how to do that. 
- -[float] -=== Term statistics: - -Term statistics for a field can be accessed with a subscript operator like -this: `_index['FIELD']['TERM']`. This will never return null, even if term or field does not exist. -If you do not need the term frequency, call `_index['FIELD'].get('TERM', 0)` -to avoid unnecessary initialization of the frequencies. The flag will have only -affect is your set the <> to `docs`. - - -`_index['FIELD']['TERM'].df()`:: - - `df` of term `TERM` in field `FIELD`. Will be returned, even if the term - is not present in the current document. - -`_index['FIELD']['TERM'].ttf()`:: - - The sum of term frequencies of term `TERM` in field `FIELD` over all - documents. Will be returned, even if the term is not present in the - current document. - -`_index['FIELD']['TERM'].tf()`:: - - `tf` of term `TERM` in field `FIELD`. Will be 0 if the term is not present - in the current document. - - -[float] -=== Term positions, offsets and payloads: - -If you need information on the positions of terms in a field, call -`_index['FIELD'].get('TERM', flag)` where flag can be - -[horizontal] -`_POSITIONS`:: if you need the positions of the term -`_OFFSETS`:: if you need the offsets of the term -`_PAYLOADS`:: if you need the payloads of the term -`_CACHE`:: if you need to iterate over all positions several times - -The iterator uses the underlying lucene classes to iterate over positions. For efficiency reasons, you can only iterate over positions once. If you need to iterate over the positions several times, set the `_CACHE` flag. - -You can combine the operators with a `|` if you need more than one info. For -example, the following will return an object holding the positions and payloads, -as well as all statistics: - - - `_index['FIELD'].get('TERM', _POSITIONS | _PAYLOADS)` - - -Positions can be accessed with an iterator that returns an object -(`POS_OBJECT`) holding position, offsets and payload for each term position. 
- -`POS_OBJECT.position`:: - - The position of the term. - -`POS_OBJECT.startOffset`:: - - The start offset of the term. - -`POS_OBJECT.endOffset`:: - - The end offset of the term. - -`POS_OBJECT.payload`:: - - The payload of the term. - -`POS_OBJECT.payloadAsInt(missingValue)`:: - - The payload of the term converted to integer. If the current position has - no payload, the `missingValue` will be returned. Call this only if you - know that your payloads are integers. - -`POS_OBJECT.payloadAsFloat(missingValue)`:: - - The payload of the term converted to float. If the current position has no - payload, the `missingValue` will be returned. Call this only if you know - that your payloads are floats. - -`POS_OBJECT.payloadAsString()`:: - - The payload of the term converted to string. If the current position has - no payload, `null` will be returned. Call this only if you know that your - payloads are strings. - - -Example: sums up all payloads for the term `foo`. - -[source,groovy] ---------------------------------------------------------- -termInfo = _index['my_field'].get('foo',_PAYLOADS); -score = 0; -for (pos in termInfo) { - score = score + pos.payloadAsInt(0); -} -return score; ---------------------------------------------------------- - - -[float] -=== Term vectors: - -The `_index` variable can only be used to gather statistics for single terms. If you want to use information on all terms in a field, you must store the term vectors (see <>). To access them, call -`_index.termVectors()` to get a -https://lucene.apache.org/core/4_0_0/core/org/apache/lucene/index/Fields.html[Fields] -instance. This object can then be used as described in https://lucene.apache.org/core/4_0_0/core/org/apache/lucene/index/Fields.html[lucene doc] to iterate over fields and then for each field iterate over each term in the field. -The method will return null if the term vectors were not stored. 
diff --git a/docs/reference/modules/scripting/engine.asciidoc b/docs/reference/modules/scripting/engine.asciidoc new file mode 100644 index 00000000000..207722d8feb --- /dev/null +++ b/docs/reference/modules/scripting/engine.asciidoc @@ -0,0 +1,57 @@ +[[modules-scripting-engine]] +=== Advanced scripts using script engines + +A `ScriptEngine` is a backend for implementing a scripting language. It may also +be used to write scripts that need to use advanced internals of scripting. For example, +a script that wants to use term frequencies while scoring. + +The plugin {plugins}/plugin-authors.html[documentation] has more information on +how to write a plugin so that Elasticsearch will properly load it. To register +the `ScriptEngine`, your plugin should implement the `ScriptPlugin` interface +and override the `getScriptEngine(Settings settings)` method. + +The following is an example of a custom `ScriptEngine` which uses the language +name `expert_scripts`. It implements a single script called `pure_df` which +may be used as a search script to override each document's score as +the document frequency of a provided term. 
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{docdir}/../../plugins/examples/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript/ExpertScriptPlugin.java[expert_engine] +-------------------------------------------------- + +You can execute the script by specifying its `lang` as `expert_scripts`, and the name +of the script as the the script source: + + +[source,js] +-------------------------------------------------- +POST /_search +{ + "query": { + "function_score": { + "query": { + "match": { + "body": "foo" + } + }, + "functions": [ + { + "script_score": { + "script": { + "inline": "pure_df", + "lang" : "expert_scripts", + "params": { + "field": "body", + "term": "foo" + } + } + } + } + ] + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[skip:we don't have an expert script plugin installed to test this] diff --git a/docs/reference/modules/scripting/native.asciidoc b/docs/reference/modules/scripting/native.asciidoc deleted file mode 100644 index bc0ad3bcaf9..00000000000 --- a/docs/reference/modules/scripting/native.asciidoc +++ /dev/null @@ -1,86 +0,0 @@ -[[modules-scripting-native]] -=== Native (Java) Scripts - -Sometimes `painless` and <> aren't enough. For those times you can -implement a native script. - -The best way to implement a native script is to write a plugin and install it. -The plugin {plugins}/plugin-authors.html[documentation] has more information on -how to write a plugin so that Elasticsearch will properly load it. - -To register the actual script you'll need to implement `NativeScriptFactory` -to construct the script. The actual script will extend either -`AbstractExecutableScript` or `AbstractSearchScript`. The second one is likely -the most useful and has several helpful subclasses you can extend like -`AbstractLongSearchScript` and `AbstractDoubleSearchScript`. 
-Finally, your plugin should register the native script by implementing the -`ScriptPlugin` interface. - -If you squashed the whole thing into one class it'd look like: - -[source,java] --------------------------------------------------- -public class MyNativeScriptPlugin extends Plugin implements ScriptPlugin { - - @Override - public List getNativeScripts() { - return Collections.singletonList(new MyNativeScriptFactory()); - } - - public static class MyNativeScriptFactory implements NativeScriptFactory { - @Override - public ExecutableScript newScript(@Nullable Map params) { - return new MyNativeScript(); - } - @Override - public boolean needsScores() { - return false; - } - @Override - public String getName() { - return "my_script"; - } - } - - public static class MyNativeScript extends AbstractDoubleSearchScript { - @Override - public double runAsDouble() { - double a = (double) source().get("a"); - double b = (double) source().get("b"); - return a * b; - } - } -} --------------------------------------------------- - -You can execute the script by specifying its `lang` as `native`, and the name -of the script as the `id`: - - -[source,js] --------------------------------------------------- -POST /_search -{ - "query": { - "function_score": { - "query": { - "match": { - "body": "foo" - } - }, - "functions": [ - { - "script_score": { - "script": { - "inline": "my_script", - "lang" : "native" - } - } - } - ] - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[skip:we don't have a native plugin installed to test this] diff --git a/docs/reference/query-dsl/special-queries.asciidoc b/docs/reference/query-dsl/special-queries.asciidoc index b705f01c6fc..3e3c140d6f5 100644 --- a/docs/reference/query-dsl/special-queries.asciidoc +++ b/docs/reference/query-dsl/special-queries.asciidoc @@ -9,12 +9,6 @@ This group contains queries which do not fit into the other groups: This query finds documents which are similar to the specified text, 
document, or collection of documents. -<>:: - -The `template` query accepts a Mustache template (either inline, indexed, or -from a file), and a map of parameters, and combines the two to generate the -final query to execute. - <>:: This query allows a script to act as a filter. Also see the @@ -27,8 +21,6 @@ the specified document. include::mlt-query.asciidoc[] -include::template-query.asciidoc[] - include::script-query.asciidoc[] include::percolate-query.asciidoc[] diff --git a/docs/reference/query-dsl/template-query.asciidoc b/docs/reference/query-dsl/template-query.asciidoc deleted file mode 100644 index 2d3b5724d49..00000000000 --- a/docs/reference/query-dsl/template-query.asciidoc +++ /dev/null @@ -1,127 +0,0 @@ -[[query-dsl-template-query]] -=== Template Query - -deprecated[5.0.0, Use the <> API] - -A query that accepts a query template and a map of key/value pairs to fill in -template parameters. Templating is based on Mustache. For simple token substitution all you provide -is a query containing some variable that you want to substitute and the actual -values: - -[source,js] ------------------------------------------- -GET /_search -{ - "query": { - "template": { - "inline": { "match": { "text": "{{query_string}}" }}, - "params" : { - "query_string" : "all about search" - } - } - } -} ------------------------------------------- -// CONSOLE -// TEST[warning:[template] query is deprecated, use search template api instead] - -The above request is translated into: - -[source,js] ------------------------------------------- -GET /_search -{ - "query": { - "match": { - "text": "all about search" - } - } -} ------------------------------------------- -// CONSOLE - -Alternatively passing the template as an escaped string works as well: - -[source,js] ------------------------------------------- -GET /_search -{ - "query": { - "template": { - "inline": "{ \"match\": { \"text\": \"{{query_string}}\" }}", <1> - "params" : { - "query_string" : "all about search" - } - } 
- } -} ------------------------------------------- -// CONSOLE -// TEST[warning:[template] query is deprecated, use search template api instead] - -<1> New line characters (`\n`) should be escaped as `\\n` or removed, - and quotes (`"`) should be escaped as `\\"`. - -==== Stored templates - -You can register a template by storing it in the `config/scripts` directory, in a file using the `.mustache` extension. -In order to execute the stored template, reference it by name in the `file` -parameter: - - -[source,js] ------------------------------------------- -GET /_search -{ - "query": { - "template": { - "file": "my_template", <1> - "params" : { - "query_string" : "all about search" - } - } - } -} ------------------------------------------- -// CONSOLE -// TEST[warning:[template] query is deprecated, use search template api instead] - -<1> Name of the query template in `config/scripts/`, i.e., `my_template.mustache`. - -Alternatively, you can register a query template in the cluster state with: - -[source,js] ------------------------------------------- -PUT /_search/template/my_template -{ - "template": { "match": { "text": "{{query_string}}" }} -} ------------------------------------------- -// CONSOLE - -and refer to it in the `template` query with the `id` parameter: - - -[source,js] ------------------------------------------- -GET /_search -{ - "query": { - "template": { - "stored": "my_template", <1> - "params" : { - "query_string" : "all about search" - } - } - } -} ------------------------------------------- -// CONSOLE -// TEST[continued] -// TEST[warning:[template] query is deprecated, use search template api instead] - -<1> Name of the query template in `config/scripts/`, i.e., `my_template.mustache`. - - -There is also a dedicated `template` endpoint, allows you to template an entire search request. -Please see <> for more details. 
diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index cf5d1ef5f4d..0644b549812 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -463,3 +463,13 @@ index that make warmers not necessary anymore. === Index time boosting The index time boost mapping has been replaced with query time boost (see <>). + +[role="exclude",id="modules-scripting-native"] +=== Native scripting + +Native scripts have been replaced with writing custom `ScriptEngine` backends (see <>). + +[role="exclude",id="modules-advanced-scripting"] +=== Advanced scripting + +Using `_index` in scripts has been replaced with writing `ScriptEngine` backends (see <>). diff --git a/docs/reference/search/suggesters/phrase-suggest.asciidoc b/docs/reference/search/suggesters/phrase-suggest.asciidoc index f4d4f46e3a8..53c0c9be4e4 100644 --- a/docs/reference/search/suggesters/phrase-suggest.asciidoc +++ b/docs/reference/search/suggesters/phrase-suggest.asciidoc @@ -213,8 +213,8 @@ The response contains suggestions scored by the most likely spell correction fir Checks each suggestion against the specified `query` to prune suggestions for which no matching docs exist in the index. The collate query for a suggestion is run only on the local shard from which the suggestion has - been generated from. The `query` must be specified, and it is run as - a <>. + been generated from. The `query` must be specified and it can be templated, + see <> for more information. The current suggestion is automatically made available as the `{{suggestion}}` variable, which should be used in your query. 
You can still specify your own template `params` -- the `suggestion` value will be added to the diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustachePlugin.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustachePlugin.java index 7b5c97aa9a7..105fa3f6f80 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustachePlugin.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustachePlugin.java @@ -39,8 +39,6 @@ import java.util.Arrays; import java.util.List; import java.util.function.Supplier; -import static java.util.Collections.singletonList; - public class MustachePlugin extends Plugin implements ScriptPlugin, ActionPlugin, SearchPlugin { @Override @@ -54,11 +52,6 @@ public class MustachePlugin extends Plugin implements ScriptPlugin, ActionPlugin new ActionHandler<>(MultiSearchTemplateAction.INSTANCE, TransportMultiSearchTemplateAction.class)); } - @Override - public List> getQueries() { - return singletonList(new QuerySpec<>(TemplateQueryBuilder.NAME, TemplateQueryBuilder::new, TemplateQueryBuilder::fromXContent)); - } - @Override public List getRestHandlers(Settings settings, RestController restController, ClusterSettings clusterSettings, IndexScopedSettings indexScopedSettings, SettingsFilter settingsFilter, IndexNameExpressionResolver indexNameExpressionResolver, diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TemplateQueryBuilder.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TemplateQueryBuilder.java deleted file mode 100644 index 66a5c09977b..00000000000 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TemplateQueryBuilder.java +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.script.mustache; - -import org.apache.lucene.search.Query; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.query.AbstractQueryBuilder; -import org.elasticsearch.index.query.BoolQueryBuilder; -import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.query.QueryParseContext; -import org.elasticsearch.index.query.QueryRewriteContext; -import org.elasticsearch.index.query.QueryShardContext; -import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptType; - -import java.io.IOException; -import java.util.Collections; -import java.util.Map; -import java.util.Objects; - -/** - * Facilitates creating template query requests. 
- * */ -@Deprecated -// TODO remove this class in 6.0 -public class TemplateQueryBuilder extends AbstractQueryBuilder { - - public static final String NAME = "template"; - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(TemplateQueryBuilder.class)); - - /** Template to fill. */ - private final Script template; - - public TemplateQueryBuilder(String template, ScriptType scriptType, Map params) { - this(new Script(scriptType, "mustache", template, params)); - } - - public TemplateQueryBuilder(String template, ScriptType scriptType, Map params, XContentType ct) { - this(new Script(scriptType, "mustache", template, scriptType == ScriptType.INLINE ? - (ct == null ? Collections.emptyMap() : Collections.singletonMap(Script.CONTENT_TYPE_OPTION, ct.mediaType())) - : null, params)); - } - - TemplateQueryBuilder(Script template) { - DEPRECATION_LOGGER.deprecated("[{}] query is deprecated, use search template api instead", NAME); - if (template == null) { - throw new IllegalArgumentException("query template cannot be null"); - } - this.template = template; - } - - public Script template() { - return template; - } - - /** - * Read from a stream. 
- */ - public TemplateQueryBuilder(StreamInput in) throws IOException { - super(in); - template = new Script(in); - } - - @Override - protected void doWriteTo(StreamOutput out) throws IOException { - template.writeTo(out); - } - - @Override - protected void doXContent(XContentBuilder builder, Params builderParams) throws IOException { - builder.field(TemplateQueryBuilder.NAME); - template.toXContent(builder, builderParams); - } - - @Override - public String getWriteableName() { - return NAME; - } - - @Override - protected Query doToQuery(QueryShardContext context) throws IOException { - throw new UnsupportedOperationException("this query must be rewritten first"); - } - - @Override - protected int doHashCode() { - return Objects.hash(template); - } - - @Override - protected boolean doEquals(TemplateQueryBuilder other) { - return Objects.equals(template, other.template); - } - - @Override - protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws IOException { - BytesReference querySource = queryRewriteContext.getTemplateBytes(template); - try (XContentParser qSourceParser = XContentFactory.xContent(querySource).createParser(queryRewriteContext.getXContentRegistry(), - querySource)) { - final QueryParseContext queryParseContext = queryRewriteContext.newParseContext(qSourceParser); - final QueryBuilder queryBuilder = queryParseContext.parseInnerQueryBuilder(); - if (boost() != DEFAULT_BOOST || queryName() != null) { - final BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder(); - boolQueryBuilder.must(queryBuilder); - return boolQueryBuilder; - } - return queryBuilder; - } - } - - /** - * In the simplest case, parse template string and variables from the request, - * compile the template and execute the template against the given variables. 
- */ - public static TemplateQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException { - XContentParser parser = parseContext.parser(); - Script template = Script.parse(parser, Script.DEFAULT_TEMPLATE_LANG); - - // for deprecation of stored script namespaces the default lang is ignored, - // so the template lang must be set for a stored script - if (template.getType() == ScriptType.STORED) { - template = new Script(ScriptType.STORED, Script.DEFAULT_TEMPLATE_LANG, template.getIdOrCode(), template.getParams()); - } - - return new TemplateQueryBuilder(template); - } -} diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateIT.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateIT.java index b79257ebe2f..4ac9263706e 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateIT.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateIT.java @@ -21,7 +21,6 @@ package org.elasticsearch.script.mustache; import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptResponse; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.xcontent.XContentType; @@ -289,15 +288,6 @@ public class SearchTemplateIT extends ESSingleNodeTestCase { assertHitCount(searchResponse.getResponse(), 1); assertWarnings("use of [/mustache/2] for looking up" + " stored scripts/templates has been deprecated, use only [2] instead"); - - Map vars = new HashMap<>(); - vars.put("fieldParam", "bar"); - - TemplateQueryBuilder builder = new TemplateQueryBuilder("3", ScriptType.STORED, vars); - SearchResponse sr = client().prepareSearch().setQuery(builder) - .execute().actionGet(); 
- assertHitCount(sr, 1); - assertWarnings("[template] query is deprecated, use search template api instead"); } // Relates to #10397 diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/TemplateQueryBuilderTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/TemplateQueryBuilderTests.java deleted file mode 100644 index 3b70c5df626..00000000000 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/TemplateQueryBuilderTests.java +++ /dev/null @@ -1,229 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.script.mustache; - -import org.apache.lucene.index.memory.MemoryIndex; -import org.apache.lucene.search.MatchAllDocsQuery; -import org.apache.lucene.search.Query; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.query.BoolQueryBuilder; -import org.elasticsearch.index.query.MatchAllQueryBuilder; -import org.elasticsearch.index.query.MatchQueryBuilder; -import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.query.QueryShardContext; -import org.elasticsearch.index.query.TermQueryBuilder; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.script.MockScriptPlugin; -import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptType; -import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.test.AbstractQueryTestCase; -import org.junit.After; -import org.junit.Before; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; -import java.util.function.Function; - -import static org.hamcrest.Matchers.containsString; - -public class TemplateQueryBuilderTests extends AbstractQueryTestCase { - - /** - * The query type all template tests will be based on. - */ - private QueryBuilder templateBase; - - /** - * All tests in this class cause deprecation warnings when a new {@link TemplateQueryBuilder} is created. 
- * Instead of having to check them in every single test, we do it after each test is run - */ - @After - public void checkWarning() { - assertWarnings("[template] query is deprecated, use search template api instead"); - } - - @Override - protected Collection> getPlugins() { - return Arrays.asList(MustachePlugin.class, CustomScriptPlugin.class); - } - - public static class CustomScriptPlugin extends MockScriptPlugin { - - @Override - @SuppressWarnings("unchecked") - protected Map, Object>> pluginScripts() { - Map, Object>> scripts = new HashMap<>(); - - scripts.put("{ \"match_all\" : {}}", - s -> new BytesArray("{ \"match_all\" : {}}")); - - scripts.put("{ \"match_all\" : {\"_name\" : \"foobar\"}}", - s -> new BytesArray("{ \"match_all\" : {\"_name\" : \"foobar\"}}")); - - scripts.put("{\n" + - " \"term\" : {\n" + - " \"foo\" : {\n" + - " \"value\" : \"bar\",\n" + - " \"boost\" : 2.0\n" + - " }\n" + - " }\n" + - "}", s -> new BytesArray("{\n" + - " \"term\" : {\n" + - " \"foo\" : {\n" + - " \"value\" : \"bar\",\n" + - " \"boost\" : 2.0\n" + - " }\n" + - " }\n" + - "}")); - return scripts; - } - } - - @Before - public void setup() { - templateBase = new MatchQueryBuilder("field", "some values"); - } - - @Override - protected boolean supportsBoostAndQueryName() { - return false; - } - - @Override - protected TemplateQueryBuilder doCreateTestQueryBuilder() { - return new TemplateQueryBuilder(new Script(ScriptType.INLINE, "mustache", templateBase.toString(), Collections.emptyMap())); - } - - @Override - protected void doAssertLuceneQuery(TemplateQueryBuilder queryBuilder, Query query, SearchContext context) throws IOException { - QueryShardContext queryShardContext = context.getQueryShardContext(); - assertEquals(rewrite(QueryBuilder.rewriteQuery(templateBase, queryShardContext).toQuery(queryShardContext)), rewrite(query)); - } - - public void testIllegalArgument() { - expectThrows(IllegalArgumentException.class, () -> new TemplateQueryBuilder((Script) null)); - } - - 
/** - * Override superclass test since template query doesn't support boost and queryName, so - * we need to mutate other existing field in the test query. - */ - @Override - public void testUnknownField() throws IOException { - TemplateQueryBuilder testQuery = createTestQueryBuilder(); - XContentType xContentType = randomFrom(XContentType.JSON, XContentType.YAML); - String testQueryAsString = toXContent(testQuery, xContentType).string(); - String queryAsString = testQueryAsString.replace("inline", "bogusField"); - try { - parseQuery(createParser(xContentType.xContent(), queryAsString)); - fail("IllegalArgumentException expected"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), containsString("[script] unknown field [bogusField], parser not found")); - } - } - - public void testJSONGeneration() throws IOException { - Map vars = new HashMap<>(); - vars.put("template", "filled"); - TemplateQueryBuilder builder = new TemplateQueryBuilder("I am a $template string", ScriptType.INLINE, vars); - XContentBuilder content = XContentFactory.jsonBuilder(); - content.startObject(); - builder.doXContent(content, null); - content.endObject(); - content.close(); - assertEquals("{\"template\":{\"inline\":\"I am a $template string\",\"lang\":\"mustache\",\"params\":{\"template\":\"filled\"}}}", - content.string()); - } - - public void testRawEscapedTemplate() throws IOException { - String expectedTemplateString = "{\"match_{{template}}\": {}}\""; - String query = "{\"template\": {\"inline\": \"{\\\"match_{{template}}\\\": {}}\\\"\",\"params\" : {\"template\" : \"all\"}}}"; - Map params = new HashMap<>(); - params.put("template", "all"); - QueryBuilder expectedBuilder = new TemplateQueryBuilder(expectedTemplateString, ScriptType.INLINE, params); - assertParsedQuery(query, expectedBuilder); - } - - public void testRawTemplate() throws IOException { - String expectedTemplateString = "{\"match_{{template}}\":{}}"; - String query = "{\"template\": {\"inline\": 
{\"match_{{template}}\": {}},\"params\" : {\"template\" : \"all\"}}}"; - Map params = new HashMap<>(); - params.put("template", "all"); - QueryBuilder expectedBuilder = new TemplateQueryBuilder(expectedTemplateString, ScriptType.INLINE, params, XContentType.JSON); - assertParsedQuery(query, expectedBuilder); - } - - @Override - public void testMustRewrite() throws IOException { - String query = "{ \"match_all\" : {}}"; - QueryBuilder builder = new TemplateQueryBuilder(new Script(ScriptType.INLINE, "mockscript", query, - Collections.singletonMap(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType()), Collections.emptyMap())); - try { - builder.toQuery(createShardContext()); - fail(); - } catch (UnsupportedOperationException ex) { - assertEquals("this query must be rewritten first", ex.getMessage()); - } - assertEquals(new MatchAllQueryBuilder(), builder.rewrite(createShardContext())); - } - - public void testRewriteWithInnerName() throws IOException { - final String query = "{ \"match_all\" : {\"_name\" : \"foobar\"}}"; - QueryBuilder builder = new TemplateQueryBuilder(new Script(ScriptType.INLINE, "mockscript", query, - Collections.singletonMap(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType()), Collections.emptyMap())); - assertEquals(new MatchAllQueryBuilder().queryName("foobar"), builder.rewrite(createShardContext())); - - builder = new TemplateQueryBuilder(new Script(ScriptType.INLINE, "mockscript", query, Collections.singletonMap( - Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType()), Collections.emptyMap())).queryName("outer"); - assertEquals(new BoolQueryBuilder().must(new MatchAllQueryBuilder().queryName("foobar")).queryName("outer"), - builder.rewrite(createShardContext())); - } - - public void testRewriteWithInnerBoost() throws IOException { - final TermQueryBuilder query = new TermQueryBuilder("foo", "bar").boost(2); - QueryBuilder builder = new TemplateQueryBuilder(new Script(ScriptType.INLINE, "mockscript", query.toString(), - 
Collections.singletonMap(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType()), Collections.emptyMap())); - assertEquals(query, builder.rewrite(createShardContext())); - - builder = new TemplateQueryBuilder(new Script(ScriptType.INLINE, "mockscript", query.toString(), - Collections.singletonMap(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType()), Collections.emptyMap())).boost(3); - assertEquals(new BoolQueryBuilder().must(query).boost(3), builder.rewrite(createShardContext())); - } - - @Override - protected Query rewrite(Query query) throws IOException { - // TemplateQueryBuilder adds some optimization if the template and query builder have boosts / query names that wraps - // the actual QueryBuilder that comes from the template into a BooleanQueryBuilder to give it an outer boost / name - // this causes some queries to be not exactly equal but equivalent such that we need to rewrite them before comparing. - if (query != null) { - MemoryIndex idx = new MemoryIndex(); - return idx.createSearcher().rewrite(query); - } - return new MatchAllDocsQuery(); // null == *:* - } -} diff --git a/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/40_template_query.yaml b/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/40_template_query.yaml deleted file mode 100644 index a21184650f5..00000000000 --- a/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/40_template_query.yaml +++ /dev/null @@ -1,96 +0,0 @@ ---- -"Template query": - - skip: - features: warnings - - - do: - index: - index: test - type: testtype - id: 1 - body: { "text": "value1" } - - do: - index: - index: test - type: testtype - id: 2 - body: { "text": "value2 value3" } - - do: - indices.refresh: {} - - - do: - put_template: - id: "1" - body: { "template": { "match": { "text": "{{my_value}}" } } } - - match: { acknowledged: true } - - - do: - warnings: - - '[template] query is deprecated, use search template api instead' - search: 
- body: { "query": { "template": { "inline": { "term": { "text": { "value": "{{template}}" } } }, "params": { "template": "value1" } } } } - - - match: { hits.total: 1 } - - - do: - warnings: - - '[template] query is deprecated, use search template api instead' - search: - body: { "query": { "template": { "file": "file_query_template", "params": { "my_value": "value1" } } } } - - - match: { hits.total: 1 } - - - do: - warnings: - - '[template] query is deprecated, use search template api instead' - search: - body: { "query": { "template": { "stored": "1", "params": { "my_value": "value1" } } } } - - - match: { hits.total: 1 } - - - do: - warnings: - - '[template] query is deprecated, use search template api instead' - search: - body: { "query": { "template": { "stored": "1", "params": { "my_value": "value1" } } } } - - - match: { hits.total: 1 } - - - do: - warnings: - - '[template] query is deprecated, use search template api instead' - search: - body: { "query": { "template": { "inline": {"match_{{template}}": {}}, "params" : { "template" : "all" } } } } - - - match: { hits.total: 2 } - - - do: - warnings: - - '[template] query is deprecated, use search template api instead' - search: - body: { "query": { "template": { "inline": "{ \"term\": { \"text\": { \"value\": \"{{template}}\" } } }", "params": { "template": "value1" } } } } - - - match: { hits.total: 1 } - - - do: - warnings: - - '[template] query is deprecated, use search template api instead' - search: - body: { "query": { "template": { "inline": "{\"match_{{template}}\": {}}", "params" : { "template" : "all" } } } } - - - match: { hits.total: 2 } - - - do: - warnings: - - '[template] query is deprecated, use search template api instead' - search: - body: { "query": { "template": { "inline": "{\"match_all\": {}}", "params" : {} } } } - - - match: { hits.total: 2 } - - - do: - warnings: - - '[template] query is deprecated, use search template api instead' - search: - body: { "query": { "template": { 
"inline": "{\"query_string\": { \"query\" : \"{{query}}\" }}", "params" : { "query" : "text:\"value2 value3\"" } } } } - - match: { hits.total: 1 } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/LambdaBootstrap.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/LambdaBootstrap.java index 2ffe9afadf3..3a0b322d5ea 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/LambdaBootstrap.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/LambdaBootstrap.java @@ -38,6 +38,7 @@ import java.security.PrivilegedAction; import static java.lang.invoke.MethodHandles.Lookup; import static org.elasticsearch.painless.Compiler.Loader; import static org.elasticsearch.painless.WriterConstants.CLASS_VERSION; +import static org.elasticsearch.painless.WriterConstants.CTOR_METHOD_NAME; import static org.elasticsearch.painless.WriterConstants.DELEGATE_BOOTSTRAP_HANDLE; import static org.objectweb.asm.Opcodes.ACC_FINAL; import static org.objectweb.asm.Opcodes.ACC_PRIVATE; @@ -89,9 +90,13 @@ import static org.objectweb.asm.Opcodes.H_NEWINVOKESPECIAL; * public static final class $$Lambda0 implements Consumer { * private List arg$0; * - * public $$Lambda0(List arg$0) { + * private $$Lambda0(List arg$0) { * this.arg$0 = arg$0; * } + * + * public static Consumer create$lambda(List arg$0) { + * return new $$Lambda0(arg$0); + * } * * public void accept(Object val$0) { * Painless$Script.lambda$0(this.arg$0, val$0); @@ -120,10 +125,17 @@ import static org.objectweb.asm.Opcodes.H_NEWINVOKESPECIAL; * on whether or not there are captures. If there are no captures * the same instance of the generated lambda class will be * returned each time by the factory method as there are no - * changing values other than the arguments. 
If there are - * captures a new instance of the generated lambda class will - * be returned each time with the captures passed into the + * changing values other than the arguments, the lambda is a singleton. + * If there are captures, a new instance of the generated lambda class + * will be returned each time with the captures passed into the * factory method to be stored in the member fields. + * Instead of calling the ctor, a static factory method is created + * in the lambda class, because a method handle to the ctor directly + * is (currently) preventing Hotspot optimizer from correctly doing + * escape analysis. Escape analysis is important to optimize the + * code in a way, that a new instance is not created on each lambda + * invocation with captures, stressing garbage collector (thanks + * to Rémi Forax for the explanation about this on Jaxcon 2017!). */ public final class LambdaBootstrap { @@ -153,6 +165,11 @@ public final class LambdaBootstrap { */ private static final String DELEGATED_CTOR_WRAPPER_NAME = "delegate$ctor"; + /** + * This method name is used to generate the static factory for capturing lambdas. + */ + private static final String LAMBDA_FACTORY_METHOD_NAME = "create$lambda"; + /** * Generates a lambda class for a lambda function/method reference * within a Painless script. 
Variables with the prefix interface are considered @@ -198,7 +215,8 @@ public final class LambdaBootstrap { // Handles the special case where a method reference refers to a ctor (we need a static wrapper method): if (delegateInvokeType == H_NEWINVOKESPECIAL) { - generateStaticCtorDelegator(cw, delegateClassType, delegateMethodName, delegateMethodType); + assert CTOR_METHOD_NAME.equals(delegateMethodName); + generateStaticCtorDelegator(cw, ACC_PRIVATE, DELEGATED_CTOR_WRAPPER_NAME, delegateClassType, delegateMethodType); // replace the delegate with our static wrapper: delegateMethodName = DELEGATED_CTOR_WRAPPER_NAME; delegateClassType = lambdaClassType; @@ -281,16 +299,15 @@ public final class LambdaBootstrap { MethodType factoryMethodType, Capture[] captures) { - String conName = ""; String conDesc = factoryMethodType.changeReturnType(void.class).toMethodDescriptorString(); - Method conMeth = new Method(conName, conDesc); + Method conMeth = new Method(CTOR_METHOD_NAME, conDesc); Type baseConType = Type.getType(Object.class); - Method baseConMeth = new Method(conName, + Method baseConMeth = new Method(CTOR_METHOD_NAME, MethodType.methodType(void.class).toMethodDescriptorString()); - int modifiers = ACC_PUBLIC; + int modifiers = (captures.length > 0) ? ACC_PRIVATE : ACC_PUBLIC; GeneratorAdapter constructor = new GeneratorAdapter(modifiers, conMeth, - cw.visitMethod(modifiers, conName, conDesc, null, null)); + cw.visitMethod(modifiers, CTOR_METHOD_NAME, conDesc, null, null)); constructor.visitCode(); constructor.loadThis(); constructor.invokeConstructor(baseConType, baseConMeth); @@ -304,21 +321,31 @@ public final class LambdaBootstrap { constructor.returnValue(); constructor.endMethod(); + + // Add a factory method, if lambda takes captures. + // @uschindler says: I talked with Rémi Forax about this. 
Technically, a plain ctor + // and a MethodHandle to the ctor would be enough - BUT: Hotspot is unable to + // do escape analysis through a MethodHandles.findConstructor generated handle. + // Because of this we create a factory method. With this factory method, the + // escape analysis can figure out that everything is final and we don't need + // an instance, so it can omit object creation on heap! + if (captures.length > 0) { + generateStaticCtorDelegator(cw, ACC_PUBLIC, LAMBDA_FACTORY_METHOD_NAME, lambdaClassType, factoryMethodType); + } } /** - * Generates a factory method to delegate to constructors using - * {@code INVOKEDYNAMIC} using the {@link #delegateBootstrap} type converter. + * Generates a factory method to delegate to constructors. */ - private static void generateStaticCtorDelegator(ClassWriter cw, Type delegateClassType, String delegateMethodName, - MethodType delegateMethodType) { - Method wrapperMethod = new Method(DELEGATED_CTOR_WRAPPER_NAME, delegateMethodType.toMethodDescriptorString()); + private static void generateStaticCtorDelegator(ClassWriter cw, int access, String delegatorMethodName, + Type delegateClassType, MethodType delegateMethodType) { + Method wrapperMethod = new Method(delegatorMethodName, delegateMethodType.toMethodDescriptorString()); Method constructorMethod = - new Method(delegateMethodName, delegateMethodType.changeReturnType(void.class).toMethodDescriptorString()); - int modifiers = ACC_PRIVATE | ACC_STATIC; + new Method(CTOR_METHOD_NAME, delegateMethodType.changeReturnType(void.class).toMethodDescriptorString()); + int modifiers = access | ACC_STATIC; GeneratorAdapter factory = new GeneratorAdapter(modifiers, wrapperMethod, - cw.visitMethod(modifiers, DELEGATED_CTOR_WRAPPER_NAME, delegateMethodType.toMethodDescriptorString(), null, null)); + cw.visitMethod(modifiers, delegatorMethodName, delegateMethodType.toMethodDescriptorString(), null, null)); factory.visitCode(); factory.newInstance(delegateClassType); 
factory.dup(); @@ -329,7 +356,8 @@ public final class LambdaBootstrap { } /** - * Generates the interface method that will delegate (call) to the delegate method. + * Generates the interface method that will delegate (call) to the delegate method + * with {@code INVOKEDYNAMIC} using the {@link #delegateBootstrap} type converter. */ private static void generateInterfaceMethod( ClassWriter cw, @@ -464,8 +492,7 @@ public final class LambdaBootstrap { try { return new ConstantCallSite( - lookup.findConstructor(lambdaClass, factoryMethodType.changeReturnType(void.class)) - .asType(factoryMethodType)); + lookup.findStatic(lambdaClass, LAMBDA_FACTORY_METHOD_NAME, factoryMethodType)); } catch (ReflectiveOperationException exception) { throw new IllegalStateException("unable to create lambda class", exception); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java index 6b9a71a752b..af9f84424b4 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java @@ -53,8 +53,10 @@ public final class WriterConstants { public static final String CLASS_NAME = BASE_CLASS_NAME + "$Script"; public static final Type CLASS_TYPE = Type.getObjectType(CLASS_NAME.replace('.', '/')); + + public static final String CTOR_METHOD_NAME = ""; - public static final Method CONSTRUCTOR = getAsmMethod(void.class, "", String.class, String.class, BitSet.class); + public static final Method CONSTRUCTOR = getAsmMethod(void.class, CTOR_METHOD_NAME, String.class, String.class, BitSet.class); public static final Method CLINIT = getAsmMethod(void.class, ""); // All of these types are caught by the main method and rethrown as ScriptException @@ -162,7 +164,7 @@ public final class WriterConstants { public static final Type STRING_TYPE = 
Type.getType(String.class); public static final Type STRINGBUILDER_TYPE = Type.getType(StringBuilder.class); - public static final Method STRINGBUILDER_CONSTRUCTOR = getAsmMethod(void.class, ""); + public static final Method STRINGBUILDER_CONSTRUCTOR = getAsmMethod(void.class, CTOR_METHOD_NAME); public static final Method STRINGBUILDER_APPEND_BOOLEAN = getAsmMethod(StringBuilder.class, "append", boolean.class); public static final Method STRINGBUILDER_APPEND_CHAR = getAsmMethod(StringBuilder.class, "append", char.class); public static final Method STRINGBUILDER_APPEND_INT = getAsmMethod(StringBuilder.class, "append", int.class); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/byscroll/AbstractAsyncBulkByScrollAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java similarity index 98% rename from core/src/main/java/org/elasticsearch/action/bulk/byscroll/AbstractAsyncBulkByScrollAction.java rename to modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java index 3ba07ea5538..2a23823c858 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/byscroll/AbstractAsyncBulkByScrollAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.action.bulk.byscroll; +package org.elasticsearch.index.reindex; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; @@ -31,7 +31,7 @@ import org.elasticsearch.action.bulk.BulkItemResponse.Failure; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.bulk.Retry; -import org.elasticsearch.action.bulk.byscroll.ScrollableHitSource.SearchFailure; +import org.elasticsearch.index.reindex.ScrollableHitSource.SearchFailure; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.client.ParentTaskAssigningClient; @@ -77,7 +77,7 @@ import static java.lang.Math.min; import static java.util.Collections.emptyList; import static java.util.Collections.unmodifiableList; import static org.elasticsearch.action.bulk.BackoffPolicy.exponentialBackoff; -import static org.elasticsearch.action.bulk.byscroll.AbstractBulkByScrollRequest.SIZE_ALL_MATCHES; +import static org.elasticsearch.index.reindex.AbstractBulkByScrollRequest.SIZE_ALL_MATCHES; import static org.elasticsearch.common.unit.TimeValue.timeValueNanos; import static org.elasticsearch.rest.RestStatus.CONFLICT; import static org.elasticsearch.search.sort.SortBuilders.fieldSort; @@ -116,8 +116,8 @@ public abstract class AbstractAsyncBulkByScrollAction, ScrollableHitSource.Hit, RequestWrapper> scriptApplier; public AbstractAsyncBulkByScrollAction(WorkingBulkByScrollTask task, Logger logger, ParentTaskAssigningClient client, - ThreadPool threadPool, Request mainRequest, ScriptService scriptService, ClusterState clusterState, - ActionListener listener) { + ThreadPool threadPool, Request mainRequest, ScriptService scriptService, + ClusterState clusterState, ActionListener listener) { this(task, logger, client, threadPool, mainRequest, scriptService, clusterState, listener, client.settings()); } @@ -741,7 +741,7 @@ 
public abstract class AbstractAsyncBulkByScrollAction wrap(DeleteRequest request) { + public static RequestWrapper wrap(DeleteRequest request) { return new DeleteRequestWrapper(request); } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java index d70b3c9c4ce..64b02c4be81 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java @@ -21,9 +21,6 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.GenericAction; -import org.elasticsearch.action.bulk.byscroll.AbstractBulkByScrollRequest; -import org.elasticsearch.action.bulk.byscroll.BulkByScrollResponse; -import org.elasticsearch.action.bulk.byscroll.BulkByScrollTask; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.settings.Settings; diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByQueryRestHandler.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByQueryRestHandler.java index 480ca80e2ee..32a252ccc4b 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByQueryRestHandler.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByQueryRestHandler.java @@ -21,8 +21,6 @@ package org.elasticsearch.index.reindex; import org.apache.lucene.util.IOUtils; import org.elasticsearch.action.GenericAction; -import org.elasticsearch.action.bulk.byscroll.AbstractBulkByScrollRequest; -import org.elasticsearch.action.bulk.byscroll.BulkByScrollResponse; import org.elasticsearch.action.search.SearchRequest; import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -35,7 +33,7 @@ import java.io.IOException; import java.util.Map; import java.util.function.Consumer; -import static org.elasticsearch.action.bulk.byscroll.AbstractBulkByScrollRequest.SIZE_ALL_MATCHES; +import static org.elasticsearch.index.reindex.AbstractBulkByScrollRequest.SIZE_ALL_MATCHES; /** * Rest handler for reindex actions that accepts a search request like Update-By-Query or Delete-By-Query diff --git a/core/src/main/java/org/elasticsearch/action/bulk/byscroll/AsyncDeleteByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AsyncDeleteByQueryAction.java similarity index 91% rename from core/src/main/java/org/elasticsearch/action/bulk/byscroll/AsyncDeleteByQueryAction.java rename to modules/reindex/src/main/java/org/elasticsearch/index/reindex/AsyncDeleteByQueryAction.java index cdcfb754fb6..2608f5715ba 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/byscroll/AsyncDeleteByQueryAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AsyncDeleteByQueryAction.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.action.bulk.byscroll; +package org.elasticsearch.index.reindex; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; @@ -32,8 +32,8 @@ import org.elasticsearch.threadpool.ThreadPool; */ public class AsyncDeleteByQueryAction extends AbstractAsyncBulkByScrollAction { public AsyncDeleteByQueryAction(WorkingBulkByScrollTask task, Logger logger, ParentTaskAssigningClient client, - ThreadPool threadPool, DeleteByQueryRequest request, ScriptService scriptService, ClusterState clusterState, - ActionListener listener) { + ThreadPool threadPool, DeleteByQueryRequest request, ScriptService scriptService, + ClusterState clusterState, ActionListener listener) { super(task, logger, client, threadPool, request, scriptService, clusterState, listener); } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/byscroll/BulkByScrollParallelizationHelper.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollParallelizationHelper.java similarity index 88% rename from core/src/main/java/org/elasticsearch/action/bulk/byscroll/BulkByScrollParallelizationHelper.java rename to modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollParallelizationHelper.java index f2bd62c2335..48f10306454 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/byscroll/BulkByScrollParallelizationHelper.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollParallelizationHelper.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.action.bulk.byscroll; +package org.elasticsearch.index.reindex; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionListener; @@ -32,13 +32,13 @@ import org.elasticsearch.tasks.TaskManager; /** * Helps parallelize reindex requests using sliced scrolls. 
*/ -public class BulkByScrollParallelizationHelper { +class BulkByScrollParallelizationHelper { private BulkByScrollParallelizationHelper() {} - public static < - Request extends AbstractBulkByScrollRequest - > void startSlices(Client client, TaskManager taskManager, Action action, - String localNodeId, ParentBulkByScrollTask task, Request request, ActionListener listener) { + public static > void startSlices(Client client, TaskManager taskManager, + Action action, + String localNodeId, ParentBulkByScrollTask task, Request request, + ActionListener listener) { TaskId parentTaskId = new TaskId(localNodeId, task.getId()); for (final SearchRequest slice : sliceIntoSubRequests(request.getSearchRequest(), UidFieldMapper.NAME, request.getSlices())) { // TODO move the request to the correct node. maybe here or somehow do it as part of startup for reindex in general.... diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponseContentListener.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponseContentListener.java index a8d321a9fab..8e5dff170d4 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponseContentListener.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponseContentListener.java @@ -21,8 +21,7 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.bulk.BulkItemResponse.Failure; -import org.elasticsearch.action.bulk.byscroll.BulkByScrollResponse; -import org.elasticsearch.action.bulk.byscroll.ScrollableHitSource.SearchFailure; +import org.elasticsearch.index.reindex.ScrollableHitSource.SearchFailure; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BytesRestResponse; diff --git 
a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexAction.java index 2c84cfc86be..1c53a925f0d 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.Action; -import org.elasticsearch.action.bulk.byscroll.BulkByScrollResponse; import org.elasticsearch.client.ElasticsearchClient; public class ReindexAction extends Action { diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexPlugin.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexPlugin.java index fb203ee5c6d..d601f5c06e7 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexPlugin.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexPlugin.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.bulk.byscroll.BulkByScrollTask; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestDeleteByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestDeleteByQueryAction.java index 0e11d64a405..f906ef7660d 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestDeleteByQueryAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestDeleteByQueryAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.ElasticsearchException; -import 
org.elasticsearch.action.bulk.byscroll.DeleteByQueryRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.settings.Settings; diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java index 57d3213da92..6c16c31efb1 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java @@ -36,7 +36,6 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.query.QueryParseContext; -import org.elasticsearch.index.reindex.remote.RemoteInfo; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.script.Script; diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportDeleteByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportDeleteByQueryAction.java index a17b2b81f91..99e1a9f166d 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportDeleteByQueryAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportDeleteByQueryAction.java @@ -20,12 +20,6 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.bulk.byscroll.AsyncDeleteByQueryAction; -import org.elasticsearch.action.bulk.byscroll.BulkByScrollParallelizationHelper; -import org.elasticsearch.action.bulk.byscroll.BulkByScrollResponse; -import org.elasticsearch.action.bulk.byscroll.DeleteByQueryRequest; -import org.elasticsearch.action.bulk.byscroll.ParentBulkByScrollTask; -import 
org.elasticsearch.action.bulk.byscroll.WorkingBulkByScrollTask; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.client.Client; diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java index b232c50c2b2..737d885443a 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java @@ -37,13 +37,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.bulk.BackoffPolicy; import org.elasticsearch.action.bulk.BulkItemResponse.Failure; -import org.elasticsearch.action.bulk.byscroll.AbstractAsyncBulkByScrollAction; -import org.elasticsearch.action.bulk.byscroll.BulkByScrollParallelizationHelper; -import org.elasticsearch.action.bulk.byscroll.BulkByScrollResponse; -import org.elasticsearch.action.bulk.byscroll.ParentBulkByScrollTask; -import org.elasticsearch.action.bulk.byscroll.ScrollableHitSource; -import org.elasticsearch.action.bulk.byscroll.ScrollableHitSource.SearchFailure; -import org.elasticsearch.action.bulk.byscroll.WorkingBulkByScrollTask; +import org.elasticsearch.index.reindex.ScrollableHitSource.SearchFailure; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.ActionFilters; @@ -68,7 +62,6 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.VersionFieldMapper; -import org.elasticsearch.index.reindex.remote.RemoteInfo; import 
org.elasticsearch.index.reindex.remote.RemoteScrollableHitSource; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportRethrottleAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportRethrottleAction.java index 88329f5cb17..0901e5ade31 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportRethrottleAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportRethrottleAction.java @@ -24,7 +24,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; -import org.elasticsearch.action.bulk.byscroll.BulkByScrollTask; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.tasks.TransportTasksAction; import org.elasticsearch.client.Client; diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java index 12d8696319f..8924c7038c9 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java @@ -21,12 +21,6 @@ package org.elasticsearch.index.reindex; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.bulk.byscroll.AbstractAsyncBulkByScrollAction; -import org.elasticsearch.action.bulk.byscroll.BulkByScrollResponse; -import org.elasticsearch.action.bulk.byscroll.ParentBulkByScrollTask; -import org.elasticsearch.action.bulk.byscroll.BulkByScrollParallelizationHelper; -import 
org.elasticsearch.action.bulk.byscroll.ScrollableHitSource; -import org.elasticsearch.action.bulk.byscroll.WorkingBulkByScrollTask; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java index 1cd1df230a4..50769cc9231 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java @@ -43,7 +43,15 @@ import java.util.HashMap; import java.util.Map; import static java.util.Collections.singletonMap; +import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; +/** + * Builds requests for remote version of Elasticsearch. Note that unlike most of the + * rest of Elasticsearch this file needs to be compatible with very old versions of + * Elasticsearch. Thus is often uses identifiers for versions like {@code 2000099} + * for {@code 2.0.0-alpha1}. Do not drop support for features from this file just + * because the version constants have been removed. + */ final class RemoteRequestBuilders { private RemoteRequestBuilders() {} @@ -59,7 +67,14 @@ final class RemoteRequestBuilders { static Map initialSearchParams(SearchRequest searchRequest, Version remoteVersion) { Map params = new HashMap<>(); if (searchRequest.scroll() != null) { - params.put("scroll", searchRequest.scroll().keepAlive().getStringRep()); + TimeValue keepAlive = searchRequest.scroll().keepAlive(); + if (remoteVersion.before(Version.V_5_0_0)) { + /* Versions of Elasticsearch before 5.0 couldn't parse nanos or micros + * so we toss out that resolution, rounding up because more scroll + * timeout seems safer than less. 
*/ + keepAlive = timeValueMillis((long) Math.ceil(keepAlive.millisFrac())); + } + params.put("scroll", keepAlive.getStringRep()); } params.put("size", Integer.toString(searchRequest.source().size())); if (searchRequest.source().version() == null || searchRequest.source().version() == true) { @@ -93,6 +108,12 @@ final class RemoteRequestBuilders { if (remoteVersion.before(Version.fromId(2000099))) { // Versions before 2.0.0 need prompting to return interesting fields. Note that timestamp isn't available at all.... searchRequest.source().storedField("_parent").storedField("_routing").storedField("_ttl"); + if (remoteVersion.before(Version.fromId(1000099))) { + // Versions before 1.0.0 don't support `"_source": true` so we have to ask for the _source in a funny way. + if (false == searchRequest.source().storedFields().fieldNames().contains("_source")) { + searchRequest.source().storedField("_source"); + } + } } if (searchRequest.source().storedFields() != null && false == searchRequest.source().storedFields().fieldNames().isEmpty()) { StringBuilder fields = new StringBuilder(searchRequest.source().storedFields().fieldNames().get(0)); @@ -105,7 +126,7 @@ final class RemoteRequestBuilders { return params; } - static HttpEntity initialSearchEntity(SearchRequest searchRequest, BytesReference query) { + static HttpEntity initialSearchEntity(SearchRequest searchRequest, BytesReference query, Version remoteVersion) { // EMPTY is safe here because we're not calling namedObject try (XContentBuilder entity = JsonXContent.contentBuilder(); XContentParser queryParser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, query)) { @@ -125,7 +146,10 @@ final class RemoteRequestBuilders { if (searchRequest.source().fetchSource() != null) { entity.field("_source", searchRequest.source().fetchSource()); } else { - entity.field("_source", true); + if (remoteVersion.onOrAfter(Version.fromId(1000099))) { + // Versions before 1.0 don't support `"_source": true` so we have to ask for 
the source as a stored field. + entity.field("_source", true); + } } entity.endObject(); @@ -167,7 +191,13 @@ final class RemoteRequestBuilders { return "/_search/scroll"; } - static Map scrollParams(TimeValue keepAlive) { + static Map scrollParams(TimeValue keepAlive, Version remoteVersion) { + if (remoteVersion.before(Version.V_5_0_0)) { + /* Versions of Elasticsearch before 5.0 couldn't parse nanos or micros + * so we toss out that resolution, rounding up so we shouldn't end up + * with 0s. */ + keepAlive = timeValueMillis((long) Math.ceil(keepAlive.millisFrac())); + } return singletonMap("scroll", keepAlive.getStringRep()); } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteResponseParsers.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteResponseParsers.java index e9807bdfa5b..d9a897026d2 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteResponseParsers.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteResponseParsers.java @@ -20,10 +20,10 @@ package org.elasticsearch.index.reindex.remote; import org.elasticsearch.Version; -import org.elasticsearch.action.bulk.byscroll.ScrollableHitSource.BasicHit; -import org.elasticsearch.action.bulk.byscroll.ScrollableHitSource.Hit; -import org.elasticsearch.action.bulk.byscroll.ScrollableHitSource.Response; -import org.elasticsearch.action.bulk.byscroll.ScrollableHitSource.SearchFailure; +import org.elasticsearch.index.reindex.ScrollableHitSource.BasicHit; +import org.elasticsearch.index.reindex.ScrollableHitSource.Hit; +import org.elasticsearch.index.reindex.ScrollableHitSource.Response; +import org.elasticsearch.index.reindex.ScrollableHitSource.SearchFailure; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.collect.Tuple; diff --git 
a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java index 6b7b6ca3aa0..85173b7d899 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java @@ -30,7 +30,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.Version; import org.elasticsearch.action.bulk.BackoffPolicy; -import org.elasticsearch.action.bulk.byscroll.ScrollableHitSource; +import org.elasticsearch.index.reindex.ScrollableHitSource; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.ResponseListener; @@ -87,7 +87,7 @@ public class RemoteScrollableHitSource extends ScrollableHitSource { lookupRemoteVersion(version -> { remoteVersion = version; execute("POST", initialSearchPath(searchRequest), initialSearchParams(searchRequest, version), - initialSearchEntity(searchRequest, query), RESPONSE_PARSER, r -> onStartResponse(onResponse, r)); + initialSearchEntity(searchRequest, query, remoteVersion), RESPONSE_PARSER, r -> onStartResponse(onResponse, r)); }); } @@ -106,8 +106,10 @@ public class RemoteScrollableHitSource extends ScrollableHitSource { @Override protected void doStartNextScroll(String scrollId, TimeValue extraKeepAlive, Consumer onResponse) { - execute("POST", scrollPath(), scrollParams(timeValueNanos(searchRequest.scroll().keepAlive().nanos() + extraKeepAlive.nanos())), - scrollEntity(scrollId, remoteVersion), RESPONSE_PARSER, onResponse); + Map scrollParams = scrollParams( + timeValueNanos(searchRequest.scroll().keepAlive().nanos() + extraKeepAlive.nanos()), + remoteVersion); + execute("POST", scrollPath(), scrollParams, 
scrollEntity(scrollId, remoteVersion), RESPONSE_PARSER, onResponse); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/action/bulk/byscroll/AbstractAsyncBulkByScrollActionMetadataTestCase.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollActionMetadataTestCase.java similarity index 96% rename from test/framework/src/main/java/org/elasticsearch/action/bulk/byscroll/AbstractAsyncBulkByScrollActionMetadataTestCase.java rename to modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollActionMetadataTestCase.java index b68797381d1..34da9f56b48 100644 --- a/test/framework/src/main/java/org/elasticsearch/action/bulk/byscroll/AbstractAsyncBulkByScrollActionMetadataTestCase.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollActionMetadataTestCase.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.action.bulk.byscroll; +package org.elasticsearch.index.reindex; public abstract class AbstractAsyncBulkByScrollActionMetadataTestCase< Request extends AbstractBulkByScrollRequest, diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollActionScriptTestCase.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollActionScriptTestCase.java index fd41a6d25f3..6ddf6daa880 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollActionScriptTestCase.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollActionScriptTestCase.java @@ -20,12 +20,8 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.bulk.byscroll.AbstractAsyncBulkByScrollAction; -import org.elasticsearch.action.bulk.byscroll.AbstractAsyncBulkByScrollAction.OpType; -import 
org.elasticsearch.action.bulk.byscroll.AbstractAsyncBulkByScrollAction.RequestWrapper; -import org.elasticsearch.action.bulk.byscroll.AbstractAsyncBulkByScrollActionTestCase; -import org.elasticsearch.action.bulk.byscroll.BulkByScrollResponse; -import org.elasticsearch.action.bulk.byscroll.ScrollableHitSource; +import org.elasticsearch.index.reindex.AbstractAsyncBulkByScrollAction.OpType; +import org.elasticsearch.index.reindex.AbstractAsyncBulkByScrollAction.RequestWrapper; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.script.CompiledScript; diff --git a/core/src/test/java/org/elasticsearch/action/bulk/byscroll/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java similarity index 99% rename from core/src/test/java/org/elasticsearch/action/bulk/byscroll/AsyncBulkByScrollActionTests.java rename to modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java index fa42573e439..5c437da3464 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/byscroll/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.action.bulk.byscroll; +package org.elasticsearch.index.reindex; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; @@ -36,8 +36,8 @@ import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkItemResponse.Failure; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; -import org.elasticsearch.action.bulk.byscroll.ScrollableHitSource.Hit; -import org.elasticsearch.action.bulk.byscroll.ScrollableHitSource.SearchFailure; +import org.elasticsearch.index.reindex.ScrollableHitSource.Hit; +import org.elasticsearch.index.reindex.ScrollableHitSource.SearchFailure; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexRequest; @@ -696,7 +696,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { } @Override - protected DummyAbstractBulkByScrollRequest forSlice(TaskId slicingTask, SearchRequest slice) { + public DummyAbstractBulkByScrollRequest forSlice(TaskId slicingTask, SearchRequest slice) { throw new UnsupportedOperationException(); } diff --git a/core/src/test/java/org/elasticsearch/action/bulk/byscroll/BulkByScrollParallelizationHelperTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/BulkByScrollParallelizationHelperTests.java similarity index 94% rename from core/src/test/java/org/elasticsearch/action/bulk/byscroll/BulkByScrollParallelizationHelperTests.java rename to modules/reindex/src/test/java/org/elasticsearch/index/reindex/BulkByScrollParallelizationHelperTests.java index 498c6bf5286..a64415d08b1 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/byscroll/BulkByScrollParallelizationHelperTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/BulkByScrollParallelizationHelperTests.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.action.bulk.byscroll; +package org.elasticsearch.index.reindex; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.index.mapper.IdFieldMapper; @@ -27,7 +27,7 @@ import org.elasticsearch.test.ESTestCase; import java.io.IOException; import static java.util.Collections.emptyList; -import static org.elasticsearch.action.bulk.byscroll.BulkByScrollParallelizationHelper.sliceIntoSubRequests; +import static org.elasticsearch.index.reindex.BulkByScrollParallelizationHelper.sliceIntoSubRequests; import static org.elasticsearch.search.RandomSearchRequestGenerator.randomSearchRequest; import static org.elasticsearch.search.RandomSearchRequestGenerator.randomSearchSourceBuilder; diff --git a/test/framework/src/main/java/org/elasticsearch/action/bulk/byscroll/BulkIndexByScrollResponseMatcher.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponseMatcher.java similarity index 98% rename from test/framework/src/main/java/org/elasticsearch/action/bulk/byscroll/BulkIndexByScrollResponseMatcher.java rename to modules/reindex/src/test/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponseMatcher.java index 2902e02a30c..cb2ff1a7ae2 100644 --- a/test/framework/src/main/java/org/elasticsearch/action/bulk/byscroll/BulkIndexByScrollResponseMatcher.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponseMatcher.java @@ -17,9 +17,8 @@ * under the License. 
*/ -package org.elasticsearch.action.bulk.byscroll; +package org.elasticsearch.index.reindex; -import org.elasticsearch.action.bulk.byscroll.BulkByScrollResponse; import org.hamcrest.Description; import org.hamcrest.Matcher; import org.hamcrest.TypeSafeMatcher; @@ -155,4 +154,4 @@ public class BulkIndexByScrollResponseMatcher extends TypeSafeMatcher params, TimeValue requested) { + if (remoteVersion.before(Version.V_5_0_0)) { + // Versions of Elasticsearch prior to 5.0 can't parse nanos or micros in TimeValue. + assertThat(params.get("scroll"), not(either(endsWith("nanos")).or(endsWith("micros")))); + if (requested.getStringRep().endsWith("nanos") || requested.getStringRep().endsWith("micros")) { + long millis = (long) Math.ceil(requested.millisFrac()); + assertEquals(TimeValue.parseTimeValue(params.get("scroll"), "scroll"), timeValueMillis(millis)); + return; + } + } + assertEquals(requested, TimeValue.parseTimeValue(params.get("scroll"), "scroll")); + } + public void testInitialSearchEntity() throws IOException { + Version remoteVersion = Version.fromId(between(0, Version.CURRENT.id)); + SearchRequest searchRequest = new SearchRequest(); searchRequest.source(new SearchSourceBuilder()); String query = "{\"match_all\":{}}"; - HttpEntity entity = initialSearchEntity(searchRequest, new BytesArray(query)); + HttpEntity entity = initialSearchEntity(searchRequest, new BytesArray(query), remoteVersion); assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType().getValue()); - assertEquals("{\"query\":" + query + ",\"_source\":true}", - Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8))); + if (remoteVersion.onOrAfter(Version.fromId(1000099))) { + assertEquals("{\"query\":" + query + ",\"_source\":true}", + Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8))); + } else { + assertEquals("{\"query\":" + query + "}", + Streams.copyToString(new InputStreamReader(entity.getContent(), 
StandardCharsets.UTF_8))); + } // Source filtering is included if set up searchRequest.source().fetchSource(new String[] {"in1", "in2"}, new String[] {"out"}); - entity = initialSearchEntity(searchRequest, new BytesArray(query)); + entity = initialSearchEntity(searchRequest, new BytesArray(query), remoteVersion); assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType().getValue()); assertEquals("{\"query\":" + query + ",\"_source\":{\"includes\":[\"in1\",\"in2\"],\"excludes\":[\"out\"]}}", Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8))); // Invalid XContent fails RuntimeException e = expectThrows(RuntimeException.class, - () -> initialSearchEntity(searchRequest, new BytesArray("{}, \"trailing\": {}"))); + () -> initialSearchEntity(searchRequest, new BytesArray("{}, \"trailing\": {}"), remoteVersion)); assertThat(e.getCause().getMessage(), containsString("Unexpected character (',' (code 44))")); - e = expectThrows(RuntimeException.class, () -> initialSearchEntity(searchRequest, new BytesArray("{"))); + e = expectThrows(RuntimeException.class, () -> initialSearchEntity(searchRequest, new BytesArray("{"), remoteVersion)); assertThat(e.getCause().getMessage(), containsString("Unexpected end-of-input")); } public void testScrollParams() { + Version remoteVersion = Version.fromId(between(0, Version.CURRENT.id)); TimeValue scroll = TimeValue.parseTimeValue(randomPositiveTimeValue(), "test"); - assertEquals(scroll, TimeValue.parseTimeValue(scrollParams(scroll).get("scroll"), "scroll")); + assertScroll(remoteVersion, scrollParams(scroll, remoteVersion), scroll); } public void testScrollEntity() throws IOException { diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java index f63b05e96be..f67a5b627fb 100644 --- 
a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java @@ -40,7 +40,7 @@ import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.Version; import org.elasticsearch.action.bulk.BackoffPolicy; -import org.elasticsearch.action.bulk.byscroll.ScrollableHitSource.Response; +import org.elasticsearch.index.reindex.ScrollableHitSource.Response; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.client.HeapBufferedAsyncResponseConsumer; import org.elasticsearch.client.RestClient; diff --git a/plugins/examples/build.gradle b/plugins/examples/build.gradle new file mode 100644 index 00000000000..e69de29bb2d diff --git a/plugins/examples/script-expert-scoring/build.gradle b/plugins/examples/script-expert-scoring/build.gradle new file mode 100644 index 00000000000..7c602d9bc02 --- /dev/null +++ b/plugins/examples/script-expert-scoring/build.gradle @@ -0,0 +1,28 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +apply plugin: 'elasticsearch.esplugin' + +esplugin { + name 'script-expert-scoring' + description 'An example script engine to use low level Lucene internals for expert scoring' + classname 'org.elasticsearch.example.expertscript.ExpertScriptPlugin' +} + +test.enabled = false diff --git a/plugins/examples/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript/ExpertScriptPlugin.java b/plugins/examples/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript/ExpertScriptPlugin.java new file mode 100644 index 00000000000..d56e7932499 --- /dev/null +++ b/plugins/examples/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript/ExpertScriptPlugin.java @@ -0,0 +1,145 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.example.expertscript; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.PostingsEnum; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.Scorer; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.ScriptPlugin; +import org.elasticsearch.script.CompiledScript; +import org.elasticsearch.script.ExecutableScript; +import org.elasticsearch.script.LeafSearchScript; +import org.elasticsearch.script.ScriptEngine; +import org.elasticsearch.script.SearchScript; +import org.elasticsearch.search.lookup.SearchLookup; + +/** + * An example script plugin that adds a {@link ScriptEngine} implementing expert scoring. + */ +public class ExpertScriptPlugin extends Plugin implements ScriptPlugin { + + @Override + public ScriptEngine getScriptEngine(Settings settings) { + return new MyExpertScriptEngine(); + } + + /** An example {@link ScriptEngine} that uses Lucene segment details to implement pure document frequency scoring. 
*/ + // tag::expert_engine + private static class MyExpertScriptEngine implements ScriptEngine { + @Override + public String getType() { + return "expert_scripts"; + } + + @Override + public Function,SearchScript> compile(String scriptName, String scriptSource, Map params) { + // we use the script "source" as the script identifier + if ("pure_df".equals(scriptSource)) { + return p -> new SearchScript() { + final String field; + final String term; + { + if (p.containsKey("field") == false) { + throw new IllegalArgumentException("Missing parameter [field]"); + } + if (p.containsKey("term") == false) { + throw new IllegalArgumentException("Missing parameter [term]"); + } + field = p.get("field").toString(); + term = p.get("term").toString(); + } + + @Override + public LeafSearchScript getLeafSearchScript(LeafReaderContext context) throws IOException { + PostingsEnum postings = context.reader().postings(new Term(field, term)); + if (postings == null) { + // the field and/or term don't exist in this segment, so always return 0 + return () -> 0.0d; + } + return new LeafSearchScript() { + int currentDocid = -1; + @Override + public void setDocument(int docid) { + // advance has undefined behavior calling with a docid <= its current docid + if (postings.docID() < docid) { + try { + postings.advance(docid); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + currentDocid = docid; + } + @Override + public double runAsDouble() { + if (postings.docID() != currentDocid) { + // advance moved past the current doc, so this doc has no occurrences of the term + return 0.0d; + } + try { + return postings.freq(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + }; + } + + @Override + public boolean needsScores() { + return false; + } + }; + } + throw new IllegalArgumentException("Unknown script name " + scriptSource); + } + + @Override + @SuppressWarnings("unchecked") + public SearchScript search(CompiledScript compiledScript, 
SearchLookup lookup, @Nullable Map params) { + Function,SearchScript> scriptFactory = (Function,SearchScript>) compiledScript.compiled(); + return scriptFactory.apply(params); + } + + @Override + public ExecutableScript executable(CompiledScript compiledScript, @Nullable Map params) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isInlineScriptEnabled() { + return true; + } + + @Override + public void close() {} + } + // end::expert_engine +} diff --git a/plugins/examples/script-expert-scoring/src/test/java/org/elasticsearch/example/expertscript/ExpertScriptClientYamlTestSuiteIT.java b/plugins/examples/script-expert-scoring/src/test/java/org/elasticsearch/example/expertscript/ExpertScriptClientYamlTestSuiteIT.java new file mode 100644 index 00000000000..977ca9cf843 --- /dev/null +++ b/plugins/examples/script-expert-scoring/src/test/java/org/elasticsearch/example/expertscript/ExpertScriptClientYamlTestSuiteIT.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.example.expertscript; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; + +public class ExpertScriptClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + + public ExpertScriptClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return ESClientYamlSuiteTestCase.createParameters(); + } +} + diff --git a/plugins/examples/script-expert-scoring/src/test/resources/rest-api-spec/test/script_expert_scoring/10_basic.yaml b/plugins/examples/script-expert-scoring/src/test/resources/rest-api-spec/test/script_expert_scoring/10_basic.yaml new file mode 100644 index 00000000000..b4fafd69dd4 --- /dev/null +++ b/plugins/examples/script-expert-scoring/src/test/resources/rest-api-spec/test/script_expert_scoring/10_basic.yaml @@ -0,0 +1,13 @@ +# Integration tests for the expert scoring script example plugin +# +"Plugin loaded": + - do: + cluster.state: {} + + # Get master node id + - set: { master_node: master } + + - do: + nodes.info: {} + + - match: { nodes.$master.plugins.0.name: script-expert-scoring } diff --git a/plugins/examples/script-expert-scoring/src/test/resources/rest-api-spec/test/script_expert_scoring/20_score.yaml b/plugins/examples/script-expert-scoring/src/test/resources/rest-api-spec/test/script_expert_scoring/20_score.yaml new file mode 100644 index 00000000000..1a0134b154b --- /dev/null +++ b/plugins/examples/script-expert-scoring/src/test/resources/rest-api-spec/test/script_expert_scoring/20_score.yaml @@ -0,0 +1,53 @@ +# Integration tests for the expert scoring script example plugin +# +--- +setup: + - do: + indices.create: + index: test + + - do: + index: + index: test + 
type: test + id: 1 + body: { "important_field": "foo" } + - do: + index: + index: test + type: test + id: 2 + body: { "important_field": "foo foo foo" } + - do: + index: + index: test + type: test + id: 3 + body: { "important_field": "foo foo" } + + - do: + indices.refresh: {} +--- +"document scoring": + - do: + search: + index: test + body: + query: + function_score: + query: + match: + important_field: "foo" + functions: + - script_score: + script: + inline: "pure_df" + lang: "expert_scripts" + params: + field: "important_field" + term: "foo" + + - length: { hits.hits: 3 } + - match: {hits.hits.0._id: "2" } + - match: {hits.hits.1._id: "3" } + - match: {hits.hits.2._id: "1" } diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 50f2ac571a4..9db78e5cbb2 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -54,6 +54,16 @@ bundlePlugin { } } +additionalTest('testRepositoryCreds'){ + include '**/RepositorySettingsCredentialsTests.class' + systemProperty 'es.allow_insecure_settings', 'true' +} + +test { + // these are tested explicitly in separate test tasks + exclude '**/*CredentialsTests.class' +} + integTestCluster { keystoreSetting 's3.client.default.access_key', 'myaccesskey' keystoreSetting 's3.client.default.secret_key', 'mysecretkey' diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/InternalAwsS3Service.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/InternalAwsS3Service.java index c76280f116c..58a130bcd95 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/InternalAwsS3Service.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/InternalAwsS3Service.java @@ -26,6 +26,7 @@ import java.util.function.Function; import com.amazonaws.ClientConfiguration; import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSCredentialsProvider; +import 
com.amazonaws.auth.BasicAWSCredentials; import com.amazonaws.auth.InstanceProfileCredentialsProvider; import com.amazonaws.http.IdleConnectionReaper; import com.amazonaws.internal.StaticCredentialsProvider; @@ -35,6 +36,8 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -69,7 +72,7 @@ class InternalAwsS3Service extends AbstractLifecycleComponent implements AwsS3Se logger.debug("creating S3 client with client_name [{}], endpoint [{}]", clientName, clientSettings.endpoint); - AWSCredentialsProvider credentials = buildCredentials(logger, clientSettings); + AWSCredentialsProvider credentials = buildCredentials(logger, deprecationLogger, clientSettings, repositorySettings); ClientConfiguration configuration = buildConfiguration(clientSettings, repositorySettings); client = new AmazonS3Client(credentials, configuration); @@ -106,16 +109,44 @@ class InternalAwsS3Service extends AbstractLifecycleComponent implements AwsS3Se } // pkg private for tests - static AWSCredentialsProvider buildCredentials(Logger logger, S3ClientSettings clientSettings) { - if (clientSettings.credentials == null) { + static AWSCredentialsProvider buildCredentials(Logger logger, DeprecationLogger deprecationLogger, + S3ClientSettings clientSettings, Settings repositorySettings) { + + + BasicAWSCredentials credentials = clientSettings.credentials; + if (S3Repository.ACCESS_KEY_SETTING.exists(repositorySettings)) { + if (S3Repository.SECRET_KEY_SETTING.exists(repositorySettings) == false) { + throw new IllegalArgumentException("Repository setting [" + S3Repository.ACCESS_KEY_SETTING.getKey() + + " must be accompanied by setting [" + 
S3Repository.SECRET_KEY_SETTING.getKey() + "]"); + } + try (SecureString key = S3Repository.ACCESS_KEY_SETTING.get(repositorySettings); + SecureString secret = S3Repository.SECRET_KEY_SETTING.get(repositorySettings)) { + credentials = new BasicAWSCredentials(key.toString(), secret.toString()); + } + // backcompat for reading keys out of repository settings + deprecationLogger.deprecated("Using s3 access/secret key from repository settings. Instead " + + "store these in named clients and the elasticsearch keystore for secure settings."); + } else if (S3Repository.SECRET_KEY_SETTING.exists(repositorySettings)) { + throw new IllegalArgumentException("Repository setting [" + S3Repository.SECRET_KEY_SETTING.getKey() + + " must be accompanied by setting [" + S3Repository.ACCESS_KEY_SETTING.getKey() + "]"); + } + if (credentials == null) { logger.debug("Using instance profile credentials"); return new PrivilegedInstanceProfileCredentialsProvider(); } else { logger.debug("Using basic key/secret credentials"); - return new StaticCredentialsProvider(clientSettings.credentials); + return new StaticCredentialsProvider(credentials); } } + /** Returns the value for a given setting from the repository, or returns the fallback value. 
*/ + private static T getRepoValue(Settings repositorySettings, Setting repositorySetting, T fallback) { + if (repositorySetting.exists(repositorySettings)) { + return repositorySetting.get(repositorySettings); + } + return fallback; + } + @Override protected void doStart() throws ElasticsearchException { } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index b73848eed60..eeca906ff49 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -21,14 +21,14 @@ package org.elasticsearch.repositories.s3; import java.io.IOException; -import com.amazonaws.ClientConfiguration; import com.amazonaws.services.s3.AmazonS3; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.settings.SecureSetting; +import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -53,6 +53,12 @@ class S3Repository extends BlobStoreRepository { static final String TYPE = "s3"; + /** The access key to authenticate with s3. This setting is insecure because cluster settings are stored in cluster state */ + static final Setting ACCESS_KEY_SETTING = SecureSetting.insecureString("access_key"); + + /** The secret key to authenticate with s3. 
This setting is insecure because cluster settings are stored in cluster state */ + static final Setting SECRET_KEY_SETTING = SecureSetting.insecureString("secret_key"); + /** * Default is to use 100MB (S3 defaults) for heaps above 2GB and 5% of * the available memory for smaller heaps. diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java index 85398571dc1..9863402ec37 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java @@ -24,10 +24,12 @@ import com.amazonaws.Protocol; import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSCredentialsProvider; import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.SecureSetting; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; @@ -35,7 +37,8 @@ public class AwsS3ServiceImplTests extends ESTestCase { public void testAWSCredentialsWithSystemProviders() { S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(Settings.EMPTY, "default"); - AWSCredentialsProvider credentialsProvider = InternalAwsS3Service.buildCredentials(logger, clientSettings); + AWSCredentialsProvider credentialsProvider = + InternalAwsS3Service.buildCredentials(logger, deprecationLogger, clientSettings, Settings.EMPTY); assertThat(credentialsProvider, instanceOf(InternalAwsS3Service.PrivilegedInstanceProfileCredentialsProvider.class)); } @@ -44,7 +47,7 @@ public class AwsS3ServiceImplTests extends ESTestCase { 
secureSettings.setString("s3.client.default.access_key", "aws_key"); secureSettings.setString("s3.client.default.secret_key", "aws_secret"); Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); - launchAWSCredentialsWithElasticsearchSettingsTest(Settings.EMPTY, settings, "aws_key", "aws_secret"); + assertCredentials(Settings.EMPTY, settings, "aws_key", "aws_secret"); } public void testAwsCredsExplicitConfigSettings() { @@ -55,14 +58,38 @@ public class AwsS3ServiceImplTests extends ESTestCase { secureSettings.setString("s3.client.default.access_key", "wrong_key"); secureSettings.setString("s3.client.default.secret_key", "wrong_secret"); Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); - launchAWSCredentialsWithElasticsearchSettingsTest(repositorySettings, settings, "aws_key", "aws_secret"); + assertCredentials(repositorySettings, settings, "aws_key", "aws_secret"); } - private void launchAWSCredentialsWithElasticsearchSettingsTest(Settings singleRepositorySettings, Settings settings, - String expectedKey, String expectedSecret) { + public void testRepositorySettingsCredentialsDisallowed() { + Settings repositorySettings = Settings.builder() + .put(S3Repository.ACCESS_KEY_SETTING.getKey(), "aws_key") + .put(S3Repository.SECRET_KEY_SETTING.getKey(), "aws_secret").build(); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> + assertCredentials(repositorySettings, Settings.EMPTY, "aws_key", "aws_secret")); + assertThat(e.getMessage(), containsString("Setting [access_key] is insecure")); + } + + public void testRepositorySettingsCredentialsMissingKey() { + Settings repositorySettings = Settings.builder().put(S3Repository.SECRET_KEY_SETTING.getKey(), "aws_secret").build(); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> + assertCredentials(repositorySettings, Settings.EMPTY, "aws_key", "aws_secret")); + assertThat(e.getMessage(), 
containsString("must be accompanied by setting [access_key]")); + } + + public void testRepositorySettingsCredentialsMissingSecret() { + Settings repositorySettings = Settings.builder().put(S3Repository.ACCESS_KEY_SETTING.getKey(), "aws_key").build(); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> + assertCredentials(repositorySettings, Settings.EMPTY, "aws_key", "aws_secret")); + assertThat(e.getMessage(), containsString("must be accompanied by setting [secret_key]")); + } + + private void assertCredentials(Settings singleRepositorySettings, Settings settings, + String expectedKey, String expectedSecret) { String configName = InternalAwsS3Service.CLIENT_NAME.get(singleRepositorySettings); S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(settings, configName); - AWSCredentials credentials = InternalAwsS3Service.buildCredentials(logger, clientSettings).getCredentials(); + AWSCredentials credentials = InternalAwsS3Service.buildCredentials(logger, deprecationLogger, + clientSettings, singleRepositorySettings).getCredentials(); assertThat(credentials.getAWSAccessKeyId(), is(expectedKey)); assertThat(credentials.getAWSSecretKey(), is(expectedSecret)); } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositorySettingsCredentialsTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositorySettingsCredentialsTests.java new file mode 100644 index 00000000000..c3e7069fdfd --- /dev/null +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositorySettingsCredentialsTests.java @@ -0,0 +1,41 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.repositories.s3; + +import com.amazonaws.auth.AWSCredentials; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; + +public class RepositorySettingsCredentialsTests extends ESTestCase { + + public void testRepositorySettingsCredentials() { + Settings repositorySettings = Settings.builder() + .put(S3Repository.ACCESS_KEY_SETTING.getKey(), "aws_key") + .put(S3Repository.SECRET_KEY_SETTING.getKey(), "aws_secret").build(); + AWSCredentials credentials = InternalAwsS3Service.buildCredentials(logger, deprecationLogger, + S3ClientSettings.getClientSettings(Settings.EMPTY, "default"), repositorySettings).getCredentials(); + assertEquals("aws_key", credentials.getAWSAccessKeyId()); + assertEquals("aws_secret", credentials.getAWSSecretKey()); + assertSettingDeprecationsAndWarnings(new Setting[] { S3Repository.ACCESS_KEY_SETTING, S3Repository.SECRET_KEY_SETTING }, + "Using s3 access/secret key from repository settings. 
" + + "Instead store these in named clients and the elasticsearch keystore for secure settings."); + } +} diff --git a/qa/reindex-from-old/build.gradle b/qa/reindex-from-old/build.gradle new file mode 100644 index 00000000000..72927842e03 --- /dev/null +++ b/qa/reindex-from-old/build.gradle @@ -0,0 +1,91 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +description = """\ +Tests reindex-from-remote against some specific versions of +Elasticsearch prior to 5.0. Versions of Elasticsearch >= 5.0 +should be able to use the standard launching mechanism which +is more flexible and reliable. +""" + +import org.apache.tools.ant.taskdefs.condition.Os + +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +integTestCluster { + // Whitelist reindexing from the local node so we can test it. + setting 'reindex.remote.whitelist', '127.0.0.1:*' +} + +configurations { + oldesFixture + es2 + es1 + es090 +} + +dependencies { + oldesFixture project(':test:fixtures:old-elasticsearch') + /* Right now we just test against the latest version of each major we expect + * reindex-from-remote to work against. We could randomize the versions but + * that doesn't seem worth it at this point. 
*/ + es2 'org.elasticsearch.distribution.zip:elasticsearch:2.4.5@zip' + es1 'org.elasticsearch:elasticsearch:1.7.6@zip' + es090 'org.elasticsearch:elasticsearch:0.90.13@zip' +} + +if (project.javaVersion == JavaVersion.VERSION_1_9 || Os.isFamily(Os.FAMILY_WINDOWS)) { + /* We can't run the dependencies with Java 9 so for now we'll skip the whole + * thing. We can't get the pid files in windows so we skip that as well.... */ + integTest.enabled = false +} else { + /* Set up tasks to unzip and run the old versions of ES before running the + * integration tests. */ + for (String version : ['2', '1', '090']) { + Task unzip = task("unzipEs${version}", type: Sync) { + Configuration oldEsDependency = configurations['es' + version] + dependsOn oldEsDependency + /* Use a closure here to delay resolution of the dependency until we need + * it */ + from { + oldEsDependency.collect { zipTree(it) } + } + into temporaryDir + } + Task fixture = task("oldEs${version}Fixture", + type: org.elasticsearch.gradle.test.AntFixture) { + dependsOn project.configurations.oldesFixture + dependsOn unzip + executable = new File(project.javaHome, 'bin/java') + env 'CLASSPATH', "${ -> project.configurations.oldesFixture.asPath }" + args 'oldes.OldElasticsearch', + baseDir, + unzip.temporaryDir, + version == '090' + } + integTest.dependsOn fixture + integTestRunner { + /* Use a closure on the string to delay evaluation until right before we + * run the integration tests so that we can be sure that the file is + * ready. 
*/ + systemProperty "es${version}.port", "${ -> fixture.addressAndPort }" + } + } +} diff --git a/qa/reindex-from-old/src/test/java/org/elasticsearch/smoketest/ReindexFromOldRemoteIT.java b/qa/reindex-from-old/src/test/java/org/elasticsearch/smoketest/ReindexFromOldRemoteIT.java new file mode 100644 index 00000000000..162e68e4027 --- /dev/null +++ b/qa/reindex-from-old/src/test/java/org/elasticsearch/smoketest/ReindexFromOldRemoteIT.java @@ -0,0 +1,108 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.smoketest; + +import org.apache.http.HttpEntity; +import org.apache.http.HttpHost; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.test.rest.ESRestTestCase; + +import java.io.IOException; +import java.util.Map; +import java.util.TreeMap; + +import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.containsString; + +public class ReindexFromOldRemoteIT extends ESRestTestCase { + private void oldEsTestCase(String portPropertyName, String requestsPerSecond) throws IOException { + int oldEsPort = Integer.parseInt(System.getProperty(portPropertyName)); + try (RestClient oldEs = RestClient.builder(new HttpHost("127.0.0.1", oldEsPort)).build()) { + try { + HttpEntity entity = new StringEntity("{\"settings\":{\"number_of_shards\": 1}}", ContentType.APPLICATION_JSON); + oldEs.performRequest("PUT", "/test", singletonMap("refresh", "true"), entity); + + entity = new StringEntity("{\"test\":\"test\"}", ContentType.APPLICATION_JSON); + oldEs.performRequest("PUT", "/test/doc/testdoc1", singletonMap("refresh", "true"), entity); + oldEs.performRequest("PUT", "/test/doc/testdoc2", singletonMap("refresh", "true"), entity); + oldEs.performRequest("PUT", "/test/doc/testdoc3", singletonMap("refresh", "true"), entity); + oldEs.performRequest("PUT", "/test/doc/testdoc4", singletonMap("refresh", "true"), entity); + oldEs.performRequest("PUT", "/test/doc/testdoc5", singletonMap("refresh", "true"), entity); + + entity = new StringEntity( + "{\n" + + " \"source\":{\n" + + " \"index\": \"test\",\n" + + " \"size\": 1,\n" + + " \"remote\": {\n" + + " \"host\": \"http://127.0.0.1:" + oldEsPort + "\"\n" + + " }\n" + + " },\n" + + " \"dest\": {\n" + + " \"index\": \"test\"\n" + + " }\n" + + "}", + ContentType.APPLICATION_JSON); + Map params = 
new TreeMap<>(); + params.put("refresh", "true"); + params.put("pretty", "true"); + if (requestsPerSecond != null) { + params.put("requests_per_second", requestsPerSecond); + } + client().performRequest("POST", "/_reindex", params, entity); + + Response response = client().performRequest("POST", "test/_search", singletonMap("pretty", "true")); + String result = EntityUtils.toString(response.getEntity()); + assertThat(result, containsString("\"_id\" : \"testdoc1\"")); + } finally { + oldEs.performRequest("DELETE", "/test"); + } + } + } + + public void testEs2() throws IOException { + oldEsTestCase("es2.port", null); + } + + public void testEs1() throws IOException { + oldEsTestCase("es1.port", null); + } + + public void testEs090() throws IOException { + oldEsTestCase("es090.port", null); + } + + public void testEs2WithFunnyThrottle() throws IOException { + oldEsTestCase("es2.port", "11"); // 11 requests per second should give us a nice "funny" number on the scroll timeout + } + + public void testEs1WithFunnyThrottle() throws IOException { + oldEsTestCase("es1.port", "11"); // 11 requests per second should give us a nice "funny" number on the scroll timeout + } + + public void testEs090WithFunnyThrottle() throws IOException { + oldEsTestCase("es090.port", "11"); // 11 requests per second should give us a nice "funny" number on the scroll timeout + } + +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/12_slices.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/12_slices.yaml index 1695bdb2352..ac66af0095e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/12_slices.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/12_slices.yaml @@ -1,5 +1,5 @@ --- -"Sliced scroll": +setup: - do: indices.create: index: test_sliced_scroll @@ -8,31 +8,101 @@ index: index: test_sliced_scroll type: test - id: 42 + id: 1 body: { foo: 1 } + - do: + index: + index: test_sliced_scroll + type: test + id: 2 + 
body: { foo: 2 } + + - do: + index: + index: test_sliced_scroll + type: test + id: 3 + body: { foo: 3 } + + - do: + index: + index: test_sliced_scroll + type: test + id: 4 + body: { foo: 4 } + - do: indices.refresh: {} +--- +"Sliced scroll": + - skip: + version: " - 5.3.0" + reason: Prior version uses a random seed per node to compute the hash of the keys. + - do: search: index: test_sliced_scroll - size: 1 scroll: 1m sort: foo body: slice: id: 0 - max: 3 + max: 2 query: match_all: {} - set: {_scroll_id: scroll_id} + - match: {hits.total: 3 } + - length: {hits.hits: 3 } + - match: {hits.hits.0._id: "2" } + - match: {hits.hits.1._id: "3" } + - match: {hits.hits.2._id: "4" } + + - do: + scroll: + scroll_id: $scroll_id + scroll: 1m + + - match: {hits.total: 3 } + - length: {hits.hits: 0 } + + - do: + clear_scroll: + scroll_id: $scroll_id + + - do: + search: + index: test_sliced_scroll + scroll: 1m + sort: foo + body: + slice: + id: 1 + max: 2 + query: + match_all: {} + + - set: {_scroll_id: scroll_id} + - match: {hits.total: 1 } + - length: {hits.hits: 1 } + - match: {hits.hits.0._id: "1" } + + - do: + scroll: + scroll_id: $scroll_id + scroll: 1m + + - match: {hits.total: 1 } + - length: {hits.hits: 0 } - do: clear_scroll: scroll_id: $scroll_id +--- +"Sliced scroll with invalid arguments": - do: catch: /query_phase_execution_exception.*The number of slices.*index.max_slices_per_scroll/ search: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yaml index 63deebcd870..4955dcfb4da 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yaml @@ -10,6 +10,8 @@ setup: "properties": "number": "type" : "integer" + "date": + "type" : "date" - do: cluster.health: wait_for_status: green @@ -143,3 +145,64 @@ 
setup: - match: { aggregations.histo.buckets.3.key_as_string: "Value is 150.0" } - match: { aggregations.histo.buckets.3.doc_count: 1 } + +--- +"Deprecated _time order": + + - skip: + version: " - 5.99.99" + reason: _time order deprecated in 6.0, replaced by _key + features: "warnings" + + - do: + index: + index: test_1 + type: test + id: 1 + body: { "date" : "2016-01-01" } + + - do: + index: + index: test_1 + type: test + id: 2 + body: { "date" : "2016-01-02" } + + - do: + index: + index: test_1 + type: test + id: 3 + body: { "date" : "2016-02-01" } + + - do: + index: + index: test_1 + type: test + id: 4 + body: { "date" : "2016-03-01" } + + - do: + indices.refresh: {} + + - do: + search: + body: { "aggs" : { "histo" : { "date_histogram" : { "field" : "date", "interval" : "month", "order" : { "_time" : "desc" } } } } } + warnings: + - "Deprecated aggregation order key [_time] used, replaced by [_key]" + + - match: { hits.total: 4 } + + - length: { aggregations.histo.buckets: 3 } + + - match: { aggregations.histo.buckets.0.key_as_string: "2016-03-01T00:00:00.000Z" } + + - match: { aggregations.histo.buckets.0.doc_count: 1 } + + - match: { aggregations.histo.buckets.1.key_as_string: "2016-02-01T00:00:00.000Z" } + + - match: { aggregations.histo.buckets.1.doc_count: 1 } + + - match: { aggregations.histo.buckets.2.key_as_string: "2016-01-01T00:00:00.000Z" } + + - match: { aggregations.histo.buckets.2.doc_count: 2 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml index c9ba94cf615..9cc30bbcd1b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml @@ -747,3 +747,57 @@ setup: - match: { aggregations.number_terms.buckets.2.key: 14.6 } - match: { aggregations.number_terms.buckets.2.doc_count: 1 } + +--- 
+"Deprecated _term order": + + - skip: + version: " - 5.99.99" + reason: _term order deprecated in 6.0, replaced by _key + features: "warnings" + + - do: + index: + index: test_1 + type: test + id: 1 + body: { "str": "abc" } + + - do: + index: + index: test_1 + type: test + id: 2 + body: { "str": "abc" } + + - do: + index: + index: test_1 + type: test + id: 3 + body: { "str": "bcd" } + + - do: + indices.refresh: {} + + - do: + search: + body: { "size" : 0, "aggs" : { "str_terms" : { "terms" : { "field" : "str", "order" : { "_term" : "desc" } } } } } + warnings: + - "Deprecated aggregation order key [_term] used, replaced by [_key]" + + - match: { hits.total: 3 } + + - length: { aggregations.str_terms.buckets: 2 } + + - match: { aggregations.str_terms.buckets.0.key: "bcd" } + + - is_false: aggregations.str_terms.buckets.0.key_as_string + + - match: { aggregations.str_terms.buckets.0.doc_count: 1 } + + - match: { aggregations.str_terms.buckets.1.key: "abc" } + + - is_false: aggregations.str_terms.buckets.1.key_as_string + + - match: { aggregations.str_terms.buckets.1.doc_count: 2 } diff --git a/settings.gradle b/settings.gradle index 6c18f1c1efb..7acedd9c98b 100644 --- a/settings.gradle +++ b/settings.gradle @@ -26,6 +26,7 @@ List projects = [ 'test:fixtures:example-fixture', 'test:fixtures:hdfs-fixture', 'test:fixtures:krb5kdc-fixture', + 'test:fixtures:old-elasticsearch', 'test:logger-usage', 'modules:aggs-matrix-stats', 'modules:analysis-common', @@ -63,6 +64,7 @@ List projects = [ 'qa:evil-tests', 'qa:multi-cluster-search', 'qa:no-bootstrap-tests', + 'qa:reindex-from-old', 'qa:rolling-upgrade', 'qa:smoke-test-client', 'qa:smoke-test-http', @@ -76,6 +78,15 @@ List projects = [ 'qa:wildfly' ] +File examplePluginsDir = new File(rootProject.projectDir, 'plugins/examples') +List examplePlugins = [] +for (File example : examplePluginsDir.listFiles()) { + if (example.isDirectory() == false) continue; + if (example.name.startsWith('build') || 
example.name.startsWith('.')) continue; + projects.add("example-plugins:${example.name}".toString()) + examplePlugins.add(example.name) +} + boolean isEclipse = System.getProperty("eclipse.launcher") != null || gradle.startParameter.taskNames.contains('eclipse') || gradle.startParameter.taskNames.contains('cleanEclipse') if (isEclipse) { // eclipse cannot handle an intermediate dependency between main and test, so we must create separate projects @@ -86,6 +97,11 @@ if (isEclipse) { include projects.toArray(new String[0]) project(':build-tools').projectDir = new File(rootProject.projectDir, 'buildSrc') +project(':example-plugins').projectDir = new File(rootProject.projectDir, 'plugins/examples') + +for (String example : examplePlugins) { + project(":example-plugins:${example}").projectDir = new File(rootProject.projectDir, "plugins/examples/${example}") +} if (isEclipse) { project(":core").projectDir = new File(rootProject.projectDir, 'core/src/main') diff --git a/test/fixtures/old-elasticsearch/build.gradle b/test/fixtures/old-elasticsearch/build.gradle new file mode 100644 index 00000000000..5cfc02bbba3 --- /dev/null +++ b/test/fixtures/old-elasticsearch/build.gradle @@ -0,0 +1,32 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +description = """\ +Launches versions of Elasticsearch prior to 5.0 for testing. +These need special handling because they do not support writing +a "ports" file with the port on which Elasticsearch is running. +""" + +apply plugin: 'elasticsearch.build' +test.enabled = false + +dependencies { + // Just for the constants.... + compile "org.apache.lucene:lucene-core:${versions.lucene}" +} diff --git a/test/fixtures/old-elasticsearch/src/main/java/oldes/OldElasticsearch.java b/test/fixtures/old-elasticsearch/src/main/java/oldes/OldElasticsearch.java new file mode 100644 index 00000000000..bd4c3f8ccd1 --- /dev/null +++ b/test/fixtures/old-elasticsearch/src/main/java/oldes/OldElasticsearch.java @@ -0,0 +1,131 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package oldes; + +import org.apache.lucene.util.Constants; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardCopyOption; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Starts a version of Elasticsearch that has been unzipped into an empty directory, + * instructing it to ask the OS for an unused port, grepping the logs for the port + * it actually got, and writing a {@code ports} file with the port. This is only + * required for versions of Elasticsearch before 5.0 because they do not support + * writing a "ports" file. + */ +public class OldElasticsearch { + public static void main(String[] args) throws IOException { + Path baseDir = Paths.get(args[0]); + Path unzipDir = Paths.get(args[1]); + + // 0.90 must be explicitly foregrounded + boolean explicitlyForeground; + switch (args[2]) { + case "true": + explicitlyForeground = true; + break; + case "false": + explicitlyForeground = false; + break; + default: + System.err.println("the third argument must be true or false"); + System.exit(1); + return; + } + + Iterator children = Files.list(unzipDir).iterator(); + if (false == children.hasNext()) { + System.err.println("expected the es directory to contain a single child directory but contained none."); + System.exit(1); + } + Path esDir = children.next(); + if (children.hasNext()) { + System.err.println("expected the es directory to contains a single child directory but contained [" + esDir + "] and [" + + children.next() + "]."); + System.exit(1); + } + if (false == Files.isDirectory(esDir)) { + System.err.println("expected the es directory to contains a single child directory but contained a single child file."); + 
System.exit(1); + } + + Path bin = esDir.resolve("bin").resolve("elasticsearch" + (Constants.WINDOWS ? ".bat" : "")); + Path config = esDir.resolve("config").resolve("elasticsearch.yml"); + + Files.write(config, Arrays.asList("http.port: 0", "transport.tcp.port: 0", "network.host: 127.0.0.1"), StandardCharsets.UTF_8); + + List command = new ArrayList<>(); + command.add(bin.toString()); + if (explicitlyForeground) { + command.add("-f"); + } + command.add("-p"); + command.add("../pid"); + ProcessBuilder subprocess = new ProcessBuilder(command); + Process process = subprocess.start(); + System.out.println("Running " + command); + + int pid = 0; + int port = 0; + + Pattern pidPattern = Pattern.compile("pid\\[(\\d+)\\]"); + Pattern httpPortPattern = Pattern.compile("\\[http\\s+\\].+bound_address.+127\\.0\\.0\\.1:(\\d+)"); + try (BufferedReader stdout = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8))) { + String line; + while ((line = stdout.readLine()) != null && (pid == 0 || port == 0)) { + System.out.println(line); + Matcher m = pidPattern.matcher(line); + if (m.find()) { + pid = Integer.parseInt(m.group(1)); + System.out.println("Found pid: " + pid); + continue; + } + m = httpPortPattern.matcher(line); + if (m.find()) { + port = Integer.parseInt(m.group(1)); + System.out.println("Found port: " + port); + continue; + } + } + } + + if (port == 0) { + System.err.println("port not found"); + System.exit(1); + } + + Path tmp = Files.createTempFile(baseDir, null, null); + Files.write(tmp, Integer.toString(port).getBytes(StandardCharsets.UTF_8)); + Files.move(tmp, baseDir.resolve("ports"), StandardCopyOption.ATOMIC_MOVE); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/action/bulk/byscroll/AbstractAsyncBulkByScrollActionTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollActionTestCase.java similarity index 97% rename from 
test/framework/src/main/java/org/elasticsearch/action/bulk/byscroll/AbstractAsyncBulkByScrollActionTestCase.java rename to test/framework/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollActionTestCase.java index 907f8965632..b4e01b18a56 100644 --- a/test/framework/src/main/java/org/elasticsearch/action/bulk/byscroll/AbstractAsyncBulkByScrollActionTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollActionTestCase.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.action.bulk.byscroll; +package org.elasticsearch.index.reindex; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.tasks.TaskId; diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java index 392dcf1542a..178eed2dcef 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java @@ -51,7 +51,7 @@ public abstract class AbstractSerializingTestCase exten return deserializedInstance; } - private T copyInstance(T instance) throws IOException { + protected T copyInstance(T instance) throws IOException { try (BytesStreamOutput output = new BytesStreamOutput()) { instance.writeTo(output); try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), @@ -112,9 +112,9 @@ public abstract class AbstractWireSerializingTestCase exten /** * Get the {@link NamedWriteableRegistry} to use when de-serializing the object. - * + * * Override this method if you need to register {@link NamedWriteable}s for the test object to de-serialize. 
- * + * * By default this will return a {@link NamedWriteableRegistry} with no registered {@link NamedWriteable}s */ protected NamedWriteableRegistry getNamedWriteableRegistry() { diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index ed8f271bab1..56f898b9009 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -72,6 +72,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.env.Environment; @@ -139,8 +140,11 @@ import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; import static java.util.Collections.singletonList; import static org.elasticsearch.common.util.CollectionUtils.arrayAsArrayList; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItem; /** * Base testcase for randomized unit testing with Elasticsearch @@ -335,7 +339,7 @@ public abstract class ESTestCase extends LuceneTestCase { final Set actualWarningValues = actualWarnings.stream().map(DeprecationLogger::extractWarningValueFromWarningHeader).collect(Collectors.toSet()); for (String msg : expectedWarnings) { - assertTrue(msg, actualWarningValues.contains(DeprecationLogger.escape(msg))); + assertThat(actualWarningValues, hasItem(DeprecationLogger.escape(msg))); } assertEquals("Expected " + expectedWarnings.length 
+ " warnings but found " + actualWarnings.size() + "\nExpected: " + Arrays.asList(expectedWarnings) + "\nActual: " + actualWarnings, @@ -629,10 +633,14 @@ public abstract class ESTestCase extends LuceneTestCase { return generateRandomStringArray(maxArraySize, maxStringSize, allowNull, true); } - private static String[] TIME_SUFFIXES = new String[]{"d", "h", "ms", "s", "m"}; + private static final String[] TIME_SUFFIXES = new String[]{"d", "h", "ms", "s", "m", "micros", "nanos"}; - private static String randomTimeValue(int lower, int upper) { - return randomIntBetween(lower, upper) + randomFrom(TIME_SUFFIXES); + public static String randomTimeValue(int lower, int upper, String[] suffixes) { + return randomIntBetween(lower, upper) + randomFrom(suffixes); + } + + public static String randomTimeValue(int lower, int upper) { + return randomTimeValue(lower, upper, TIME_SUFFIXES); } public static String randomTimeValue() { @@ -920,18 +928,39 @@ public abstract class ESTestCase extends LuceneTestCase { * recursive shuffling behavior can be made by passing in the names of fields which * internally should stay untouched. */ - protected static XContentBuilder shuffleXContent(XContentParser parser, boolean prettyPrint, String... exceptFieldNames) - throws IOException { - //we need a sorted map for reproducibility, as we are going to shuffle its keys and write XContent back - Map shuffledMap = shuffleMap((LinkedHashMap)parser.mapOrdered(), - new HashSet<>(Arrays.asList(exceptFieldNames))); + public XContentBuilder shuffleXContent(XContentParser parser, boolean prettyPrint, String... exceptFieldNames) throws IOException { XContentBuilder xContentBuilder = XContentFactory.contentBuilder(parser.contentType()); if (prettyPrint) { xContentBuilder.prettyPrint(); } + Token token = parser.currentToken() == null ? 
parser.nextToken() : parser.currentToken(); + if (token == Token.START_ARRAY) { + List shuffledList = shuffleList(parser.listOrderedMap(), new HashSet<>(Arrays.asList(exceptFieldNames))); + return xContentBuilder.value(shuffledList); + } + //we need a sorted map for reproducibility, as we are going to shuffle its keys and write XContent back + Map shuffledMap = shuffleMap((LinkedHashMap)parser.mapOrdered(), + new HashSet<>(Arrays.asList(exceptFieldNames))); return xContentBuilder.map(shuffledMap); } + // shuffle fields of objects in the list, but not the list itself + private static List shuffleList(List list, Set exceptFields) { + List targetList = new ArrayList<>(); + for(Object value : list) { + if (value instanceof Map) { + @SuppressWarnings("unchecked") + LinkedHashMap valueMap = (LinkedHashMap) value; + targetList.add(shuffleMap(valueMap, exceptFields)); + } else if(value instanceof List) { + targetList.add(shuffleList((List) value, exceptFields)); + } else { + targetList.add(value); + } + } + return targetList; + } + public static LinkedHashMap shuffleMap(LinkedHashMap map, Set exceptFields) { List keys = new ArrayList<>(map.keySet()); LinkedHashMap targetMap = new LinkedHashMap<>(); @@ -942,6 +971,8 @@ public abstract class ESTestCase extends LuceneTestCase { @SuppressWarnings("unchecked") LinkedHashMap valueMap = (LinkedHashMap) value; targetMap.put(key, shuffleMap(valueMap, exceptFields)); + } else if(value instanceof List && exceptFields.contains(key) == false) { + targetMap.put(key, shuffleList((List) value, exceptFields)); } else { targetMap.put(key, value); }