diff --git a/buildSrc/src/main/resources/forbidden/all-signatures.txt b/buildSrc/src/main/resources/forbidden/all-signatures.txt index c1e65cbaf22..f72de510721 100644 --- a/buildSrc/src/main/resources/forbidden/all-signatures.txt +++ b/buildSrc/src/main/resources/forbidden/all-signatures.txt @@ -45,6 +45,7 @@ org.apache.lucene.search.NumericRangeFilter org.apache.lucene.search.PrefixFilter org.apache.lucene.search.QueryWrapperFilter org.apache.lucene.search.join.BitDocIdSetCachingWrapperFilter +org.apache.lucene.index.IndexWriter#isLocked(org.apache.lucene.store.Directory) java.nio.file.Paths @ Use org.elasticsearch.common.io.PathUtils.get() instead. java.nio.file.FileSystems#getDefault() @ use org.elasticsearch.common.io.PathUtils.getDefaultFileSystem() instead. @@ -125,4 +126,6 @@ java.util.Collections#EMPTY_MAP java.util.Collections#EMPTY_SET java.util.Collections#shuffle(java.util.List) @ Use java.util.Collections#shuffle(java.util.List, java.util.Random) with a reproducible source of randomness -java.util.Random#<init>() @ Use org.elasticsearch.common.random.Randomness#create for reproducible sources of randomness +@defaultMessage Use org.elasticsearch.common.Randomness#get for reproducible sources of randomness +java.util.Random#<init>() +java.util.concurrent.ThreadLocalRandom \ No newline at end of file diff --git a/core/build.gradle b/core/build.gradle index 7b80449729e..229951f895d 100644 --- a/core/build.gradle +++ b/core/build.gradle @@ -102,8 +102,8 @@ if (isEclipse) { } } -compileJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-fallthrough,-overrides,-rawtypes,-serial,-try,-unchecked" -compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-fallthrough,-overrides,-rawtypes,-serial,-try,-unchecked" +compileJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-serial,-try,-unchecked" +compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-serial,-try,-unchecked" forbiddenPatterns { exclude '**/*.json' diff --git a/core/src/main/java/org/elasticsearch/ElasticsearchException.java b/core/src/main/java/org/elasticsearch/ElasticsearchException.java index 9f9dbf18869..4a35bcbcfb0 100644 --- a/core/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/core/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -566,7 +566,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte REFRESH_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.RefreshFailedEngineException.class, org.elasticsearch.index.engine.RefreshFailedEngineException::new, 90), AGGREGATION_INITIALIZATION_EXCEPTION(org.elasticsearch.search.aggregations.AggregationInitializationException.class, org.elasticsearch.search.aggregations.AggregationInitializationException::new, 91), DELAY_RECOVERY_EXCEPTION(org.elasticsearch.indices.recovery.DelayRecoveryException.class, org.elasticsearch.indices.recovery.DelayRecoveryException::new, 92), - INDEX_WARMER_MISSING_EXCEPTION(org.elasticsearch.search.warmer.IndexWarmerMissingException.class, org.elasticsearch.search.warmer.IndexWarmerMissingException::new, 93), + // 93 used to be for IndexWarmerMissingException NO_NODE_AVAILABLE_EXCEPTION(org.elasticsearch.client.transport.NoNodeAvailableException.class, org.elasticsearch.client.transport.NoNodeAvailableException::new, 94), INVALID_SNAPSHOT_NAME_EXCEPTION(org.elasticsearch.snapshots.InvalidSnapshotNameException.class, org.elasticsearch.snapshots.InvalidSnapshotNameException::new, 96),
ILLEGAL_INDEX_SHARD_STATE_EXCEPTION(org.elasticsearch.index.shard.IllegalIndexShardStateException.class, org.elasticsearch.index.shard.IllegalIndexShardStateException::new, 97), diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java index ac2575597e8..e55800682dd 100644 --- a/core/src/main/java/org/elasticsearch/Version.java +++ b/core/src/main/java/org/elasticsearch/Version.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.monitor.jvm.JvmInfo; @@ -286,7 +285,8 @@ public class Version { public static final Version CURRENT = V_3_0_0; static { - assert CURRENT.luceneVersion.equals(Lucene.VERSION) : "Version must be upgraded to [" + Lucene.VERSION + "] is still set to [" + CURRENT.luceneVersion + "]"; + assert CURRENT.luceneVersion.equals(org.apache.lucene.util.Version.LATEST) : "Version must be upgraded to [" + + org.apache.lucene.util.Version.LATEST + "] is still set to [" + CURRENT.luceneVersion + "]"; } public static Version readVersion(StreamInput in) throws IOException { @@ -457,7 +457,6 @@ public class Version { return V_0_90_0_RC1; case V_0_90_0_Beta1_ID: return V_0_90_0_Beta1; - case V_0_20_7_ID: return V_0_20_7; case V_0_20_6_ID: @@ -476,7 +475,6 @@ public class Version { return V_0_20_0; case V_0_20_0_RC1_ID: return V_0_20_0_RC1; - case V_0_19_0_RC1_ID: return V_0_19_0_RC1; case V_0_19_0_RC2_ID: @@ -511,7 +509,6 @@ public class Version { return V_0_19_12; case V_0_19_13_ID: return V_0_19_13; - case V_0_18_0_ID: return V_0_18_0; case V_0_18_1_ID: @@ -530,9 +527,8 @@ public class Version { return V_0_18_7; case V_0_18_8_ID: return V_0_18_8; - default: - return new Version(id, false, Lucene.VERSION); + return new Version(id, false, org.apache.lucene.util.Version.LATEST); } } diff --git a/core/src/main/java/org/elasticsearch/action/ActionModule.java b/core/src/main/java/org/elasticsearch/action/ActionModule.java index 11cafb326a0..5f1a181fabb 100644 --- a/core/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/core/src/main/java/org/elasticsearch/action/ActionModule.java @@ -127,12 +127,6 @@ import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeAction; import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeSettingsAction; import org.elasticsearch.action.admin.indices.validate.query.TransportValidateQueryAction; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction; -import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerAction; -import org.elasticsearch.action.admin.indices.warmer.delete.TransportDeleteWarmerAction; -import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersAction; -import org.elasticsearch.action.admin.indices.warmer.get.TransportGetWarmersAction; -import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerAction; -import org.elasticsearch.action.admin.indices.warmer.put.TransportPutWarmerAction; import org.elasticsearch.action.bulk.BulkAction; import org.elasticsearch.action.bulk.TransportBulkAction; import org.elasticsearch.action.bulk.TransportShardBulkAction; @@ -304,9 +298,6 @@ public class ActionModule extends AbstractModule { registerAction(UpgradeStatusAction.INSTANCE, 
TransportUpgradeStatusAction.class); registerAction(UpgradeSettingsAction.INSTANCE, TransportUpgradeSettingsAction.class); registerAction(ClearIndicesCacheAction.INSTANCE, TransportClearIndicesCacheAction.class); - registerAction(PutWarmerAction.INSTANCE, TransportPutWarmerAction.class); - registerAction(DeleteWarmerAction.INSTANCE, TransportDeleteWarmerAction.class); - registerAction(GetWarmersAction.INSTANCE, TransportGetWarmersAction.class); registerAction(GetAliasesAction.INSTANCE, TransportGetAliasesAction.class); registerAction(AliasesExistAction.INSTANCE, TransportAliasesExistAction.class); registerAction(GetSettingsAction.INSTANCE, TransportGetSettingsAction.class); diff --git a/core/src/main/java/org/elasticsearch/action/AliasesRequest.java b/core/src/main/java/org/elasticsearch/action/AliasesRequest.java index 6e45af0cf84..a4ff57ebd20 100644 --- a/core/src/main/java/org/elasticsearch/action/AliasesRequest.java +++ b/core/src/main/java/org/elasticsearch/action/AliasesRequest.java @@ -35,7 +35,7 @@ public interface AliasesRequest extends IndicesRequest.Replaceable { /** * Sets the array of aliases that the action relates to */ - AliasesRequest aliases(String[] aliases); + AliasesRequest aliases(String... aliases); /** * Returns true if wildcards expressions among aliases should be resolved, false otherwise diff --git a/core/src/main/java/org/elasticsearch/action/IndicesRequest.java b/core/src/main/java/org/elasticsearch/action/IndicesRequest.java index 9200f99c6f7..4c62a7e849b 100644 --- a/core/src/main/java/org/elasticsearch/action/IndicesRequest.java +++ b/core/src/main/java/org/elasticsearch/action/IndicesRequest.java @@ -41,9 +41,9 @@ public interface IndicesRequest { IndicesOptions indicesOptions(); static interface Replaceable extends IndicesRequest { - /* - * Sets the array of indices that the action relates to + /** + * Sets the indices that the action relates to. */ - IndicesRequest indices(String[] indices); + IndicesRequest indices(String... 
indices); } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java index d603886d924..59b426d8c31 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java @@ -61,7 +61,7 @@ public class ClusterHealthRequest extends MasterNodeReadRequest<ClusterHealthRequest> { public static enum Feature { ALIASES((byte) 0, "_aliases", "_alias"), MAPPINGS((byte) 1, "_mappings", "_mapping"), - SETTINGS((byte) 2, "_settings"), - WARMERS((byte) 3, "_warmers", "_warmer"); + SETTINGS((byte) 2, "_settings"); private static final Feature[] FEATURES = new Feature[Feature.values().length]; @@ -97,7 +96,7 @@ public class GetIndexRequest extends ClusterInfoRequest<GetIndexRequest> { } } - private static final Feature[] DEFAULT_FEATURES = new Feature[] { Feature.ALIASES, Feature.MAPPINGS, Feature.SETTINGS, Feature.WARMERS }; + private static final Feature[] DEFAULT_FEATURES = new Feature[] { Feature.ALIASES, Feature.MAPPINGS, Feature.SETTINGS }; private Feature[] features = DEFAULT_FEATURES; private boolean humanReadable = false; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java index 1f06a25acc3..3a29237faeb 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java @@ -27,7 +27,6 @@ import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.search.warmer.IndexWarmersMetaData; import java.io.IOException; import java.util.ArrayList; @@ -39,19 +38,15 @@ import java.util.List; */ public class GetIndexResponse extends ActionResponse { - private ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> warmers = ImmutableOpenMap.of(); private ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings = ImmutableOpenMap.of(); private ImmutableOpenMap<String, List<AliasMetaData>> aliases = ImmutableOpenMap.of(); private ImmutableOpenMap<String, Settings> settings = ImmutableOpenMap.of(); private String[] indices; - GetIndexResponse(String[] indices, ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> warmers, + GetIndexResponse(String[] indices, ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings, ImmutableOpenMap<String, List<AliasMetaData>> aliases, ImmutableOpenMap<String, Settings> settings) { this.indices = indices; - if (warmers != null) { - this.warmers = warmers; - } if (mappings != null) { this.mappings = mappings; } @@ -74,14 +69,6 @@ public class GetIndexResponse extends ActionResponse { return indices(); } - public ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> warmers() { - return warmers; - } - - public ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> getWarmers() { - return warmers(); - } - public ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings() { return mappings; } @@ -110,23 +97,6 @@ public class GetIndexResponse extends ActionResponse { public void readFrom(StreamInput in) throws IOException { super.readFrom(in); this.indices = in.readStringArray(); - int warmersSize = in.readVInt(); - ImmutableOpenMap.Builder<String, List<IndexWarmersMetaData.Entry>> warmersMapBuilder = ImmutableOpenMap.builder(); - for (int i = 0; i < warmersSize; i++) { - String key = in.readString(); - int valueSize = in.readVInt(); - List<IndexWarmersMetaData.Entry> warmerEntryBuilder = new ArrayList<>(); - for (int j = 0; j < valueSize; j++) { - warmerEntryBuilder.add(new IndexWarmersMetaData.Entry(
in.readString(), - in.readStringArray(), - in.readOptionalBoolean(), - in.readBoolean() ? new IndexWarmersMetaData.SearchSource(in) : null) - ); - } - warmersMapBuilder.put(key, Collections.unmodifiableList(warmerEntryBuilder)); - } - warmers = warmersMapBuilder.build(); int mappingsSize = in.readVInt(); ImmutableOpenMap.Builder<String, ImmutableOpenMap<String, MappingMetaData>> mappingsMapBuilder = ImmutableOpenMap.builder(); for (int i = 0; i < mappingsSize; i++) { @@ -164,21 +134,6 @@ public class GetIndexResponse extends ActionResponse { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeStringArray(indices); - out.writeVInt(warmers.size()); - for (ObjectObjectCursor<String, List<IndexWarmersMetaData.Entry>> indexEntry : warmers) { - out.writeString(indexEntry.key); - out.writeVInt(indexEntry.value.size()); - for (IndexWarmersMetaData.Entry warmerEntry : indexEntry.value) { - out.writeString(warmerEntry.name()); - out.writeStringArray(warmerEntry.types()); - out.writeOptionalBoolean(warmerEntry.requestCache()); - boolean hasSource = warmerEntry.source() != null; - out.writeBoolean(hasSource); - if (hasSource) { - warmerEntry.source().writeTo(out); - } - } - } out.writeVInt(mappings.size()); for (ObjectObjectCursor<String, ImmutableOpenMap<String, MappingMetaData>> indexEntry : mappings) { out.writeString(indexEntry.key); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java index 4edbd5216cd..1b9180ce192 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java @@ -36,7 +36,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.search.warmer.IndexWarmersMetaData.Entry; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -72,7 +71,6 @@ public class TransportGetIndexAction extends TransportClusterInfoAction<GetIndexRequest, GetIndexResponse> { protected void doMasterOperation(final GetIndexRequest request, String[] concreteIndices, final ClusterState state, final ActionListener<GetIndexResponse> listener) { - ImmutableOpenMap<String, List<Entry>> warmersResult = ImmutableOpenMap.of(); ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappingsResult = ImmutableOpenMap.of(); ImmutableOpenMap<String, List<AliasMetaData>> aliasesResult = ImmutableOpenMap.of(); ImmutableOpenMap<String, Settings> settings = ImmutableOpenMap.of(); @@ -80,15 +78,8 @@ public class TransportGetIndexAction extends TransportClusterInfoAction im * Sets the indices this put mapping operation will execute on. */ @Override - public PutMappingRequest indices(String[] indices) { + public PutMappingRequest indices(String...
indices) { this.indices = indices; return this; } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java index ebdc8b72c74..85644e8523e 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java @@ -32,7 +32,7 @@ import org.elasticsearch.index.engine.SegmentsStats; import org.elasticsearch.index.fielddata.FieldDataStats; import org.elasticsearch.index.flush.FlushStats; import org.elasticsearch.index.get.GetStats; -import org.elasticsearch.index.indexing.IndexingStats; +import org.elasticsearch.index.shard.IndexingStats; import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.percolator.PercolateStats; import org.elasticsearch.index.recovery.RecoveryStats; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java index 326dbc01289..225ee326b95 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java @@ -37,6 +37,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; @@ -59,7 +60,6 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.atomic.AtomicReferenceArray; /** @@ -108,7 +108,7 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<ValidateQueryRequest, ValidateQueryResponse, ShardValidateQueryRequest, ShardValidateQueryResponse> { - Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, Integer.toString(ThreadLocalRandom.current().nextInt(1000)), request.indices()); + Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, Integer.toString(Randomness.get().nextInt(1000)), request.indices()); return clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, "_local"); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/DeleteWarmerAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/DeleteWarmerAction.java deleted file mode 100644 index 86c447d3ca4..00000000000 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/DeleteWarmerAction.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.admin.indices.warmer.delete; - -import org.elasticsearch.action.Action; -import org.elasticsearch.client.ElasticsearchClient; - -/** - * Action for the admin/warmers/delete API. - */ -public class DeleteWarmerAction extends Action { - - public static final DeleteWarmerAction INSTANCE = new DeleteWarmerAction(); - public static final String NAME = "indices:admin/warmers/delete"; - - private DeleteWarmerAction() { - super(NAME); - } - - @Override - public DeleteWarmerResponse newResponse() { - return new DeleteWarmerResponse(); - } - - @Override - public DeleteWarmerRequestBuilder newRequestBuilder(ElasticsearchClient client) { - return new DeleteWarmerRequestBuilder(client, this); - } -} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/DeleteWarmerRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/DeleteWarmerRequest.java deleted file mode 100644 index 39312e5a3d7..00000000000 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/DeleteWarmerRequest.java +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.indices.warmer.delete; - -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.IndicesRequest; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.action.support.master.AcknowledgedRequest; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.util.CollectionUtils; - -import java.io.IOException; - -import static org.elasticsearch.action.ValidateActions.addValidationError; - -/** - * A request that deletes a index warmer (name, {@link org.elasticsearch.action.search.SearchRequest}) - * tuple from the clusters metadata. 
- */ -public class DeleteWarmerRequest extends AcknowledgedRequest implements IndicesRequest.Replaceable { - - private String[] names = Strings.EMPTY_ARRAY; - private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, false); - private String[] indices = Strings.EMPTY_ARRAY; - - public DeleteWarmerRequest() { - } - - /** - * Constructs a new delete warmer request for the specified name. - * - * @param names the name (or wildcard expression) of the warmer to match, null to delete all. - */ - public DeleteWarmerRequest(String... names) { - names(names); - } - - @Override - public ActionRequestValidationException validate() { - ActionRequestValidationException validationException = null; - if (CollectionUtils.isEmpty(names)) { - validationException = addValidationError("warmer names are missing", validationException); - } else { - validationException = checkForEmptyString(validationException, names); - } - if (CollectionUtils.isEmpty(indices)) { - validationException = addValidationError("indices are missing", validationException); - } else { - validationException = checkForEmptyString(validationException, indices); - } - return validationException; - } - - private ActionRequestValidationException checkForEmptyString(ActionRequestValidationException validationException, String[] strings) { - boolean containsEmptyString = false; - for (String string : strings) { - if (!Strings.hasText(string)) { - containsEmptyString = true; - } - } - if (containsEmptyString) { - validationException = addValidationError("types must not contain empty strings", validationException); - } - return validationException; - } - - /** - * The name to delete. - */ - @Nullable - public String[] names() { - return names; - } - - /** - * The name (or wildcard expression) of the index warmer to delete, or null - * to delete all warmers. - */ - public DeleteWarmerRequest names(@Nullable String... names) { - this.names = names; - return this; - } - - /** - * Sets the indices this put mapping operation will execute on. - */ - @Override - public DeleteWarmerRequest indices(String... indices) { - this.indices = indices; - return this; - } - - /** - * The indices the mappings will be put. - */ - @Override - public String[] indices() { - return indices; - } - - @Override - public IndicesOptions indicesOptions() { - return indicesOptions; - } - - public DeleteWarmerRequest indicesOptions(IndicesOptions indicesOptions) { - this.indicesOptions = indicesOptions; - return this; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - names = in.readStringArray(); - indices = in.readStringArray(); - indicesOptions = IndicesOptions.readIndicesOptions(in); - readTimeout(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeStringArrayNullable(names); - out.writeStringArrayNullable(indices); - indicesOptions.writeIndicesOptions(out); - writeTimeout(out); - } -} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/DeleteWarmerRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/DeleteWarmerRequestBuilder.java deleted file mode 100644 index fdba95b590f..00000000000 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/DeleteWarmerRequestBuilder.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.admin.indices.warmer.delete; - -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; - -/** - * A builder for the {@link DeleteWarmerRequest} - * - * @see DeleteWarmerRequest for details - */ -public class DeleteWarmerRequestBuilder extends AcknowledgedRequestBuilder { - - public DeleteWarmerRequestBuilder(ElasticsearchClient client, DeleteWarmerAction action) { - super(client, action, new DeleteWarmerRequest()); - } - - public DeleteWarmerRequestBuilder setIndices(String... indices) { - request.indices(indices); - return this; - } - - /** - * The name (or wildcard expression) of the index warmer to delete, or null - * to delete all warmers. - */ - public DeleteWarmerRequestBuilder setNames(String... names) { - request.names(names); - return this; - } - - /** - * Specifies what type of requested indices to ignore and wildcard indices expressions. - *
<p>
- * For example indices that don't exist. - */ - public DeleteWarmerRequestBuilder setIndicesOptions(IndicesOptions options) { - request.indicesOptions(options); - return this; - } -} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/DeleteWarmerResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/DeleteWarmerResponse.java deleted file mode 100644 index 6e5235f78b2..00000000000 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/DeleteWarmerResponse.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.indices.warmer.delete; - -import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; - -/** - * An acknowledged response of delete warmer operation. - */ -public class DeleteWarmerResponse extends AcknowledgedResponse { - - DeleteWarmerResponse() { - super(); - } - - DeleteWarmerResponse(boolean acknowledged) { - super(acknowledged); - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - readAcknowledged(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - writeAcknowledged(out); - } -} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/TransportDeleteWarmerAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/TransportDeleteWarmerAction.java deleted file mode 100644 index 293729af462..00000000000 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/TransportDeleteWarmerAction.java +++ /dev/null @@ -1,163 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.action.admin.indices.warmer.delete; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.cluster.AckedClusterStateUpdateTask; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.regex.Regex; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.IndexNotFoundException; -import org.elasticsearch.search.warmer.IndexWarmerMissingException; -import org.elasticsearch.search.warmer.IndexWarmersMetaData; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -/** - * Internal Actions executed on the master deleting the warmer from the cluster state metadata. - * - * Note: this is an internal API and should not be used / called by any client code. - */ -public class TransportDeleteWarmerAction extends TransportMasterNodeAction { - - @Inject - public TransportDeleteWarmerAction(Settings settings, TransportService transportService, ClusterService clusterService, - ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, DeleteWarmerAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, DeleteWarmerRequest::new); - } - - @Override - protected String executor() { - // we go async right away - return ThreadPool.Names.SAME; - } - - @Override - protected DeleteWarmerResponse newResponse() { - return new DeleteWarmerResponse(); - } - - @Override - protected ClusterBlockException checkBlock(DeleteWarmerRequest request, ClusterState state) { - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndices(state, request)); - } - - @Override - protected void masterOperation(final DeleteWarmerRequest request, final ClusterState state, final ActionListener listener) { - final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); - clusterService.submitStateUpdateTask("delete_warmer [" + Arrays.toString(request.names()) + "]", new AckedClusterStateUpdateTask(request, listener) { - - @Override - protected DeleteWarmerResponse newResponse(boolean acknowledged) { - return new DeleteWarmerResponse(acknowledged); - } - - @Override - public void onFailure(String source, Throwable t) { - logger.debug("failed to delete warmer [{}] on indices [{}]", t, Arrays.toString(request.names()), concreteIndices); - super.onFailure(source, t); - } - - @Override - public ClusterState execute(ClusterState currentState) { - MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); - - boolean globalFoundAtLeastOne = false; - boolean deleteAll = false; - for (int i=0; i entries = new ArrayList<>(); - for (IndexWarmersMetaData.Entry entry : warmers.entries()) { - boolean keepWarmer = true; - for (String warmer : request.names()) { - if 
(Regex.simpleMatch(warmer, entry.name()) || warmer.equals(MetaData.ALL)) { - globalFoundAtLeastOne = true; - keepWarmer = false; - // don't add it... - break; - } - } - if (keepWarmer) { - entries.add(entry); - } - } - // a change, update it... - if (entries.size() != warmers.entries().size()) { - warmers = new IndexWarmersMetaData(entries.toArray(new IndexWarmersMetaData.Entry[entries.size()])); - IndexMetaData.Builder indexBuilder = IndexMetaData.builder(indexMetaData).putCustom(IndexWarmersMetaData.TYPE, warmers); - mdBuilder.put(indexBuilder); - } - } - } - - if (globalFoundAtLeastOne == false && deleteAll == false) { - throw new IndexWarmerMissingException(request.names()); - } - - if (logger.isInfoEnabled()) { - for (String index : concreteIndices) { - IndexMetaData indexMetaData = currentState.metaData().index(index); - if (indexMetaData == null) { - throw new IndexNotFoundException(index); - } - IndexWarmersMetaData warmers = indexMetaData.custom(IndexWarmersMetaData.TYPE); - if (warmers != null) { - for (IndexWarmersMetaData.Entry entry : warmers.entries()) { - for (String warmer : request.names()) { - if (Regex.simpleMatch(warmer, entry.name()) || warmer.equals(MetaData.ALL)) { - logger.info("[{}] delete warmer [{}]", index, entry.name()); - } - } - } - } else if(deleteAll){ - logger.debug("no warmers to delete on index [{}]", index); - } - } - } - - return ClusterState.builder(currentState).metaData(mdBuilder).build(); - } - }); - } -} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersAction.java deleted file mode 100644 index e2debde72a6..00000000000 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersAction.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.admin.indices.warmer.get; - -import org.elasticsearch.action.Action; -import org.elasticsearch.client.ElasticsearchClient; - -/** - * Action for the admin/warmers/get API. 
- */ -public class GetWarmersAction extends Action { - - public static final GetWarmersAction INSTANCE = new GetWarmersAction(); - public static final String NAME = "indices:admin/warmers/get"; - - private GetWarmersAction() { - super(NAME); - } - - @Override - public GetWarmersRequestBuilder newRequestBuilder(ElasticsearchClient client) { - return new GetWarmersRequestBuilder(client, this); - } - - @Override - public GetWarmersResponse newResponse() { - return new GetWarmersResponse(); - } -} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersRequest.java deleted file mode 100644 index bebf0d40b6e..00000000000 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersRequest.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.admin.indices.warmer.get; - -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.support.master.info.ClusterInfoRequest; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; - -/** - * A {@link ClusterInfoRequest} that fetches {@link org.elasticsearch.search.warmer.IndexWarmersMetaData} for - * a list or all existing index warmers in the cluster-state - */ -public class GetWarmersRequest extends ClusterInfoRequest { - - private String[] warmers = Strings.EMPTY_ARRAY; - - public GetWarmersRequest warmers(String[] warmers) { - this.warmers = warmers; - return this; - } - - public String[] warmers() { - return warmers; - } - - @Override - public ActionRequestValidationException validate() { - return null; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - warmers = in.readStringArray(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeStringArray(warmers); - } - -} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersRequestBuilder.java deleted file mode 100644 index de67d38603a..00000000000 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersRequestBuilder.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.admin.indices.warmer.get; - -import org.elasticsearch.action.support.master.info.ClusterInfoRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; -import org.elasticsearch.common.util.ArrayUtils; - -/** - * Builder for {@link GetWarmersRequest} - * - * @see GetWarmersRequest for details - */ -public class GetWarmersRequestBuilder extends ClusterInfoRequestBuilder { - - public GetWarmersRequestBuilder(ElasticsearchClient client, GetWarmersAction action, String... indices) { - super(client, action, new GetWarmersRequest().indices(indices)); - } - - public GetWarmersRequestBuilder setWarmers(String... warmers) { - request.warmers(warmers); - return this; - } - - public GetWarmersRequestBuilder addWarmers(String... warmers) { - request.warmers(ArrayUtils.concat(request.warmers(), warmers)); - return this; - } -} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersResponse.java deleted file mode 100644 index 0559e522925..00000000000 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersResponse.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.admin.indices.warmer.get; - -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.search.warmer.IndexWarmersMetaData; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - -/** - * Holds a warmer-name to a list of {@link IndexWarmersMetaData} mapping for each warmer specified - * in the {@link GetWarmersRequest}. 
This information is fetched from the current master since the metadata - * is contained inside the cluster-state - */ -public class GetWarmersResponse extends ActionResponse { - - private ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> warmers = ImmutableOpenMap.of(); - - GetWarmersResponse(ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> warmers) { - this.warmers = warmers; - } - - GetWarmersResponse() { - } - - public ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> warmers() { - return warmers; - } - - public ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> getWarmers() { - return warmers(); - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - int size = in.readVInt(); - ImmutableOpenMap.Builder<String, List<IndexWarmersMetaData.Entry>> indexMapBuilder = ImmutableOpenMap.builder(); - for (int i = 0; i < size; i++) { - String key = in.readString(); - int valueSize = in.readVInt(); - List<IndexWarmersMetaData.Entry> warmerEntryBuilder = new ArrayList<>(); - for (int j = 0; j < valueSize; j++) { - String name = in.readString(); - String[] types = in.readStringArray(); - IndexWarmersMetaData.SearchSource source = null; - if (in.readBoolean()) { - source = new IndexWarmersMetaData.SearchSource(in); - } - Boolean queryCache = null; - queryCache = in.readOptionalBoolean(); - warmerEntryBuilder.add(new IndexWarmersMetaData.Entry( - name, - types, - queryCache, - source) - ); - } - indexMapBuilder.put(key, Collections.unmodifiableList(warmerEntryBuilder)); - } - warmers = indexMapBuilder.build(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeVInt(warmers.size()); - for (ObjectObjectCursor<String, List<IndexWarmersMetaData.Entry>> indexEntry : warmers) { - out.writeString(indexEntry.key); - out.writeVInt(indexEntry.value.size()); - for (IndexWarmersMetaData.Entry warmerEntry : indexEntry.value) { - out.writeString(warmerEntry.name()); - out.writeStringArray(warmerEntry.types()); - boolean hasWarmerSource = warmerEntry != null; - out.writeBoolean(hasWarmerSource); - if (hasWarmerSource) { - warmerEntry.source().writeTo(out); - } - out.writeOptionalBoolean(warmerEntry.requestCache()); - } - } - } -} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/TransportGetWarmersAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/TransportGetWarmersAction.java deleted file mode 100644 index a86a6260ca3..00000000000 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/TransportGetWarmersAction.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License.
- */ - -package org.elasticsearch.action.admin.indices.warmer.get; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.master.info.TransportClusterInfoAction; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.search.warmer.IndexWarmersMetaData; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; - -import java.util.List; - -/** - * Internal Actions executed on the master fetching the warmer from the cluster state metadata. - * - * Note: this is an internal API and should not be used / called by any client code. - */ -public class TransportGetWarmersAction extends TransportClusterInfoAction<GetWarmersRequest, GetWarmersResponse> { - - @Inject - public TransportGetWarmersAction(Settings settings, TransportService transportService, ClusterService clusterService, - ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, GetWarmersAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, GetWarmersRequest::new); - } - - @Override - protected String executor() { - // very lightweight operation, no need to fork - return ThreadPool.Names.SAME; - } - - @Override - protected ClusterBlockException checkBlock(GetWarmersRequest request, ClusterState state) { - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, request)); - } - - @Override - protected GetWarmersResponse newResponse() { - return new GetWarmersResponse(); - } - - @Override - protected void doMasterOperation(final GetWarmersRequest request, String[] concreteIndices, final ClusterState state, final ActionListener<GetWarmersResponse> listener) { - ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> result = state.metaData().findWarmers( - concreteIndices, request.types(), request.warmers() - ); - listener.onResponse(new GetWarmersResponse(result)); - } -} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/package-info.java b/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/package-info.java deleted file mode 100644 index 053cc75caa3..00000000000 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/package-info.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied.
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/** - * Index / Search Warmer Administrative Actions - *
<p>
- * Index warming allows to run registered search requests to warm up the index before it is available for search. - * With the near real time aspect of search, cold data (segments) will be warmed up before they become available for - * search. This includes things such as the query cache, filesystem cache, and loading field data for fields. - *
<p>
- * - * See the reference guide for more detailed information about the Indices / Search Warmer - */ -package org.elasticsearch.action.admin.indices.warmer; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerRequest.java deleted file mode 100644 index dbf136dec12..00000000000 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerRequest.java +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.admin.indices.warmer.put; - -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.IndicesRequest; -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.action.support.master.AcknowledgedRequest; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; - -import static org.elasticsearch.action.ValidateActions.addValidationError; - -/** - * A request that associates a {@link SearchRequest} with a name in the cluster that is - * in-turn used to warm up indices before they are available for search. - * - * Note: neither the search request nor the name must be null - */ -public class PutWarmerRequest extends AcknowledgedRequest implements IndicesRequest.Replaceable { - - private String name; - - private SearchRequest searchRequest; - - public PutWarmerRequest() { - } - - /** - * Constructs a new warmer. - * - * @param name The name of the warmer. - */ - public PutWarmerRequest(String name) { - this.name = name; - } - - /** - * Sets the name of the warmer. - */ - public PutWarmerRequest name(String name) { - this.name = name; - return this; - } - - public String name() { - return this.name; - } - - /** - * Sets the search request to warm. - */ - public PutWarmerRequest searchRequest(SearchRequest searchRequest) { - this.searchRequest = searchRequest; - return this; - } - - /** - * Sets the search request to warm. 
- */ - public PutWarmerRequest searchRequest(SearchRequestBuilder searchRequest) { - this.searchRequest = searchRequest.request(); - return this; - } - - public SearchRequest searchRequest() { - return this.searchRequest; - } - - @Override - public ActionRequestValidationException validate() { - ActionRequestValidationException validationException = null; - if (searchRequest == null) { - validationException = addValidationError("search request is missing", validationException); - } else { - validationException = searchRequest.validate(); - } - if (name == null) { - validationException = addValidationError("name is missing", validationException); - } - return validationException; - } - - @Override - public String[] indices() { - if (searchRequest == null) { - throw new IllegalStateException("unable to retrieve indices, search request is null"); - } - return searchRequest.indices(); - } - - @Override - public IndicesRequest indices(String[] indices) { - if (searchRequest == null) { - throw new IllegalStateException("unable to set indices, search request is null"); - } - searchRequest.indices(indices); - return this; - } - - @Override - public IndicesOptions indicesOptions() { - if (searchRequest == null) { - throw new IllegalStateException("unable to retrieve indices options, search request is null"); - } - return searchRequest.indicesOptions(); - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - name = in.readString(); - if (in.readBoolean()) { - searchRequest = new SearchRequest(); - searchRequest.readFrom(in); - } - readTimeout(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeString(name); - if (searchRequest == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - searchRequest.writeTo(out); - } - writeTimeout(out); - } -} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerRequestBuilder.java deleted file mode 100644 index 39b7a370584..00000000000 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerRequestBuilder.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.action.admin.indices.warmer.put; - -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; - -/** - * Builder for {@link PutWarmerRequest} - * - * @see PutWarmerRequest for details - */ -public class PutWarmerRequestBuilder extends AcknowledgedRequestBuilder { - - /** - * Creates a new {@link PutWarmerRequestBuilder} with a given name. - */ - public PutWarmerRequestBuilder(ElasticsearchClient client, PutWarmerAction action, String name) { - super(client, action, new PutWarmerRequest().name(name)); - } - - /** - * Creates a new {@link PutWarmerRequestBuilder} - * Note: {@link #setName(String)} must be called with a non-null value before this request is executed. - */ - public PutWarmerRequestBuilder(ElasticsearchClient client, PutWarmerAction action) { - super(client, action, new PutWarmerRequest()); - } - - /** - * Sets the name of the warmer. - */ - public PutWarmerRequestBuilder setName(String name) { - request.name(name); - return this; - } - - /** - * Sets the search request to use to warm the index when applicable. - */ - public PutWarmerRequestBuilder setSearchRequest(SearchRequest searchRequest) { - request.searchRequest(searchRequest); - return this; - } - - /** - * Sets the search request to use to warm the index when applicable. - */ - public PutWarmerRequestBuilder setSearchRequest(SearchRequestBuilder searchRequest) { - request.searchRequest(searchRequest); - return this; - } -} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerResponse.java deleted file mode 100644 index 008b239920c..00000000000 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerResponse.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.admin.indices.warmer.put; - -import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; - -/** - * An acknowledged response of put warmer operation. 
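For context on how the pieces being deleted here composed, a minimal sketch of driving the put-warmer API from client code before this change. This is illustrative only: it assumes a connected org.elasticsearch.client.Client named "client", and the index and warmer names are made up. The builder methods (preparePutWarmer, setSearchRequest) and the acknowledged response are the ones removed in this diff.

    import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerResponse;
    import org.elasticsearch.client.Client;
    import org.elasticsearch.index.query.QueryBuilders;

    public class PutWarmerExample {
        public static void putWarmer(Client client) {
            PutWarmerResponse response = client.admin().indices()
                    .preparePutWarmer("my_warmer")                      // warmer name, must be non-null
                    .setSearchRequest(client.prepareSearch("my_index")  // the search request to warm with
                            .setQuery(QueryBuilders.matchAllQuery()))
                    .get();
            if (response.isAcknowledged() == false) {
                throw new IllegalStateException("put warmer was not acknowledged");
            }
        }
    }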
- */ -public class PutWarmerResponse extends AcknowledgedResponse { - - PutWarmerResponse() { - super(); - } - - PutWarmerResponse(boolean acknowledged) { - super(acknowledged); - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - readAcknowledged(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - writeAcknowledged(out); - } -} \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/TransportPutWarmerAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/TransportPutWarmerAction.java deleted file mode 100644 index 8dd671b4da0..00000000000 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/TransportPutWarmerAction.java +++ /dev/null @@ -1,167 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.admin.indices.warmer.put; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.TransportSearchAction; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.cluster.AckedClusterStateUpdateTask; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.IndexNotFoundException; -import org.elasticsearch.search.warmer.IndexWarmersMetaData; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -/** - * Internal Actions executed on the master associating a warmer with a name in the cluster state metadata. - * - * Note: this is an internal API and should not be used / called by any client code. 
- */ -public class TransportPutWarmerAction extends TransportMasterNodeAction { - - private final TransportSearchAction searchAction; - - @Inject - public TransportPutWarmerAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, - TransportSearchAction searchAction, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, PutWarmerAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, PutWarmerRequest::new); - this.searchAction = searchAction; - } - - @Override - protected String executor() { - return ThreadPool.Names.SAME; - } - - @Override - protected PutWarmerResponse newResponse() { - return new PutWarmerResponse(); - } - - @Override - protected ClusterBlockException checkBlock(PutWarmerRequest request, ClusterState state) { - String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); - ClusterBlockException status = state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, concreteIndices); - if (status != null) { - return status; - } - // PutWarmer executes a SearchQuery before adding the new warmer to the cluster state, - // so we need to check the same block as TransportSearchTypeAction here - return state.blocks().indicesBlockedException(ClusterBlockLevel.READ, concreteIndices); - } - - @Override - protected void masterOperation(final PutWarmerRequest request, final ClusterState state, final ActionListener listener) { - // first execute the search request, see that its ok... - SearchRequest searchRequest = new SearchRequest(request.searchRequest(), request); - searchAction.execute(searchRequest, new ActionListener() { - @Override - public void onResponse(SearchResponse searchResponse) { - if (searchResponse.getFailedShards() > 0) { - listener.onFailure(new ElasticsearchException("search failed with failed shards: " + Arrays.toString(searchResponse.getShardFailures()))); - return; - } - - clusterService.submitStateUpdateTask("put_warmer [" + request.name() + "]", new AckedClusterStateUpdateTask(request, listener) { - - @Override - protected PutWarmerResponse newResponse(boolean acknowledged) { - return new PutWarmerResponse(acknowledged); - } - - @Override - public void onFailure(String source, Throwable t) { - logger.debug("failed to put warmer [{}] on indices [{}]", t, request.name(), request.searchRequest().indices()); - super.onFailure(source, t); - } - - @Override - public ClusterState execute(ClusterState currentState) { - MetaData metaData = currentState.metaData(); - String[] concreteIndices = indexNameExpressionResolver.concreteIndices(currentState, request.searchRequest().indicesOptions(), request.searchRequest().indices()); - - IndexWarmersMetaData.SearchSource source = null; - if (request.searchRequest().source() != null) { - source = new IndexWarmersMetaData.SearchSource(request.searchRequest().source()); - } - - // now replace it on the metadata - MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); - - for (String index : concreteIndices) { - IndexMetaData indexMetaData = metaData.index(index); - if (indexMetaData == null) { - throw new IndexNotFoundException(index); - } - IndexWarmersMetaData warmers = indexMetaData.custom(IndexWarmersMetaData.TYPE); - if (warmers == null) { - logger.info("[{}] putting warmer [{}]", index, request.name()); - warmers = new IndexWarmersMetaData(new IndexWarmersMetaData.Entry(request.name(), 
request.searchRequest().types(), request.searchRequest().requestCache(), source)); - } else { - boolean found = false; - List entries = new ArrayList<>(warmers.entries().size() + 1); - for (IndexWarmersMetaData.Entry entry : warmers.entries()) { - if (entry.name().equals(request.name())) { - found = true; - entries.add(new IndexWarmersMetaData.Entry(request.name(), request.searchRequest().types(), request.searchRequest().requestCache(), source)); - } else { - entries.add(entry); - } - } - if (!found) { - logger.info("[{}] put warmer [{}]", index, request.name()); - entries.add(new IndexWarmersMetaData.Entry(request.name(), request.searchRequest().types(), request.searchRequest().requestCache(), source)); - } else { - logger.info("[{}] update warmer [{}]", index, request.name()); - } - warmers = new IndexWarmersMetaData(entries.toArray(new IndexWarmersMetaData.Entry[entries.size()])); - } - IndexMetaData.Builder indexBuilder = IndexMetaData.builder(indexMetaData).putCustom(IndexWarmersMetaData.TYPE, warmers); - mdBuilder.put(indexBuilder); - } - - return ClusterState.builder(currentState).metaData(mdBuilder).build(); - } - }); - } - - @Override - public void onFailure(Throwable e) { - listener.onFailure(e); - } - }); - } -} diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index fad3953bc09..08e90a0817d 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -419,7 +419,7 @@ public class TransportShardBulkAction extends TransportReplicationAction> EMPTY_HL = Collections.emptyList(); - - private long count; - private float[] scores; - private BytesRef[] matches; - private List> hls; - private byte percolatorTypeId; + private TopDocs topDocs; + private Map ids; + private Map> hls; + private boolean onlyCount; private int requestedSize; private InternalAggregations aggregations; private List pipelineAggregators; PercolateShardResponse() { - hls = new ArrayList<>(); } - public PercolateShardResponse(BytesRef[] matches, List> hls, long count, float[] scores, PercolateContext context, ShardId shardId) { - super(shardId); - this.matches = matches; + public PercolateShardResponse(TopDocs topDocs, Map ids, Map> hls, PercolateContext context) { + super(new ShardId(context.shardTarget().getIndex(), context.shardTarget().getShardId())); + this.topDocs = topDocs; + this.ids = ids; this.hls = hls; - this.count = count; - this.scores = scores; - this.percolatorTypeId = context.percolatorTypeId; + this.onlyCount = context.isOnlyCount(); this.requestedSize = context.size(); QuerySearchResult result = context.queryResult(); if (result != null) { @@ -78,39 +72,25 @@ public class PercolateShardResponse extends BroadcastShardResponse { } } - public PercolateShardResponse(BytesRef[] matches, long count, float[] scores, PercolateContext context, ShardId shardId) { - this(matches, EMPTY_HL, count, scores, context, shardId); + public TopDocs topDocs() { + return topDocs; } - public PercolateShardResponse(BytesRef[] matches, List> hls, long count, PercolateContext context, ShardId shardId) { - this(matches, hls, count, EMPTY_SCORES, context, shardId); - } - - public PercolateShardResponse(long count, PercolateContext context, ShardId shardId) { - this(EMPTY_MATCHES, EMPTY_HL, count, EMPTY_SCORES, context, shardId); - } - - public PercolateShardResponse(PercolateContext 
context, ShardId shardId) { - this(EMPTY_MATCHES, EMPTY_HL, 0, EMPTY_SCORES, context, shardId); - } - - public BytesRef[] matches() { - return matches; - } - - public float[] scores() { - return scores; - } - - public long count() { - return count; + /** + * Returns per match the percolator query id. The key is the Lucene docId of the matching percolator query. + */ + public Map ids() { + return ids; } public int requestedSize() { return requestedSize; } - public List> hls() { + /** + * Returns per match the highlight snippets. The key is the Lucene docId of the matching percolator query. + */ + public Map> hls() { return hls; } @@ -122,36 +102,35 @@ public class PercolateShardResponse extends BroadcastShardResponse { return pipelineAggregators; } - public byte percolatorTypeId() { - return percolatorTypeId; + public boolean onlyCount() { + return onlyCount; } public boolean isEmpty() { - return percolatorTypeId == 0x00; + return topDocs.totalHits == 0; } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - percolatorTypeId = in.readByte(); + onlyCount = in.readBoolean(); requestedSize = in.readVInt(); - count = in.readVLong(); - matches = new BytesRef[in.readVInt()]; - for (int i = 0; i < matches.length; i++) { - matches[i] = in.readBytesRef(); - } - scores = new float[in.readVInt()]; - for (int i = 0; i < scores.length; i++) { - scores[i] = in.readFloat(); - } + topDocs = Lucene.readTopDocs(in); int size = in.readVInt(); + ids = new HashMap<>(size); for (int i = 0; i < size; i++) { + ids.put(in.readVInt(), in.readString()); + } + size = in.readVInt(); + hls = new HashMap<>(size); + for (int i = 0; i < size; i++) { + int docId = in.readVInt(); int mSize = in.readVInt(); Map fields = new HashMap<>(); for (int j = 0; j < mSize; j++) { fields.put(in.readString(), HighlightField.readHighlightField(in)); } - hls.add(fields); + hls.put(docId, fields); } aggregations = InternalAggregations.readOptionalAggregations(in); if (in.readBoolean()) { @@ -169,23 +148,21 @@ public class PercolateShardResponse extends BroadcastShardResponse { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeByte(percolatorTypeId); + out.writeBoolean(onlyCount); out.writeVLong(requestedSize); - out.writeVLong(count); - out.writeVInt(matches.length); - for (BytesRef match : matches) { - out.writeBytesRef(match); - } - out.writeVLong(scores.length); - for (float score : scores) { - out.writeFloat(score); + Lucene.writeTopDocs(out, topDocs); + out.writeVInt(ids.size()); + for (Map.Entry entry : ids.entrySet()) { + out.writeVInt(entry.getKey()); + out.writeString(entry.getValue()); } out.writeVInt(hls.size()); - for (Map hl : hls) { - out.writeVInt(hl.size()); - for (Map.Entry entry : hl.entrySet()) { - out.writeString(entry.getKey()); - entry.getValue().writeTo(out); + for (Map.Entry> entry1 : hls.entrySet()) { + out.writeVInt(entry1.getKey()); + out.writeVInt(entry1.getValue().size()); + for (Map.Entry entry2 : entry1.getValue().entrySet()) { + out.writeString(entry2.getKey()); + entry2.getValue().writeTo(out); } } out.writeOptionalStreamable(aggregations); diff --git a/core/src/main/java/org/elasticsearch/action/percolate/TransportPercolateAction.java b/core/src/main/java/org/elasticsearch/action/percolate/TransportPercolateAction.java index c808177e9fe..fdac839e143 100644 --- a/core/src/main/java/org/elasticsearch/action/percolate/TransportPercolateAction.java +++ 
b/core/src/main/java/org/elasticsearch/action/percolate/TransportPercolateAction.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.action.percolate; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.get.GetRequest; @@ -43,6 +44,7 @@ import org.elasticsearch.percolator.PercolatorService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -117,7 +119,7 @@ public class TransportPercolateAction extends TransportBroadcastAction shardResults = null; List shardFailures = null; - byte percolatorTypeId = 0x00; + boolean onlyCount = false; for (int i = 0; i < shardsResponses.length(); i++) { Object shardResponse = shardsResponses.get(i); if (shardResponse == null) { @@ -133,7 +135,7 @@ public class TransportPercolateAction extends TransportBroadcastAction(); } shardResults.add(percolateShardResponse); @@ -146,7 +148,12 @@ public class TransportPercolateAction extends TransportBroadcastAction extends HandledTransportAction { - private static final ClusterStateObserver.ChangePredicate masterNodeChangedPredicate = new ClusterStateObserver.ChangePredicate() { - @Override - public boolean apply(ClusterState previousState, ClusterState.ClusterStateStatus previousStatus, - ClusterState newState, ClusterState.ClusterStateStatus newStatus) { - // The condition !newState.nodes().masterNodeId().equals(previousState.nodes().masterNodeId()) is not sufficient as the same master node might get reelected after a disruption. - return newState.nodes().masterNodeId() != null && newState != previousState; - } - - @Override - public boolean apply(ClusterChangedEvent event) { - return event.nodesDelta().masterNodeChanged(); - } - }; - protected final TransportService transportService; protected final ClusterService clusterService; @@ -164,7 +150,7 @@ public abstract class TransportMasterNodeAction(listener) { @Override @@ -195,7 +181,7 @@ public abstract class TransportMasterNodeAction putWarmer(PutWarmerRequest request); - - /** - * Puts an index search warmer to be applies when applicable. - */ - void putWarmer(PutWarmerRequest request, ActionListener listener); - - /** - * Puts an index search warmer to be applies when applicable. - */ - PutWarmerRequestBuilder preparePutWarmer(String name); - - /** - * Deletes an index warmer. - */ - ActionFuture deleteWarmer(DeleteWarmerRequest request); - - /** - * Deletes an index warmer. - */ - void deleteWarmer(DeleteWarmerRequest request, ActionListener listener); - - /** - * Deletes an index warmer. - */ - DeleteWarmerRequestBuilder prepareDeleteWarmer(); - - /** - * Returns a map of index warmers for the given get request. - */ - void getWarmers(GetWarmersRequest request, ActionListener listener); - - /** - * Returns a map of index warmers for the given get request. - */ - ActionFuture getWarmers(GetWarmersRequest request); - - /** - * Returns a new builder to fetch index warmer metadata for the given indices. - */ - GetWarmersRequestBuilder prepareGetWarmers(String... indices); - /** * Executed a per index settings get request and returns the settings for the indices specified. 
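Stepping back to the PercolateShardResponse rework above: the per-match data now travels as maps keyed by the Lucene docId of the matching percolator query, and the wire format is simply a vInt size prefix followed by key/value pairs. A minimal sketch of that pattern, using the same StreamInput/StreamOutput calls the diff relies on; the helper class and method names are illustrative, not part of the change.

    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    public class StreamMapExample {
        // Writes the map as a vInt size followed by (vInt key, string value) pairs,
        // mirroring how PercolateShardResponse serializes its docId -> query-id map.
        public static void writeIdMap(StreamOutput out, Map<Integer, String> ids) throws IOException {
            out.writeVInt(ids.size());
            for (Map.Entry<Integer, String> entry : ids.entrySet()) {
                out.writeVInt(entry.getKey());
                out.writeString(entry.getValue());
            }
        }

        // Reads the map back in the same order it was written.
        public static Map<Integer, String> readIdMap(StreamInput in) throws IOException {
            int size = in.readVInt();
            Map<Integer, String> ids = new HashMap<>(size);
            for (int i = 0; i < size; i++) {
                ids.put(in.readVInt(), in.readString());
            }
            return ids;
        }
    }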
* Note: this is a per index request and will not include settings that are set on the cluster diff --git a/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java index e085c8da075..e5a465442bb 100644 --- a/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java +++ b/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java @@ -232,18 +232,6 @@ import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse; -import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerAction; -import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerRequest; -import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerRequestBuilder; -import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse; -import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersAction; -import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequest; -import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequestBuilder; -import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse; -import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerAction; -import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerRequest; -import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerRequestBuilder; -import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerResponse; import org.elasticsearch.action.bulk.BulkAction; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkRequestBuilder; @@ -1669,51 +1657,6 @@ public abstract class AbstractClient extends AbstractComponent implements Client return new ValidateQueryRequestBuilder(this, ValidateQueryAction.INSTANCE).setIndices(indices); } - @Override - public ActionFuture putWarmer(PutWarmerRequest request) { - return execute(PutWarmerAction.INSTANCE, request); - } - - @Override - public void putWarmer(PutWarmerRequest request, ActionListener listener) { - execute(PutWarmerAction.INSTANCE, request, listener); - } - - @Override - public PutWarmerRequestBuilder preparePutWarmer(String name) { - return new PutWarmerRequestBuilder(this, PutWarmerAction.INSTANCE, name); - } - - @Override - public ActionFuture deleteWarmer(DeleteWarmerRequest request) { - return execute(DeleteWarmerAction.INSTANCE, request); - } - - @Override - public void deleteWarmer(DeleteWarmerRequest request, ActionListener listener) { - execute(DeleteWarmerAction.INSTANCE, request, listener); - } - - @Override - public DeleteWarmerRequestBuilder prepareDeleteWarmer() { - return new DeleteWarmerRequestBuilder(this, DeleteWarmerAction.INSTANCE); - } - - @Override - public GetWarmersRequestBuilder prepareGetWarmers(String... indices) { - return new GetWarmersRequestBuilder(this, GetWarmersAction.INSTANCE, indices); - } - - @Override - public ActionFuture getWarmers(GetWarmersRequest request) { - return execute(GetWarmersAction.INSTANCE, request); - } - - @Override - public void getWarmers(GetWarmersRequest request, ActionListener listener) { - execute(GetWarmersAction.INSTANCE, request, listener); - } - @Override public GetSettingsRequestBuilder prepareGetSettings(String... 
indices) { return new GetSettingsRequestBuilder(this, GetSettingsAction.INSTANCE, indices); diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java index c0370248ad7..5dce6d5757b 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -67,7 +67,7 @@ import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.gateway.PrimaryShardAllocator; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.EngineConfig; -import org.elasticsearch.index.indexing.IndexingSlowLog; +import org.elasticsearch.index.IndexingSlowLog; import org.elasticsearch.index.search.stats.SearchSlowLog; import org.elasticsearch.index.settings.IndexDynamicSettings; import org.elasticsearch.index.shard.IndexShard; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerAction.java b/core/src/main/java/org/elasticsearch/cluster/MasterNodeChangePredicate.java similarity index 53% rename from core/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerAction.java rename to core/src/main/java/org/elasticsearch/cluster/MasterNodeChangePredicate.java index 3c5c8b7c412..6d91ec7fd27 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerAction.java +++ b/core/src/main/java/org/elasticsearch/cluster/MasterNodeChangePredicate.java @@ -17,30 +17,24 @@ * under the License. */ -package org.elasticsearch.action.admin.indices.warmer.put; +package org.elasticsearch.cluster; -import org.elasticsearch.action.Action; -import org.elasticsearch.client.ElasticsearchClient; +public enum MasterNodeChangePredicate implements ClusterStateObserver.ChangePredicate { + INSTANCE; -/** - * Action for the admin/warmers/put API. 
- */ -public class PutWarmerAction extends Action { - - public static final PutWarmerAction INSTANCE = new PutWarmerAction(); - public static final String NAME = "indices:admin/warmers/put"; - - private PutWarmerAction() { - super(NAME); + @Override + public boolean apply( + ClusterState previousState, + ClusterState.ClusterStateStatus previousStatus, + ClusterState newState, + ClusterState.ClusterStateStatus newStatus) { + // checking if the masterNodeId changed is insufficient as the + // same master node might get re-elected after a disruption + return newState.nodes().masterNodeId() != null && newState != previousState; } @Override - public PutWarmerResponse newResponse() { - return new PutWarmerResponse(); - } - - @Override - public PutWarmerRequestBuilder newRequestBuilder(ElasticsearchClient client) { - return new PutWarmerRequestBuilder(client, this); + public boolean apply(ClusterChangedEvent changedEvent) { + return changedEvent.nodesDelta().masterNodeChanged(); } } diff --git a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index 58b766e8d84..00a238504f2 100644 --- a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -302,6 +302,10 @@ public class ShardStateAction extends AbstractComponent { this.failure = failure; } + public ShardRouting getShardRouting() { + return shardRouting; + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index b07364a7f70..7fce86ab60a 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -48,7 +48,6 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.search.warmer.IndexWarmersMetaData; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; @@ -88,11 +87,6 @@ public class IndexMetaData implements Diffable, FromXContentBuild public static Map customPrototypes = new HashMap<>(); - static { - // register non plugin custom metadata - registerPrototype(IndexWarmersMetaData.TYPE, IndexWarmersMetaData.PROTO); - } - /** * Register a custom index meta data factory. Make sure to call it from a static block. 
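The change just above replaces an anonymous inner-class predicate with an enum singleton (MasterNodeChangePredicate.INSTANCE). The general shape of that pattern, as a self-contained sketch in plain Java with a hypothetical predicate; only the pattern, not the names, comes from the diff:

    import java.util.function.Predicate;

    // A stateless predicate exposed as an enum singleton: the JVM guarantees a
    // single lazily initialized instance, and it cannot be instantiated twice.
    public enum NonNullPredicate implements Predicate<Object> {
        INSTANCE;

        @Override
        public boolean test(Object value) {
            return value != null;
        }
    }

Callers then reference NonNullPredicate.INSTANCE directly, just as TransportMasterNodeAction now references MasterNodeChangePredicate.INSTANCE instead of holding its own private copy.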
*/ @@ -950,10 +944,16 @@ public class IndexMetaData implements Diffable, FromXContentBuild if (parser.currentToken() == XContentParser.Token.START_OBJECT) { // on a start object move to next token parser.nextToken(); } + if (parser.currentToken() != XContentParser.Token.FIELD_NAME) { + throw new IllegalArgumentException("expected field name but got a " + parser.currentToken()); + } Builder builder = new Builder(parser.currentName()); String currentFieldName = null; XContentParser.Token token = parser.nextToken(); + if (token != XContentParser.Token.START_OBJECT) { + throw new IllegalArgumentException("expected object but got a " + token); + } while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); @@ -968,6 +968,8 @@ public class IndexMetaData implements Diffable, FromXContentBuild String mappingType = currentFieldName; Map mappingSource = MapBuilder.newMapBuilder().put(mappingType, parser.mapOrdered()).map(); builder.putMapping(new MappingMetaData(mappingType, mappingSource)); + } else { + throw new IllegalArgumentException("Unexpected token: " + token); } } } else if (KEY_ALIASES.equals(currentFieldName)) { @@ -987,8 +989,17 @@ public class IndexMetaData implements Diffable, FromXContentBuild } } builder.putActiveAllocationIds(Integer.valueOf(shardId), allocationIds); + } else { + throw new IllegalArgumentException("Unexpected token: " + token); } } + } else if ("warmers".equals(currentFieldName)) { + // TODO: do this in 4.0: + // throw new IllegalArgumentException("Warmers are not supported anymore - are you upgrading from 1.x?"); + // ignore: warmers have been removed in 3.0 and are + // simply ignored when upgrading from 2.x + assert Version.CURRENT.major <= 3; + parser.skipChildren(); } else { // check if its a custom index metadata Custom proto = lookupPrototype(currentFieldName); @@ -1023,13 +1034,19 @@ public class IndexMetaData implements Diffable, FromXContentBuild } } builder.primaryTerms(list.toArray()); + } else { + throw new IllegalArgumentException("Unexpected field for an array " + currentFieldName); } } else if (token.isValue()) { if (KEY_STATE.equals(currentFieldName)) { builder.state(State.fromString(parser.text())); } else if (KEY_VERSION.equals(currentFieldName)) { builder.version(parser.longValue()); + } else { + throw new IllegalArgumentException("Unexpected field [" + currentFieldName + "]"); } + } else { + throw new IllegalArgumentException("Unexpected token " + token); } } return builder.build(); diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index d904a3ca3ea..002d1a51107 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -55,12 +55,10 @@ import org.elasticsearch.index.store.IndexStoreConfig; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.indices.ttl.IndicesTTLService; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.search.warmer.IndexWarmersMetaData; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.EnumSet; @@ -71,7 +69,6 @@ import java.util.Map; import java.util.Set; import java.util.SortedMap; import java.util.TreeMap; -import java.util.stream.Collectors; 
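The stricter IndexMetaData parsing above follows one pattern throughout: verify the token you expect, throw IllegalArgumentException on anything unexpected, and skipChildren() over known legacy sections such as "warmers". A condensed sketch of that style, using only the XContentParser calls the diff itself uses; the field names here are hypothetical.

    import org.elasticsearch.common.xcontent.XContentParser;

    import java.io.IOException;

    public class StrictParsingExample {
        // Parses {"name": "...", ...}, failing fast on anything unexpected and
        // skipping one tolerated-but-ignored legacy field, like "warmers" above.
        public static String parseName(XContentParser parser) throws IOException {
            if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
                throw new IllegalArgumentException("expected object but got a " + parser.currentToken());
            }
            String name = null;
            String currentFieldName = null;
            XContentParser.Token token;
            while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                if (token == XContentParser.Token.FIELD_NAME) {
                    currentFieldName = parser.currentName();
                } else if ("legacy_section".equals(currentFieldName)) {
                    parser.skipChildren(); // tolerated but ignored
                } else if (token.isValue() && "name".equals(currentFieldName)) {
                    name = parser.text();
                } else {
                    throw new IllegalArgumentException("Unexpected token " + token);
                }
            }
            return name;
        }
    }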
import static java.util.Collections.unmodifiableSet; import static org.elasticsearch.common.settings.Settings.readSettingsFromStream; @@ -365,49 +362,6 @@ public class MetaData implements Iterable, Diffable, Fr return indexMapBuilder.build(); } - public ImmutableOpenMap> findWarmers(String[] concreteIndices, final String[] types, final String[] uncheckedWarmers) { - assert uncheckedWarmers != null; - assert concreteIndices != null; - if (concreteIndices.length == 0) { - return ImmutableOpenMap.of(); - } - // special _all check to behave the same like not specifying anything for the warmers (not for the indices) - final String[] warmers = Strings.isAllOrWildcard(uncheckedWarmers) ? Strings.EMPTY_ARRAY : uncheckedWarmers; - - ImmutableOpenMap.Builder> mapBuilder = ImmutableOpenMap.builder(); - Iterable intersection = HppcMaps.intersection(ObjectHashSet.from(concreteIndices), indices.keys()); - for (String index : intersection) { - IndexMetaData indexMetaData = indices.get(index); - IndexWarmersMetaData indexWarmersMetaData = indexMetaData.custom(IndexWarmersMetaData.TYPE); - if (indexWarmersMetaData == null || indexWarmersMetaData.entries().isEmpty()) { - continue; - } - - // TODO: make this a List so we don't have to copy below - Collection filteredWarmers = - indexWarmersMetaData - .entries() - .stream() - .filter(warmer -> { - if (warmers.length != 0 && types.length != 0) { - return Regex.simpleMatch(warmers, warmer.name()) && Regex.simpleMatch(types, warmer.types()); - } else if (warmers.length != 0) { - return Regex.simpleMatch(warmers, warmer.name()); - } else if (types.length != 0) { - return Regex.simpleMatch(types, warmer.types()); - } else { - return true; - } - }) - .collect(Collectors.toCollection(ArrayList::new)); - - if (!filteredWarmers.isEmpty()) { - mapBuilder.put(index, Collections.unmodifiableList(new ArrayList<>(filteredWarmers))); - } - } - return mapBuilder.build(); - } - /** * Returns all the concrete indices. */ @@ -1120,14 +1074,20 @@ public class MetaData implements Iterable, Diffable, Fr if (token == XContentParser.Token.START_OBJECT) { // move to the field name (meta-data) token = parser.nextToken(); + if (token != XContentParser.Token.FIELD_NAME) { + throw new IllegalArgumentException("Expected a field name but got " + token); + } // move to the next object token = parser.nextToken(); } currentFieldName = parser.currentName(); - if (token == null) { - // no data... 
- return builder.build(); - } + } + + if (!"meta-data".equals(parser.currentName())) { + throw new IllegalArgumentException("Expected [meta-data] as a field name but got " + currentFieldName); + } + if (token != XContentParser.Token.START_OBJECT) { + throw new IllegalArgumentException("Expected a START_OBJECT but got " + token); } while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { @@ -1160,7 +1120,11 @@ public class MetaData implements Iterable, Diffable, Fr builder.version = parser.longValue(); } else if ("cluster_uuid".equals(currentFieldName) || "uuid".equals(currentFieldName)) { builder.clusterUUID = parser.text(); + } else { + throw new IllegalArgumentException("Unexpected field [" + currentFieldName + "]"); } + } else { + throw new IllegalArgumentException("Unexpected token " + token); } } return builder.build(); diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java index c1e159b2185..b9fc6687b3e 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java @@ -223,7 +223,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent { SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap()); try (AnalysisService analysisService = new FakeAnalysisService(indexSettings)) { - try (MapperService mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry)) { + try (MapperService mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry, () -> null)) { for (ObjectCursor cursor : indexMetaData.getMappings().values()) { MappingMetaData mappingMetaData = cursor.value; mapperService.merge(mappingMetaData.type(), mappingMetaData.source(), false, false); diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java index ec20d307eed..4e7b680c8ce 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java @@ -26,6 +26,7 @@ import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.collect.ImmutableOpenIntMap; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -39,7 +40,6 @@ import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Set; -import java.util.concurrent.ThreadLocalRandom; /** * The {@link IndexRoutingTable} represents routing information for a single @@ -71,7 +71,7 @@ public class IndexRoutingTable extends AbstractDiffable imple IndexRoutingTable(String index, ImmutableOpenIntMap shards) { this.index = index; - this.shuffler = new RotationShardShuffler(ThreadLocalRandom.current().nextInt()); + this.shuffler = new RotationShardShuffler(Randomness.get().nextInt()); this.shards = shards; List allActiveShards = new ArrayList<>(); for (IntObjectCursor cursor : shards) { diff --git 
a/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java index 7bedaa99fbe..e51ffb9631d 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java @@ -21,6 +21,7 @@ package org.elasticsearch.cluster.routing; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -28,8 +29,14 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; -import java.util.*; -import java.util.concurrent.ThreadLocalRandom; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; import static java.util.Collections.emptyMap; @@ -66,7 +73,7 @@ public class IndexShardRoutingTable implements Iterable { IndexShardRoutingTable(ShardId shardId, List shards) { this.shardId = shardId; - this.shuffler = new RotationShardShuffler(ThreadLocalRandom.current().nextInt()); + this.shuffler = new RotationShardShuffler(Randomness.get().nextInt()); this.shards = Collections.unmodifiableList(shards); ShardRouting primary = null; @@ -419,13 +426,21 @@ public class IndexShardRoutingTable implements Iterable { @Override public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } IndexShardRoutingTable that = (IndexShardRoutingTable) o; - if (!shardId.equals(that.shardId)) return false; - if (!shards.equals(that.shards)) return false; + if (!shardId.equals(that.shardId)) { + return false; + } + if (!shards.equals(that.shards)) { + return false; + } return true; } diff --git a/core/src/main/java/org/elasticsearch/common/Randomness.java b/core/src/main/java/org/elasticsearch/common/Randomness.java index dbfa8034b99..7f71afc1c70 100644 --- a/core/src/main/java/org/elasticsearch/common/Randomness.java +++ b/core/src/main/java/org/elasticsearch/common/Randomness.java @@ -109,6 +109,7 @@ public final class Randomness { } } + @SuppressForbidden(reason = "ThreadLocalRandom is okay when not running tests") private static Random getWithoutSeed() { assert currentMethod == null && getRandomMethod == null : "running under tests but tried to create non-reproducible random"; return ThreadLocalRandom.current(); diff --git a/core/src/main/java/org/elasticsearch/common/hash/MurmurHash3.java b/core/src/main/java/org/elasticsearch/common/hash/MurmurHash3.java index c9c0d29c448..ba159f30a22 100644 --- a/core/src/main/java/org/elasticsearch/common/hash/MurmurHash3.java +++ b/core/src/main/java/org/elasticsearch/common/hash/MurmurHash3.java @@ -59,6 +59,7 @@ public enum MurmurHash3 { * Note, this hashing function might be used to persist hashes, so if the way hashes are computed * changes for some reason, it needs to be addressed (like in BloomFilter and MurmurHashField). 
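The two routing-table constructors above, together with the new forbidden-API entry for ThreadLocalRandom, route all seed generation through Randomness.get(): under tests it returns a reproducibly seeded Random, in production it falls back to ThreadLocalRandom (hence the @SuppressForbidden inside Randomness itself). A small sketch of the call-site pattern, assuming Randomness.get() returns a plain java.util.Random as the diff's usage suggests; the field name is illustrative.

    import org.elasticsearch.common.Randomness;

    import java.util.Random;

    public class ShuffleSeedExample {
        // Mirrors the rotation-seed usage in the routing tables above: one
        // random int drawn at construction time, reproducible under tests.
        private final int rotationSeed = Randomness.get().nextInt();

        public int seed() {
            return rotationSeed;
        }

        public static void main(String[] args) {
            Random random = Randomness.get();
            System.out.println("seed sample: " + random.nextInt());
        }
    }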
*/ + @SuppressWarnings("fallthrough") // Intentionally uses fallthrough to implement a well known hashing algorithm public static Hash128 hash128(byte[] key, int offset, int length, long seed, Hash128 hash) { long h1 = seed; long h2 = seed; diff --git a/core/src/main/java/org/elasticsearch/common/inject/assistedinject/FactoryProvider2.java b/core/src/main/java/org/elasticsearch/common/inject/assistedinject/FactoryProvider2.java index 1f0e05f43c0..29863527202 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/assistedinject/FactoryProvider2.java +++ b/core/src/main/java/org/elasticsearch/common/inject/assistedinject/FactoryProvider2.java @@ -258,6 +258,12 @@ public final class FactoryProvider2 implements InvocationHandler, Provider return o == this || o == factory; } + @Override + public int hashCode() { + // This way both this and its factory hash to the same spot, making hashCode consistent. + return factory.hashCode(); + } + /** * Returns true if {@code thrown} can be thrown by {@code invoked} without wrapping. */ diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java b/core/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java index 155a8ca02f7..2d2719a113e 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java @@ -39,10 +39,12 @@ public class BytesStreamOutput extends StreamOutput implements BytesStream { protected int count; /** - * Create a non recycling {@link BytesStreamOutput} with 1 initial page acquired. + * Create a non recycling {@link BytesStreamOutput} with an initial capacity of 0. */ public BytesStreamOutput() { - this(BigArrays.PAGE_SIZE_IN_BYTES); + // since this impl is not recycling anyway, don't bother aligning to + // the page size, this will even save memory + this(0); } /** diff --git a/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java index 558e92c4fb8..b693af1e6d1 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -86,11 +86,6 @@ import java.util.Objects; * */ public class Lucene { - - // TODO: remove VERSION, and have users use Version.LATEST. - public static final Version VERSION = Version.LATEST; - public static final Version ANALYZER_VERSION = VERSION; - public static final Version QUERYPARSER_VERSION = VERSION; public static final String LATEST_DOC_VALUES_FORMAT = "Lucene54"; public static final String LATEST_POSTINGS_FORMAT = "Lucene50"; public static final String LATEST_CODEC = "Lucene54"; @@ -109,7 +104,6 @@ public class Lucene { public static final TopDocs EMPTY_TOP_DOCS = new TopDocs(0, EMPTY_SCORE_DOCS, 0.0f); - @SuppressWarnings("deprecation") public static Version parseVersion(@Nullable String version, Version defaultVersion, ESLogger logger) { if (version == null) { return defaultVersion; diff --git a/core/src/main/java/org/elasticsearch/common/math/UnboxedMathUtils.java b/core/src/main/java/org/elasticsearch/common/math/UnboxedMathUtils.java deleted file mode 100644 index 6c8e0b45f70..00000000000 --- a/core/src/main/java/org/elasticsearch/common/math/UnboxedMathUtils.java +++ /dev/null @@ -1,593 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
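The @SuppressWarnings("fallthrough") added to MurmurHash3 (and to BloomFilter later in this diff) goes hand in hand with re-enabling the fallthrough lint in core/build.gradle: the switch is deliberately written so each case falls into the one below it. A generic sketch of that tail-handling shape; the constants and mixing of the real hash are omitted, so this is the pattern, not the algorithm.

    public class TailMixExample {
        // Intentional fallthrough: each case folds in one trailing byte and then
        // falls into the case below it, so tailLength 3 processes bytes 2, 1, 0.
        @SuppressWarnings("fallthrough")
        public static long mixTail(byte[] key, int offset, int tailLength, long k1) {
            switch (tailLength & 3) {
                case 3:
                    k1 ^= ((long) key[offset + 2]) << 16;
                case 2:
                    k1 ^= ((long) key[offset + 1]) << 8;
                case 1:
                    k1 ^= key[offset]; // sign-extends, same as the cast-free form above
            }
            return k1;
        }
    }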
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.math; - -import org.elasticsearch.common.SuppressForbidden; - -import java.util.concurrent.ThreadLocalRandom; - -/** - * - */ -public class UnboxedMathUtils { - - public static double sin(Short a) { - return Math.sin(a.doubleValue()); - } - - public static double sin(Integer a) { - return Math.sin(a.doubleValue()); - } - - public static double sin(Float a) { - return Math.sin(a.doubleValue()); - } - - public static double sin(Long a) { - return Math.sin(a.doubleValue()); - } - - public static double sin(Double a) { - return Math.sin(a); - } - - public static double cos(Short a) { - return Math.cos(a.doubleValue()); - } - - public static double cos(Integer a) { - return Math.cos(a.doubleValue()); - } - - public static double cos(Float a) { - return Math.cos(a.doubleValue()); - } - - public static double cos(Long a) { - return Math.cos(a.doubleValue()); - } - - public static double cos(Double a) { - return Math.cos(a); - } - - public static double tan(Short a) { - return Math.tan(a.doubleValue()); - } - - public static double tan(Integer a) { - return Math.tan(a.doubleValue()); - } - - public static double tan(Float a) { - return Math.tan(a.doubleValue()); - } - - public static double tan(Long a) { - return Math.tan(a.doubleValue()); - } - - public static double tan(Double a) { - return Math.tan(a); - } - - public static double asin(Short a) { - return Math.asin(a.doubleValue()); - } - - public static double asin(Integer a) { - return Math.asin(a.doubleValue()); - } - - public static double asin(Float a) { - return Math.asin(a.doubleValue()); - } - - public static double asin(Long a) { - return Math.asin(a.doubleValue()); - } - - public static double asin(Double a) { - return Math.asin(a); - } - - public static double acos(Short a) { - return Math.acos(a.doubleValue()); - } - - - public static double acos(Integer a) { - return Math.acos(a.doubleValue()); - } - - - public static double acos(Float a) { - return Math.acos(a.doubleValue()); - } - - public static double acos(Long a) { - return Math.acos(a.doubleValue()); - } - - public static double acos(Double a) { - return Math.acos(a); - } - - public static double atan(Short a) { - return Math.atan(a.doubleValue()); - } - - public static double atan(Integer a) { - return Math.atan(a.doubleValue()); - } - - public static double atan(Float a) { - return Math.atan(a.doubleValue()); - } - - public static double atan(Long a) { - return Math.atan(a.doubleValue()); - } - - public static double atan(Double a) { - return Math.atan(a); - } - - public static double toRadians(Short angdeg) { - return Math.toRadians(angdeg.doubleValue()); - } - - public static double toRadians(Integer angdeg) { - return Math.toRadians(angdeg.doubleValue()); - } - - public static double toRadians(Float angdeg) { - return 
Math.toRadians(angdeg.doubleValue()); - } - - public static double toRadians(Long angdeg) { - return Math.toRadians(angdeg.doubleValue()); - } - - public static double toRadians(Double angdeg) { - return Math.toRadians(angdeg); - } - - public static double toDegrees(Short angrad) { - return Math.toDegrees(angrad.doubleValue()); - } - - public static double toDegrees(Integer angrad) { - return Math.toDegrees(angrad.doubleValue()); - } - - public static double toDegrees(Float angrad) { - return Math.toDegrees(angrad.doubleValue()); - } - - public static double toDegrees(Long angrad) { - return Math.toDegrees(angrad.doubleValue()); - } - - public static double toDegrees(Double angrad) { - return Math.toDegrees(angrad); - } - - public static double exp(Short a) { - return Math.exp(a.doubleValue()); - } - - public static double exp(Integer a) { - return Math.exp(a.doubleValue()); - } - - public static double exp(Float a) { - return Math.exp(a.doubleValue()); - } - - public static double exp(Long a) { - return Math.exp(a.doubleValue()); - } - - public static double exp(Double a) { - return Math.exp(a); - } - - public static double log(Short a) { - return Math.log(a.doubleValue()); - } - - public static double log(Integer a) { - return Math.log(a.doubleValue()); - } - - public static double log(Float a) { - return Math.log(a.doubleValue()); - } - - public static double log(Long a) { - return Math.log(a.doubleValue()); - } - - public static double log(Double a) { - return Math.log(a); - } - - public static double log10(Short a) { - return Math.log10(a.doubleValue()); - } - - public static double log10(Integer a) { - return Math.log10(a.doubleValue()); - } - - public static double log10(Float a) { - return Math.log10(a.doubleValue()); - } - - public static double log10(Long a) { - return Math.log10(a.doubleValue()); - } - - public static double log10(Double a) { - return Math.log10(a); - } - - public static double sqrt(Short a) { - return Math.sqrt(a.doubleValue()); - } - - public static double sqrt(Integer a) { - return Math.sqrt(a.doubleValue()); - } - - public static double sqrt(Float a) { - return Math.sqrt(a.doubleValue()); - } - - public static double sqrt(Long a) { - return Math.sqrt(a.doubleValue()); - } - - public static double sqrt(Double a) { - return Math.sqrt(a); - } - - public static double cbrt(Short a) { - return Math.cbrt(a.doubleValue()); - } - - public static double cbrt(Integer a) { - return Math.cbrt(a.doubleValue()); - } - - public static double cbrt(Float a) { - return Math.cbrt(a.doubleValue()); - } - - public static double cbrt(Long a) { - return Math.cbrt(a.doubleValue()); - } - - public static double cbrt(Double a) { - return Math.cbrt(a); - } - - public static double IEEEremainder(Short f1, Short f2) { - return Math.IEEEremainder(f1.doubleValue(), f2.doubleValue()); - } - - public static double IEEEremainder(Integer f1, Integer f2) { - return Math.IEEEremainder(f1.doubleValue(), f2.doubleValue()); - } - - public static double IEEEremainder(Float f1, Float f2) { - return Math.IEEEremainder(f1.doubleValue(), f2.doubleValue()); - } - - public static double IEEEremainder(Long f1, Long f2) { - return Math.IEEEremainder(f1.doubleValue(), f2.doubleValue()); - } - - public static double IEEEremainder(Double f1, Double f2) { - return Math.IEEEremainder(f1, f2); - } - - public static double ceil(Short a) { - return Math.ceil(a.doubleValue()); - } - - public static double ceil(Integer a) { - return Math.ceil(a.doubleValue()); - } - - public static double ceil(Float a) { - return 
Math.ceil(a.doubleValue()); - } - - public static double ceil(Long a) { - return Math.ceil(a.doubleValue()); - } - - public static double ceil(Double a) { - return Math.ceil(a); - } - - public static double floor(Short a) { - return Math.floor(a.doubleValue()); - } - - public static double floor(Integer a) { - return Math.floor(a.doubleValue()); - } - - public static double floor(Float a) { - return Math.floor(a.doubleValue()); - } - - public static double floor(Long a) { - return Math.floor(a.doubleValue()); - } - - public static double floor(Double a) { - return Math.floor(a); - } - - public static double rint(Short a) { - return Math.rint(a.doubleValue()); - } - - public static double rint(Integer a) { - return Math.rint(a.doubleValue()); - } - - public static double rint(Float a) { - return Math.rint(a.doubleValue()); - } - - public static double rint(Long a) { - return Math.rint(a.doubleValue()); - } - - public static double rint(Double a) { - return Math.rint(a); - } - - public static double atan2(Short y, Short x) { - return Math.atan2(y.doubleValue(), x.doubleValue()); - } - - public static double atan2(Integer y, Integer x) { - return Math.atan2(y.doubleValue(), x.doubleValue()); - } - - public static double atan2(Float y, Float x) { - return Math.atan2(y.doubleValue(), x.doubleValue()); - } - - public static double atan2(Long y, Long x) { - return Math.atan2(y.doubleValue(), x.doubleValue()); - } - - public static double atan2(Double y, Double x) { - return Math.atan2(y, x); - } - - public static double pow(Short a, Short b) { - return Math.pow(a.doubleValue(), b.doubleValue()); - } - - public static double pow(Integer a, Integer b) { - return Math.pow(a.doubleValue(), b.doubleValue()); - } - - public static double pow(Float a, Float b) { - return Math.pow(a.doubleValue(), b.doubleValue()); - } - - public static double pow(Long a, Long b) { - return Math.pow(a.doubleValue(), b.doubleValue()); - } - - public static double pow(Double a, Double b) { - return Math.pow(a, b); - } - - public static int round(Short a) { - return Math.round(a.floatValue()); - } - - public static int round(Integer a) { - return Math.round(a.floatValue()); - } - - public static int round(Float a) { - return Math.round(a); - } - - public static long round(Long a) { - return Math.round(a.doubleValue()); - } - - public static long round(Double a) { - return Math.round(a); - } - - public static double random() { - return ThreadLocalRandom.current().nextDouble(); - } - - public static double randomDouble() { - return ThreadLocalRandom.current().nextDouble(); - } - - public static double randomFloat() { - return ThreadLocalRandom.current().nextFloat(); - } - - public static double randomInt() { - return ThreadLocalRandom.current().nextInt(); - } - - public static double randomInt(Integer i) { - return ThreadLocalRandom.current().nextInt(i); - } - - public static double randomLong() { - return ThreadLocalRandom.current().nextLong(); - } - - public static double randomLong(Long l) { - return ThreadLocalRandom.current().nextLong(l); - } - - @SuppressForbidden(reason = "Math#abs is trappy") - public static int abs(Integer a) { - return Math.abs(a); - } - - @SuppressForbidden(reason = "Math#abs is trappy") - public static long abs(Long a) { - return Math.abs(a); - } - - @SuppressForbidden(reason = "Math#abs is trappy") - public static float abs(Float a) { - return Math.abs(a); - } - - @SuppressForbidden(reason = "Math#abs is trappy") - public static double abs(Double a) { - return Math.abs(a); - } - - public static 
int max(Integer a, Integer b) { - return Math.max(a, b); - } - - public static long max(Long a, Long b) { - return Math.max(a, b); - } - - public static float max(Float a, Float b) { - return Math.max(a, b); - } - - public static double max(Double a, Double b) { - return Math.max(a, b); - } - - public static int min(Integer a, Integer b) { - return Math.min(a, b); - } - - public static long min(Long a, Long b) { - return Math.min(a, b); - } - - public static float min(Float a, Float b) { - return Math.min(a, b); - } - - public static double min(Double a, Double b) { - return Math.min(a, b); - } - - public static double ulp(Double d) { - return Math.ulp(d); - } - - public static float ulp(Float f) { - return Math.ulp(f); - } - - public static double signum(Double d) { - return Math.signum(d); - } - - public static float signum(Float f) { - return Math.signum(f); - } - - public static double sinh(Double x) { - return Math.sinh(x); - } - - public static double cosh(Double x) { - return Math.cosh(x); - } - - public static double tanh(Double x) { - return Math.tanh(x); - } - - public static double hypot(Double x, Double y) { - return Math.hypot(x, y); - } - - public static double expm1(Double x) { - return Math.expm1(x); - } - - public static double log1p(Double x) { - return Math.log1p(x); - } - - public static double copySign(Double magnitude, Double sign) { - return Math.copySign(magnitude, sign); - } - - public static float copySign(Float magnitude, Float sign) { - return Math.copySign(magnitude, sign); - } - - public static int getExponent(Float f) { - return Math.getExponent(f); - } - - public static int getExponent(Double d) { - return Math.getExponent(d); - } - - public static double nextAfter(Double start, Double direction) { - return Math.nextAfter(start, direction); - } - - public static float nextAfter(Float start, Double direction) { - return Math.nextAfter(start, direction); - } - - public static double nextUp(Double d) { - return Math.nextUp(d); - } - - public static float nextUp(Float f) { - return Math.nextUp(f); - } - - - public static double scalb(Double d, Integer scaleFactor) { - return Math.scalb(d, scaleFactor); - } - - public static float scalb(Float f, Integer scaleFactor) { - return Math.scalb(f, scaleFactor); - } -} diff --git a/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java b/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java index 12e22a7693b..b3abed6e230 100644 --- a/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java +++ b/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java @@ -88,9 +88,6 @@ import org.elasticsearch.rest.action.admin.indices.template.put.RestPutIndexTemp import org.elasticsearch.rest.action.admin.indices.upgrade.RestUpgradeAction; import org.elasticsearch.rest.action.admin.indices.validate.query.RestValidateQueryAction; import org.elasticsearch.rest.action.admin.indices.validate.template.RestRenderSearchTemplateAction; -import org.elasticsearch.rest.action.admin.indices.warmer.delete.RestDeleteWarmerAction; -import org.elasticsearch.rest.action.admin.indices.warmer.get.RestGetWarmerAction; -import org.elasticsearch.rest.action.admin.indices.warmer.put.RestPutWarmerAction; import org.elasticsearch.rest.action.bulk.RestBulkAction; import org.elasticsearch.rest.action.cat.AbstractCatAction; import org.elasticsearch.rest.action.cat.RestAliasAction; @@ -205,10 +202,6 @@ public class NetworkModule extends AbstractModule { RestDeleteIndexTemplateAction.class, 
RestHeadIndexTemplateAction.class, - RestPutWarmerAction.class, - RestDeleteWarmerAction.class, - RestGetWarmerAction.class, - RestPutMappingAction.class, RestGetMappingAction.class, RestGetFieldMappingAction.class, diff --git a/core/src/main/java/org/elasticsearch/common/util/BloomFilter.java b/core/src/main/java/org/elasticsearch/common/util/BloomFilter.java index b19d727f022..fdc94d53849 100644 --- a/core/src/main/java/org/elasticsearch/common/util/BloomFilter.java +++ b/core/src/main/java/org/elasticsearch/common/util/BloomFilter.java @@ -519,6 +519,7 @@ public class BloomFilter { return k; } + @SuppressWarnings("fallthrough") // Uses fallthrough to implement a well-known hashing algorithm public static long hash3_x64_128(byte[] key, int offset, int length, long seed) { final int nblocks = length >> 4; // Process as 128-bit blocks. @@ -598,7 +599,7 @@ public class BloomFilter { case 2: k1 ^= ((long) key[offset + 1]) << 8; case 1: - k1 ^= ((long) key[offset]); + k1 ^= (key[offset]); k1 *= c1; k1 = rotl64(k1, 31); k1 *= c2; diff --git a/core/src/main/java/org/elasticsearch/index/IndexService.java b/core/src/main/java/org/elasticsearch/index/IndexService.java index 100b8b7ae81..3d1a9f8ed76 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexService.java +++ b/core/src/main/java/org/elasticsearch/index/IndexService.java @@ -45,7 +45,6 @@ import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.fielddata.IndexFieldDataService; -import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.QueryShardContext; @@ -68,6 +67,7 @@ import org.elasticsearch.threadpool.ThreadPool; import java.io.Closeable; import java.io.IOException; import java.nio.file.Path; +import java.util.Arrays; import java.util.HashMap; import java.util.Iterator; import java.util.Map; @@ -101,6 +101,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC private final AtomicBoolean closed = new AtomicBoolean(false); private final AtomicBoolean deleted = new AtomicBoolean(false); private final IndexSettings indexSettings; + private final IndexingSlowLog slowLog; public IndexService(IndexSettings indexSettings, NodeEnvironment nodeEnv, SimilarityService similarityService, @@ -117,7 +118,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC this.indexSettings = indexSettings; this.analysisService = registry.build(indexSettings); this.similarityService = similarityService; - this.mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry); + this.mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry, IndexService.this::getQueryShardContext); this.indexFieldData = new IndexFieldDataService(indexSettings, nodeServicesProvider.getIndicesFieldDataCache(), nodeServicesProvider.getCircuitBreakerService(), mapperService); this.shardStoreDeleter = shardStoreDeleter; this.eventListener = eventListener; @@ -130,6 +131,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC this.engineFactory = engineFactory; // initialize this last -- otherwise if the wrapper requires any other member to be non-null we fail with an NPE this.searcherWrapper = wrapperFactory.newWrapper(this); +
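The fallthrough suppression added to BloomFilter above marks the standard MurmurHash3 tail idiom: the switch intentionally omits break so that each case XORs one more trailing byte into the accumulator before the final mix. A minimal sketch of the idiom with assumed names (the patch's real tail processes up to 15 bytes across two 64-bit lanes):

    @SuppressWarnings("fallthrough") // deliberate: each case folds in one more trailing byte
    static long mixTail(byte[] key, int offset, int remaining) {
        long k1 = 0;
        switch (remaining) { // 1..7 leftover bytes, consumed from the highest index down
            case 7: k1 ^= ((long) key[offset + 6]) << 48;
            case 6: k1 ^= ((long) key[offset + 5]) << 40;
            case 5: k1 ^= ((long) key[offset + 4]) << 32;
            case 4: k1 ^= ((long) key[offset + 3]) << 24;
            case 3: k1 ^= ((long) key[offset + 2]) << 16;
            case 2: k1 ^= ((long) key[offset + 1]) << 8;
            case 1: k1 ^= (long) key[offset];
        }
        return k1;
    }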
this.slowLog = new IndexingSlowLog(indexSettings.getSettings()); } public int numberOfShards() { @@ -292,9 +294,9 @@ public final class IndexService extends AbstractIndexComponent implements IndexC (primary && IndexMetaData.isOnSharedFilesystem(indexSettings)); store = new Store(shardId, this.indexSettings, indexStore.newDirectoryService(path), lock, new StoreCloseListener(shardId, canDeleteShardContent, () -> nodeServicesProvider.getIndicesQueryCache().onClose(shardId))); if (useShadowEngine(primary, indexSettings)) { - indexShard = new ShadowIndexShard(shardId, this.indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldData, engineFactory, eventListener, searcherWrapper, nodeServicesProvider); + indexShard = new ShadowIndexShard(shardId, this.indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldData, engineFactory, eventListener, searcherWrapper, nodeServicesProvider); // no indexing listeners - shadow engines don't index } else { - indexShard = new IndexShard(shardId, this.indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldData, engineFactory, eventListener, searcherWrapper, nodeServicesProvider); + indexShard = new IndexShard(shardId, this.indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldData, engineFactory, eventListener, searcherWrapper, nodeServicesProvider, slowLog); } eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created"); @@ -552,6 +554,11 @@ public final class IndexService extends AbstractIndexComponent implements IndexC } catch (Exception e) { logger.warn("failed to refresh index store settings", e); } + try { + slowLog.onRefreshSettings(settings); // this will be refactored soon anyway so duplication is ok here + } catch (Exception e) { + logger.warn("failed to refresh slowlog settings", e); + } } } diff --git a/core/src/main/java/org/elasticsearch/index/indexing/IndexingSlowLog.java b/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java similarity index 96% rename from core/src/main/java/org/elasticsearch/index/indexing/IndexingSlowLog.java rename to core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java index 292c2a16e91..5cd3685b2f8 100644 --- a/core/src/main/java/org/elasticsearch/index/indexing/IndexingSlowLog.java +++ b/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java @@ -17,7 +17,7 @@ * under the License. 
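With IndexingSlowLog promoted to an index-level component here (created once in the IndexService constructor and handed to each new IndexShard), slow-log reporting becomes just one IndexingOperationListener among potentially many. A sketch of another listener in the same style, assuming the rewritten org.elasticsearch.index.shard.IndexingOperationListener interface provides no-op defaults so implementors only override what they need:

    // Derives took-time from the operation's own timestamps, as the slow log now does.
    final class IndexTimingListener implements IndexingOperationListener {
        private final MeanMetric indexMetric = new MeanMetric(); // org.elasticsearch.common.metrics

        @Override
        public void postIndex(Engine.Index index) {
            indexMetric.inc(index.endTime() - index.startTime()); // nanoseconds
        }
    }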
*/ -package org.elasticsearch.index.indexing; +package org.elasticsearch.index; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; @@ -28,6 +28,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.shard.IndexingOperationListener; import java.io.IOException; import java.util.Locale; @@ -35,7 +36,7 @@ import java.util.concurrent.TimeUnit; /** */ -public final class IndexingSlowLog { +public final class IndexingSlowLog implements IndexingOperationListener { private boolean reformat; @@ -124,8 +125,9 @@ public final class IndexingSlowLog { } } - void postIndex(Engine.Index index, long tookInNanos) { - postIndexing(index.parsedDoc(), tookInNanos); + public void postIndex(Engine.Index index) { + final long took = index.endTime() - index.startTime(); + postIndexing(index.parsedDoc(), took); } /** @@ -192,4 +194,4 @@ public final class IndexingSlowLog { return sb.toString(); } } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/index/analysis/Analysis.java b/core/src/main/java/org/elasticsearch/index/analysis/Analysis.java index 43c9af672d1..a27b49b9618 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/Analysis.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/Analysis.java @@ -89,12 +89,12 @@ public class Analysis { // check for explicit version on the specific analyzer component String sVersion = settings.get("version"); if (sVersion != null) { - return Lucene.parseVersion(sVersion, Lucene.ANALYZER_VERSION, logger); + return Lucene.parseVersion(sVersion, Version.LATEST, logger); } // check for explicit version on the index itself as default for all analysis components sVersion = indexSettings.get("index.analysis.version"); if (sVersion != null) { - return Lucene.parseVersion(sVersion, Lucene.ANALYZER_VERSION, logger); + return Lucene.parseVersion(sVersion, Version.LATEST, logger); } // resolve the analysis version based on the version the index was created with return org.elasticsearch.Version.indexCreated(indexSettings).luceneVersion; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java b/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java index c833f41457e..1fd3a4d96b0 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java @@ -181,6 +181,7 @@ public final class AnalysisRegistry implements Closeable { tokenizers.put("standard", StandardTokenizerFactory::new); tokenizers.put("uax_url_email", UAX29URLEmailTokenizerFactory::new); tokenizers.put("path_hierarchy", PathHierarchyTokenizerFactory::new); + tokenizers.put("PathHierarchy", PathHierarchyTokenizerFactory::new); tokenizers.put("keyword", KeywordTokenizerFactory::new); tokenizers.put("letter", LetterTokenizerFactory::new); tokenizers.put("lowercase", LowerCaseTokenizerFactory::new); @@ -409,6 +410,7 @@ public final class AnalysisRegistry implements Closeable { // Tokenizer aliases tokenizerFactories.put("nGram", new PreBuiltTokenizerFactoryFactory(PreBuiltTokenizers.NGRAM.getTokenizerFactory(Version.CURRENT))); tokenizerFactories.put("edgeNGram", new PreBuiltTokenizerFactoryFactory(PreBuiltTokenizers.EDGE_NGRAM.getTokenizerFactory(Version.CURRENT))); + tokenizerFactories.put("PathHierarchy", new 
PreBuiltTokenizerFactoryFactory(PreBuiltTokenizers.PATH_HIERARCHY.getTokenizerFactory(Version.CURRENT))); // Token filters diff --git a/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java b/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java index 4e9ecf569d0..1de139aa695 100644 --- a/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java +++ b/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java @@ -118,6 +118,12 @@ public final class BitsetFilterCache extends AbstractIndexComponent implements L private BitSet getAndLoadIfNotPresent(final Query query, final LeafReaderContext context) throws IOException, ExecutionException { final Object coreCacheReader = context.reader().getCoreCacheKey(); final ShardId shardId = ShardUtils.extractShardId(context.reader()); + if (shardId != null // can't require it because of the percolator + && indexSettings.getIndex().getName().equals(shardId.getIndex()) == false) { + // insanity + throw new IllegalStateException("Trying to load bit set for index [" + shardId.getIndex() + + "] with cache of index [" + indexSettings.getIndex().getName() + "]"); + } Cache filterToFbs = loadedFilters.computeIfAbsent(coreCacheReader, key -> { context.reader().addCoreClosedListener(BitsetFilterCache.this); return CacheBuilder.builder().build(); @@ -208,6 +214,11 @@ public final class BitsetFilterCache extends AbstractIndexComponent implements L @Override public IndicesWarmer.TerminationHandle warmNewReaders(final IndexShard indexShard, final Engine.Searcher searcher) { + if (indexSettings.getIndex().equals(indexShard.getIndexSettings().getIndex()) == false) { + // this is from a different index + return TerminationHandle.NO_WAIT; + } + if (!loadRandomAccessFiltersEagerly) { return TerminationHandle.NO_WAIT; } diff --git a/core/src/main/java/org/elasticsearch/index/engine/Engine.java b/core/src/main/java/org/elasticsearch/index/engine/Engine.java index b7667378e47..f6d4601e002 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -50,6 +50,8 @@ import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.metrics.CounterMetric; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.ParseContext.Document; @@ -178,10 +180,10 @@ public abstract class Engine implements Closeable { * is enabled */ protected static final class IndexThrottle { - + private final CounterMetric throttleTimeMillisMetric = new CounterMetric(); + private volatile long startOfThrottleNS; private static final ReleasableLock NOOP_LOCK = new ReleasableLock(new NoOpLock()); private final ReleasableLock lockReference = new ReleasableLock(new ReentrantLock()); - private volatile ReleasableLock lock = NOOP_LOCK; public Releasable acquireThrottle() { @@ -191,6 +193,7 @@ public abstract class Engine implements Closeable { /** Activate throttling, which switches the lock to be a real lock */ public void activate() { assert lock == NOOP_LOCK : "throttling activated while already active"; + startOfThrottleNS = System.nanoTime(); lock = lockReference; } @@ -198,7 +201,45 @@ public abstract class Engine implements Closeable 
{ public void deactivate() { assert lock != NOOP_LOCK : "throttling deactivated but not active"; lock = NOOP_LOCK; + + assert startOfThrottleNS > 0 : "Bad state of startOfThrottleNS"; + long throttleTimeNS = System.nanoTime() - startOfThrottleNS; + if (throttleTimeNS >= 0) { + // Paranoia (System.nanoTime() is supposed to be monotonic): time slip may have occurred but never want to add a negative number + throttleTimeMillisMetric.inc(TimeValue.nsecToMSec(throttleTimeNS)); + } } + + long getThrottleTimeInMillis() { + long currentThrottleNS = 0; + if (isThrottled() && startOfThrottleNS != 0) { + currentThrottleNS += System.nanoTime() - startOfThrottleNS; + if (currentThrottleNS < 0) { + // Paranoia (System.nanoTime() is supposed to be monotonic): time slip must have happened, have to ignore this value + currentThrottleNS = 0; + } + } + return throttleTimeMillisMetric.count() + TimeValue.nsecToMSec(currentThrottleNS); + } + + boolean isThrottled() { + return lock != NOOP_LOCK; + } + } + + /** + * Returns the number of milliseconds this engine was under index throttling. + */ + public long getIndexThrottleTimeInMillis() { + return 0; + } + + /** + * Returns true iff this engine is currently under index throttling. + * @see #getIndexThrottleTimeInMillis() + */ + public boolean isThrottled() { + return false; } /** A Lock implementation that always allows the lock to be acquired */ @@ -936,7 +977,7 @@ public abstract class Engine implements Closeable { } } - public static class GetResult { + public static class GetResult implements Releasable { private final boolean exists; private final long version; private final Translog.Source source; @@ -982,6 +1023,11 @@ public abstract class Engine implements Closeable { return docIdAndVersion; } + @Override + public void close() { + release(); + } + public void release() { if (searcher != null) { searcher.close(); diff --git a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java index 2065036b7d3..b5e55378076 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java +++ b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java @@ -30,13 +30,12 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.codec.CodecService; -import org.elasticsearch.index.indexing.ShardIndexingService; import org.elasticsearch.index.shard.MergeSchedulerConfig; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.TranslogRecoveryPerformer; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.TranslogConfig; -import org.elasticsearch.indices.memory.IndexingMemoryController; +import org.elasticsearch.indices.IndexingMemoryController; import org.elasticsearch.threadpool.ThreadPool; import java.util.concurrent.TimeUnit; @@ -58,7 +57,6 @@ public final class EngineConfig { private final TimeValue flushMergesAfter; private final String codecName; private final ThreadPool threadPool; - private final ShardIndexingService indexingService; private final Engine.Warmer warmer; private final Store store; private final SnapshotDeletionPolicy deletionPolicy; @@ -108,7 +106,7 @@ public final class EngineConfig { /** * Creates a new {@link org.elasticsearch.index.engine.EngineConfig} */ - public EngineConfig(ShardId shardId, ThreadPool threadPool, ShardIndexingService indexingService, + 
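The throttle accounting added to IndexThrottle above is a small stopwatch-with-paranoia pattern: stamp System.nanoTime() on activation, add the clamped delta on deactivation, and fold in the in-flight slice when reading. The same pattern in isolation (names invented, not the ES types):

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicLong;

    final class ThrottleClock {
        private final AtomicLong totalMillis = new AtomicLong();
        private volatile long startNanos;
        private volatile boolean active;

        void activate() {
            startNanos = System.nanoTime();
            active = true;
        }

        void deactivate() {
            active = false;
            long elapsedNanos = System.nanoTime() - startNanos;
            if (elapsedNanos >= 0) { // nanoTime should be monotonic; never add a negative slice
                totalMillis.addAndGet(TimeUnit.NANOSECONDS.toMillis(elapsedNanos));
            }
        }

        long totalTimeInMillis() { // includes the currently running slice, clamped at zero
            long inFlight = active ? Math.max(0, System.nanoTime() - startNanos) : 0;
            return totalMillis.get() + TimeUnit.NANOSECONDS.toMillis(inFlight);
        }
    }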
public EngineConfig(ShardId shardId, ThreadPool threadPool, IndexSettings indexSettings, Engine.Warmer warmer, Store store, SnapshotDeletionPolicy deletionPolicy, MergePolicy mergePolicy, MergeSchedulerConfig mergeSchedulerConfig, Analyzer analyzer, Similarity similarity, CodecService codecService, Engine.EventListener eventListener, @@ -117,9 +115,7 @@ public final class EngineConfig { final Settings settings = indexSettings.getSettings(); this.indexSettings = indexSettings; this.threadPool = threadPool; - this.indexingService = indexingService; - this.warmer = warmer == null ? (a, b) -> { - } : warmer; + this.warmer = warmer == null ? (a,b) -> {} : warmer; this.store = store; this.deletionPolicy = deletionPolicy; this.mergePolicy = mergePolicy; @@ -197,7 +193,7 @@ public final class EngineConfig { } /** - * Returns the initial index buffer size. This setting is only read on startup and otherwise controlled by {@link org.elasticsearch.indices.memory.IndexingMemoryController} + * Returns the initial index buffer size. This setting is only read on startup and otherwise controlled by {@link IndexingMemoryController} */ public ByteSizeValue getIndexingBufferSize() { return indexingBufferSize; @@ -241,17 +237,6 @@ public final class EngineConfig { return threadPool; } - /** - * Returns a {@link org.elasticsearch.index.indexing.ShardIndexingService} used inside the engine to inform about - * pre and post index. The operations are used for statistic purposes etc. - * - * @see org.elasticsearch.index.indexing.ShardIndexingService#postIndex(Engine.Index) - * @see org.elasticsearch.index.indexing.ShardIndexingService#preIndex(Engine.Index) - */ - public ShardIndexingService getIndexingService() { - return indexingService; - } - /** * Returns an {@link org.elasticsearch.index.engine.Engine.Warmer} used to warm new searchers before they are used for searching. 
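Worth noting in the constructor above: a null Engine.Warmer is swapped for a no-op lambda, so the engine can always call warmer.warm(...) without null checks. The null-object idiom in isolation (generic stand-in types, not the ES ones):

    interface Warmer { // stand-in for Engine.Warmer's two-argument callback
        void warm(Object searcher, boolean isTopLevelReader);
    }

    final class Config {
        private final Warmer warmer;

        Config(Warmer warmer) {
            // null-object default: every caller may invoke warmer.warm(...) unconditionally
            this.warmer = warmer == null ? (a, b) -> {} : warmer;
        }
    }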
*/ diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 13940dd0cd0..81007f1c2e9 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -59,7 +59,6 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.indexing.ShardIndexingService; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.merge.OnGoingMerge; @@ -97,7 +96,6 @@ public class InternalEngine extends Engine { */ private volatile long lastDeleteVersionPruneTimeMSec; - private final ShardIndexingService indexingService; private final Engine.Warmer warmer; private final Translog translog; private final ElasticsearchConcurrentMergeScheduler mergeScheduler; @@ -135,7 +133,6 @@ public class InternalEngine extends Engine { boolean success = false; try { this.lastDeleteVersionPruneTimeMSec = engineConfig.getThreadPool().estimatedTimeInMillis(); - this.indexingService = engineConfig.getIndexingService(); this.warmer = engineConfig.getWarmer(); seqNoService = new SequenceNumbersService(shardId, engineConfig.getIndexSettings()); mergeScheduler = scheduler = new EngineMergeScheduler(engineConfig.getShardId(), engineConfig.getIndexSettings(), engineConfig.getMergeSchedulerConfig()); @@ -430,8 +427,6 @@ public class InternalEngine extends Engine { versionMap.putUnderLock(index.uid().bytes(), new VersionValue(updatedVersion, translogLocation)); index.setTranslogLocation(translogLocation); - - indexingService.postIndexUnderLock(index); return created; } finally { if (index.seqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) { @@ -543,7 +538,6 @@ public class InternalEngine extends Engine { Translog.Location translogLocation = translog.add(new Translog.Delete(delete)); versionMap.putUnderLock(delete.uid().bytes(), new DeleteVersionValue(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis(), translogLocation)); delete.setTranslogLocation(translogLocation); - indexingService.postDeleteUnderLock(delete); } finally { if (delete.seqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) { seqNoService.markSeqNoAsCompleted(delete.seqNo()); @@ -989,8 +983,7 @@ public class InternalEngine extends Engine { }); return new IndexWriter(store.directory(), iwc); } catch (LockObtainFailedException ex) { - boolean isLocked = IndexWriter.isLocked(store.directory()); - logger.warn("Could not lock IndexWriter isLocked [{}]", ex, isLocked); + logger.warn("could not lock IndexWriter", ex); throw ex; } } @@ -1083,6 +1076,10 @@ public class InternalEngine extends Engine { throttle.deactivate(); } + public long getIndexThrottleTimeInMillis() { + return throttle.getThrottleTimeInMillis(); + } + long getGcDeletesInMillis() { return engineConfig.getGcDeletesInMillis(); } @@ -1105,7 +1102,6 @@ public class InternalEngine extends Engine { if (numMergesInFlight.incrementAndGet() > maxNumMerges) { if (isThrottling.getAndSet(true) == false) { logger.info("now throttling indexing: numMergesInFlight={}, maxNumMerges={}", numMergesInFlight, maxNumMerges); - indexingService.throttlingActivated(); activateThrottling(); } } @@ -1117,7 +1113,6 @@ public class 
InternalEngine extends Engine { if (numMergesInFlight.decrementAndGet() < maxNumMerges) { if (isThrottling.getAndSet(false)) { logger.info("stop throttling indexing: numMergesInFlight={}, maxNumMerges={}", numMergesInFlight, maxNumMerges); - indexingService.throttlingDeactivated(); deactivateThrottling(); } } diff --git a/core/src/main/java/org/elasticsearch/index/fieldvisitor/SingleFieldsVisitor.java b/core/src/main/java/org/elasticsearch/index/fieldvisitor/SingleFieldsVisitor.java index a9880d59f65..1751a820d8c 100644 --- a/core/src/main/java/org/elasticsearch/index/fieldvisitor/SingleFieldsVisitor.java +++ b/core/src/main/java/org/elasticsearch/index/fieldvisitor/SingleFieldsVisitor.java @@ -58,11 +58,16 @@ public class SingleFieldsVisitor extends FieldsVisitor { public void postProcess(MappedFieldType fieldType) { if (uid != null) { - // TODO: this switch seems very wrong...either each case should be breaking, or this should not be a switch switch (field) { - case UidFieldMapper.NAME: addValue(field, uid.toString()); - case IdFieldMapper.NAME: addValue(field, uid.id()); - case TypeFieldMapper.NAME: addValue(field, uid.type()); + case UidFieldMapper.NAME: + addValue(field, uid.toString()); + break; + case IdFieldMapper.NAME: + addValue(field, uid.id()); + break; + case TypeFieldMapper.NAME: + addValue(field, uid.type()); + break; } } diff --git a/core/src/main/java/org/elasticsearch/index/indexing/IndexingOperationListener.java b/core/src/main/java/org/elasticsearch/index/indexing/IndexingOperationListener.java deleted file mode 100644 index 651bc405a84..00000000000 --- a/core/src/main/java/org/elasticsearch/index/indexing/IndexingOperationListener.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.index.indexing; - -import org.elasticsearch.index.engine.Engine; - -/** - * An indexing listener for indexing, delete, events. - */ -public abstract class IndexingOperationListener { - - /** - * Called before the indexing occurs. - */ - public Engine.Index preIndex(Engine.Index operation) { - return operation; - } - - /** - * Called after the indexing occurs, under a locking scheme to maintain - * concurrent updates to the same doc. - *

- * Note, long operations should not occur under this callback. - */ - public void postIndexUnderLock(Engine.Index index) { - - } - - /** - * Called after the indexing operation occurred. - */ - public void postIndex(Engine.Index index) { - - } - - /** - * Called after the indexing operation occurred with exception. - */ - public void postIndex(Engine.Index index, Throwable ex) { - - } - - /** - * Called before the delete occurs. - */ - public Engine.Delete preDelete(Engine.Delete delete) { - return delete; - } - - /** - * Called after the delete occurs, under a locking scheme to maintain - * concurrent updates to the same doc. - *

- * Note, long operations should not occur under this callback. - */ - public void postDeleteUnderLock(Engine.Delete delete) { - - } - - /** - * Called after the delete operation occurred. - */ - public void postDelete(Engine.Delete delete) { - - } - - /** - * Called after the delete operation occurred with exception. - */ - public void postDelete(Engine.Delete delete, Throwable ex) { - - } -} diff --git a/core/src/main/java/org/elasticsearch/index/indexing/ShardIndexingService.java b/core/src/main/java/org/elasticsearch/index/indexing/ShardIndexingService.java deleted file mode 100644 index 5cf180c3a2e..00000000000 --- a/core/src/main/java/org/elasticsearch/index/indexing/ShardIndexingService.java +++ /dev/null @@ -1,286 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.indexing; - -import org.elasticsearch.common.collect.MapBuilder; -import org.elasticsearch.common.metrics.CounterMetric; -import org.elasticsearch.common.metrics.MeanMetric; -import org.elasticsearch.common.regex.Regex; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.shard.AbstractIndexShardComponent; -import org.elasticsearch.index.shard.ShardId; - -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.TimeUnit; - -import static java.util.Collections.emptyMap; - -/** - */ -public class ShardIndexingService extends AbstractIndexShardComponent { - - private final IndexingSlowLog slowLog; - - private final StatsHolder totalStats = new StatsHolder(); - - private final CopyOnWriteArrayList listeners = new CopyOnWriteArrayList<>(); - - private volatile Map typesStats = emptyMap(); - - public ShardIndexingService(ShardId shardId, IndexSettings indexSettings) { - super(shardId, indexSettings); - this.slowLog = new IndexingSlowLog(this.indexSettings.getSettings()); - } - - /** - * Returns the stats, including type specific stats. If the types are null/0 length, then nothing - * is returned for them. If they are set, then only types provided will be returned, or - * _all for all types. - */ - public IndexingStats stats(String... 
types) { - IndexingStats.Stats total = totalStats.stats(); - Map typesSt = null; - if (types != null && types.length > 0) { - typesSt = new HashMap<>(typesStats.size()); - if (types.length == 1 && types[0].equals("_all")) { - for (Map.Entry entry : typesStats.entrySet()) { - typesSt.put(entry.getKey(), entry.getValue().stats()); - } - } else { - for (Map.Entry entry : typesStats.entrySet()) { - if (Regex.simpleMatch(types, entry.getKey())) { - typesSt.put(entry.getKey(), entry.getValue().stats()); - } - } - } - } - return new IndexingStats(total, typesSt); - } - - public void addListener(IndexingOperationListener listener) { - listeners.add(listener); - } - - public void removeListener(IndexingOperationListener listener) { - listeners.remove(listener); - } - - public void throttlingActivated() { - totalStats.setThrottled(true); - } - - public void throttlingDeactivated() { - totalStats.setThrottled(false); - } - - public Engine.Index preIndex(Engine.Index operation) { - totalStats.indexCurrent.inc(); - typeStats(operation.type()).indexCurrent.inc(); - for (IndexingOperationListener listener : listeners) { - operation = listener.preIndex(operation); - } - return operation; - } - - public void postIndexUnderLock(Engine.Index index) { - for (IndexingOperationListener listener : listeners) { - try { - listener.postIndexUnderLock(index); - } catch (Exception e) { - logger.warn("postIndexUnderLock listener [{}] failed", e, listener); - } - } - } - - public void postIndex(Engine.Index index) { - long took = index.endTime() - index.startTime(); - totalStats.indexMetric.inc(took); - totalStats.indexCurrent.dec(); - StatsHolder typeStats = typeStats(index.type()); - typeStats.indexMetric.inc(took); - typeStats.indexCurrent.dec(); - slowLog.postIndex(index, took); - for (IndexingOperationListener listener : listeners) { - try { - listener.postIndex(index); - } catch (Exception e) { - logger.warn("postIndex listener [{}] failed", e, listener); - } - } - } - - public void postIndex(Engine.Index index, Throwable ex) { - totalStats.indexCurrent.dec(); - typeStats(index.type()).indexCurrent.dec(); - totalStats.indexFailed.inc(); - typeStats(index.type()).indexFailed.inc(); - for (IndexingOperationListener listener : listeners) { - try { - listener.postIndex(index, ex); - } catch (Throwable t) { - logger.warn("postIndex listener [{}] failed", t, listener); - } - } - } - - public Engine.Delete preDelete(Engine.Delete delete) { - totalStats.deleteCurrent.inc(); - typeStats(delete.type()).deleteCurrent.inc(); - for (IndexingOperationListener listener : listeners) { - delete = listener.preDelete(delete); - } - return delete; - } - - public void postDeleteUnderLock(Engine.Delete delete) { - for (IndexingOperationListener listener : listeners) { - try { - listener.postDeleteUnderLock(delete); - } catch (Exception e) { - logger.warn("postDeleteUnderLock listener [{}] failed", e, listener); - } - } - } - - public void postDelete(Engine.Delete delete) { - long took = delete.endTime() - delete.startTime(); - totalStats.deleteMetric.inc(took); - totalStats.deleteCurrent.dec(); - StatsHolder typeStats = typeStats(delete.type()); - typeStats.deleteMetric.inc(took); - typeStats.deleteCurrent.dec(); - for (IndexingOperationListener listener : listeners) { - try { - listener.postDelete(delete); - } catch (Exception e) { - logger.warn("postDelete listener [{}] failed", e, listener); - } - } - } - - public void postDelete(Engine.Delete delete, Throwable ex) { - totalStats.deleteCurrent.dec(); - 
typeStats(delete.type()).deleteCurrent.dec(); - for (IndexingOperationListener listener : listeners) { - try { - listener. postDelete(delete, ex); - } catch (Throwable t) { - logger.warn("postDelete listener [{}] failed", t, listener); - } - } - } - - public void noopUpdate(String type) { - totalStats.noopUpdates.inc(); - typeStats(type).noopUpdates.inc(); - } - - public void clear() { - totalStats.clear(); - synchronized (this) { - if (!typesStats.isEmpty()) { - MapBuilder typesStatsBuilder = MapBuilder.newMapBuilder(); - for (Map.Entry typeStats : typesStats.entrySet()) { - if (typeStats.getValue().totalCurrent() > 0) { - typeStats.getValue().clear(); - typesStatsBuilder.put(typeStats.getKey(), typeStats.getValue()); - } - } - typesStats = typesStatsBuilder.immutableMap(); - } - } - } - - private StatsHolder typeStats(String type) { - StatsHolder stats = typesStats.get(type); - if (stats == null) { - synchronized (this) { - stats = typesStats.get(type); - if (stats == null) { - stats = new StatsHolder(); - typesStats = MapBuilder.newMapBuilder(typesStats).put(type, stats).immutableMap(); - } - } - } - return stats; - } - - public void onRefreshSettings(Settings settings) { - slowLog.onRefreshSettings(settings); - } - - static class StatsHolder { - public final MeanMetric indexMetric = new MeanMetric(); - public final MeanMetric deleteMetric = new MeanMetric(); - public final CounterMetric indexCurrent = new CounterMetric(); - public final CounterMetric indexFailed = new CounterMetric(); - public final CounterMetric deleteCurrent = new CounterMetric(); - public final CounterMetric noopUpdates = new CounterMetric(); - public final CounterMetric throttleTimeMillisMetric = new CounterMetric(); - volatile boolean isThrottled = false; - volatile long startOfThrottleNS; - - public IndexingStats.Stats stats() { - long currentThrottleNS = 0; - if (isThrottled && startOfThrottleNS != 0) { - currentThrottleNS += System.nanoTime() - startOfThrottleNS; - if (currentThrottleNS < 0) { - // Paranoia (System.nanoTime() is supposed to be monotonic): time slip must have happened, have to ignore this value - currentThrottleNS = 0; - } - } - return new IndexingStats.Stats( - indexMetric.count(), TimeUnit.NANOSECONDS.toMillis(indexMetric.sum()), indexCurrent.count(), indexFailed.count(), - deleteMetric.count(), TimeUnit.NANOSECONDS.toMillis(deleteMetric.sum()), deleteCurrent.count(), - noopUpdates.count(), isThrottled, TimeUnit.MILLISECONDS.toMillis(throttleTimeMillisMetric.count() + TimeValue.nsecToMSec(currentThrottleNS))); - } - - - void setThrottled(boolean isThrottled) { - if (!this.isThrottled && isThrottled) { - startOfThrottleNS = System.nanoTime(); - } else if (this.isThrottled && !isThrottled) { - assert startOfThrottleNS > 0 : "Bad state of startOfThrottleNS"; - long throttleTimeNS = System.nanoTime() - startOfThrottleNS; - if (throttleTimeNS >= 0) { - // Paranoia (System.nanoTime() is supposed to be monotonic): time slip may have occurred but never want to add a negative number - throttleTimeMillisMetric.inc(TimeValue.nsecToMSec(throttleTimeNS)); - } - } - this.isThrottled = isThrottled; - } - - public long totalCurrent() { - return indexCurrent.count() + deleteMetric.count(); - } - - public void clear() { - indexMetric.clear(); - deleteMetric.clear(); - } - - - } -} diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java index f087e06e3c5..f95db850373 100644 --- 
a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java @@ -33,12 +33,14 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.AnalysisService; import org.elasticsearch.index.mapper.object.RootObjectMapper; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.indices.mapper.MapperRegistry; import java.util.HashMap; import java.util.Iterator; import java.util.Map; +import java.util.function.Supplier; import static java.util.Collections.unmodifiableMap; import static org.elasticsearch.index.mapper.MapperBuilders.doc; @@ -49,6 +51,7 @@ public class DocumentMapperParser { final AnalysisService analysisService; private static final ESLogger logger = Loggers.getLogger(DocumentMapperParser.class); private final SimilarityService similarityService; + private final Supplier<QueryShardContext> queryShardContextSupplier; private final RootObjectMapper.TypeParser rootObjectTypeParser = new RootObjectMapper.TypeParser(); @@ -59,18 +62,20 @@ public class DocumentMapperParser { private final Map rootTypeParsers; public DocumentMapperParser(IndexSettings indexSettings, MapperService mapperService, AnalysisService analysisService, - SimilarityService similarityService, MapperRegistry mapperRegistry) { + SimilarityService similarityService, MapperRegistry mapperRegistry, + Supplier<QueryShardContext> queryShardContextSupplier) { this.parseFieldMatcher = new ParseFieldMatcher(indexSettings.getSettings()); this.mapperService = mapperService; this.analysisService = analysisService; this.similarityService = similarityService; + this.queryShardContextSupplier = queryShardContextSupplier; this.typeParsers = mapperRegistry.getMapperParsers(); this.rootTypeParsers = mapperRegistry.getMetadataMapperParsers(); indexVersionCreated = indexSettings.getIndexVersionCreated(); } public Mapper.TypeParser.ParserContext parserContext(String type) { - return new Mapper.TypeParser.ParserContext(type, analysisService, similarityService::getSimilarity, mapperService, typeParsers::get, indexVersionCreated, parseFieldMatcher); + return new Mapper.TypeParser.ParserContext(type, analysisService, similarityService::getSimilarity, mapperService, typeParsers::get, indexVersionCreated, parseFieldMatcher, queryShardContextSupplier.get()); } public DocumentMapper parse(@Nullable String type, CompressedXContent source) throws MapperParsingException { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java index ffdae90c436..4dd43db0517 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java @@ -26,6 +26,8 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.index.analysis.AnalysisService; +import org.elasticsearch.index.query.QueryParseContext; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.similarity.SimilarityProvider; import java.util.Map; @@ -95,9 +97,11 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> { private final ParseFieldMatcher parseFieldMatcher; - public ParserContext(String type, AnalysisService analysisService, Function<String, SimilarityProvider> similarityLookupService, + private final QueryShardContext queryShardContext; + + public ParserContext(String type, AnalysisService analysisService, Function<String, SimilarityProvider> similarityLookupService, MapperService mapperService, Function<String, Mapper.TypeParser> typeParsers, - Version indexVersionCreated, ParseFieldMatcher parseFieldMatcher) { + Version indexVersionCreated, ParseFieldMatcher parseFieldMatcher, QueryShardContext queryShardContext) { this.type = type; this.analysisService = analysisService; this.similarityLookupService = similarityLookupService; @@ -105,6 +109,7 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> { this.typeParsers = typeParsers; this.indexVersionCreated = indexVersionCreated; this.parseFieldMatcher = parseFieldMatcher; + this.queryShardContext = queryShardContext; } public String type() { @@ -135,6 +140,10 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> { return parseFieldMatcher; } + public QueryShardContext queryShardContext() { + return queryShardContext; + } + public boolean isWithinMultiField() { return false; } protected Function<String, Mapper.TypeParser> typeParsers() { return typeParsers; } @@ -150,7 +159,7 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> { static class MultiFieldParserContext extends ParserContext { MultiFieldParserContext(ParserContext in) { - super(in.type(), in.analysisService, in.similarityLookupService(), in.mapperService(), in.typeParsers(), in.indexVersionCreated(), in.parseFieldMatcher()); + super(in.type(), in.analysisService, in.similarityLookupService(), in.mapperService(), in.typeParsers(), in.indexVersionCreated(), in.parseFieldMatcher(), in.queryShardContext()); } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 3f76245aa8f..2ca42413568 100755 --- a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -44,6 +44,7 @@ import org.elasticsearch.index.analysis.AnalysisService; import org.elasticsearch.index.mapper.Mapper.BuilderContext; import org.elasticsearch.index.mapper.internal.TypeFieldMapper; import org.elasticsearch.index.mapper.object.ObjectMapper; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.indices.InvalidTypeNameException; import org.elasticsearch.indices.TypeMissingException; @@ -64,12 +65,12 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; import java.util.function.Function; +import java.util.function.Supplier; import java.util.stream.Collectors; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static java.util.Collections.unmodifiableMap; -import static java.util.Collections.unmodifiableSet; import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder; /** @@ -116,11 +117,12 @@ public class MapperService extends AbstractIndexComponent implements Closeable { final MapperRegistry mapperRegistry; public MapperService(IndexSettings indexSettings, AnalysisService analysisService, - SimilarityService similarityService, MapperRegistry mapperRegistry) { + SimilarityService similarityService, MapperRegistry mapperRegistry, + Supplier<QueryShardContext> queryShardContextSupplier) { super(indexSettings); this.analysisService = analysisService; this.fieldTypes = new FieldTypeLookup(); - this.documentParser = new DocumentMapperParser(indexSettings, this, 
analysisService, similarityService, mapperRegistry); + this.documentParser = new DocumentMapperParser(indexSettings, this, analysisService, similarityService, mapperRegistry, queryShardContextSupplier); this.indexAnalyzer = new MapperAnalyzerWrapper(analysisService.defaultIndexAnalyzer(), p -> p.indexAnalyzer()); this.searchAnalyzer = new MapperAnalyzerWrapper(analysisService.defaultSearchAnalyzer(), p -> p.searchAnalyzer()); this.searchQuoteAnalyzer = new MapperAnalyzerWrapper(analysisService.defaultSearchQuoteAnalyzer(), p -> p.searchQuoteAnalyzer()); @@ -131,8 +133,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable { "\"_default_\":{\n" + "\"properties\" : {\n" + "\"query\" : {\n" + - "\"type\" : \"object\",\n" + - "\"enabled\" : false\n" + + "\"type\" : \"percolator\"\n" + "}\n" + "}\n" + "}\n" + diff --git a/core/src/main/java/org/elasticsearch/index/percolator/ExtractQueryTermsService.java b/core/src/main/java/org/elasticsearch/index/percolator/ExtractQueryTermsService.java new file mode 100644 index 00000000000..7dc6e51a759 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/index/percolator/ExtractQueryTermsService.java @@ -0,0 +1,233 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.index.percolator; + +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.index.Fields; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.Term; +import org.apache.lucene.index.Terms; +import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.queries.TermsQuery; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.BoostQuery; +import org.apache.lucene.search.ConstantScoreQuery; +import org.apache.lucene.search.PhraseQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefBuilder; +import org.elasticsearch.common.logging.support.LoggerMessageFormat; +import org.elasticsearch.index.mapper.ParseContext; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +/** + * Utility to extract query terms from queries and create queries from documents. + */ +public final class ExtractQueryTermsService { + + private static final byte FIELD_VALUE_SEPARATOR = 0; // nul code point + + private ExtractQueryTermsService() { + } + + /** + * Extracts all terms from the specified query and adds them to the specified document. 
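The separator-based encoding this new class uses packs a term's field name and its bytes into a single BytesRef, so terms from every field can share one indexed metadata field. A minimal sketch with the same Lucene utilities (field and term values invented):

    import org.apache.lucene.util.BytesRef;
    import org.apache.lucene.util.BytesRefBuilder;

    public class EncodeTerm {
        public static void main(String[] args) {
            BytesRefBuilder builder = new BytesRefBuilder();
            builder.append(new BytesRef("body"));  // the term's field name
            builder.append((byte) 0);              // FIELD_VALUE_SEPARATOR, unlikely to occur in a field name
            builder.append(new BytesRef("quick")); // the term's bytes
            BytesRef encoded = builder.toBytesRef();
            System.out.println(encoded.utf8ToString().replace('\0', '|')); // prints body|quick
        }
    }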
+ * @param query The query to extract terms from + * @param document The document to add the extracted terms to + * @param queryTermsFieldField The field in the document holding the extracted terms + * @param unknownQueryField The field used to mark a document that not all query terms could be extracted. For example + * the query contained an unsupported query (e.g. WildcardQuery). + * @param fieldType The field type for the query metadata field + */ + public static void extractQueryTerms(Query query, ParseContext.Document document, String queryTermsFieldField, String unknownQueryField, FieldType fieldType) { + Set<Term> queryTerms; + try { + queryTerms = extractQueryTerms(query); + } catch (UnsupportedQueryException e) { + document.add(new Field(unknownQueryField, new BytesRef(), fieldType)); + return; + } + for (Term term : queryTerms) { + BytesRefBuilder builder = new BytesRefBuilder(); + builder.append(new BytesRef(term.field())); + builder.append(FIELD_VALUE_SEPARATOR); + builder.append(term.bytes()); + document.add(new Field(queryTermsFieldField, builder.toBytesRef(), fieldType)); + } + } + + /** + * Extracts all query terms from the provided query and returns them as a set. + * + * From a phrase query, and from a boolean query's required clauses, only the longest terms are selected, + * since those terms are likely to be the rarest. A boolean query's must_not clauses are always ignored. + * + * If no query terms can be extracted from some part of the query, term extraction is stopped and + * an UnsupportedQueryException is thrown. + */ + static Set<Term> extractQueryTerms(Query query) { + // TODO: add support for the TermsQuery when it has methods to access the actual terms it encapsulates + // TODO: add support for span queries + if (query instanceof TermQuery) { + return Collections.singleton(((TermQuery) query).getTerm()); + } else if (query instanceof PhraseQuery) { + Term[] terms = ((PhraseQuery) query).getTerms(); + if (terms.length == 0) { + return Collections.emptySet(); + } + + // the longest term is likely to be the rarest, + // so from a performance perspective it makes sense to extract that + Term longestTerm = terms[0]; + for (Term term : terms) { + if (longestTerm.bytes().length < term.bytes().length) { + longestTerm = term; + } + } + return Collections.singleton(longestTerm); + } else if (query instanceof BooleanQuery) { + List<BooleanClause> clauses = ((BooleanQuery) query).clauses(); + boolean hasRequiredClauses = false; + for (BooleanClause clause : clauses) { + if (clause.isRequired()) { + hasRequiredClauses = true; + break; + } + } + if (hasRequiredClauses) { + Set<Term> bestClause = null; + for (BooleanClause clause : clauses) { + if (clause.isRequired() == false) { + // skip must_not clauses, we don't need to remember the things that do *not* match... + // skip should clauses, this bq has must clauses, so we don't need to remember should clauses, since they are completely optional. + continue; + } + + Set<Term> temp = extractQueryTerms(clause.getQuery()); + bestClause = selectTermListWithTheLongestShortestTerm(temp, bestClause); + } + if (bestClause != null) { + return bestClause; + } else { + return Collections.emptySet(); + } + } else { + Set<Term> terms = new HashSet<>(); + for (BooleanClause clause : clauses) { + if (clause.isProhibited()) { + // we don't need to remember the things that do *not* match... 
+ continue; + } + terms.addAll(extractQueryTerms(clause.getQuery())); + } + return terms; + } + } else if (query instanceof ConstantScoreQuery) { + Query wrappedQuery = ((ConstantScoreQuery) query).getQuery(); + return extractQueryTerms(wrappedQuery); + } else if (query instanceof BoostQuery) { + Query wrappedQuery = ((BoostQuery) query).getQuery(); + return extractQueryTerms(wrappedQuery); + } else { + throw new UnsupportedQueryException(query); + } + } + + static Set<Term> selectTermListWithTheLongestShortestTerm(Set<Term> terms1, Set<Term> terms2) { + if (terms1 == null) { + return terms2; + } else if (terms2 == null) { + return terms1; + } else { + int terms1ShortestTerm = minTermLength(terms1); + int terms2ShortestTerm = minTermLength(terms2); + // keep the clause with the longest terms; it is likely to be the rarest. + if (terms1ShortestTerm >= terms2ShortestTerm) { + return terms1; + } else { + return terms2; + } + } + } + + private static int minTermLength(Set<Term> terms) { + int min = Integer.MAX_VALUE; + for (Term term : terms) { + min = Math.min(min, term.bytes().length); + } + return min; + } + + /** + * Creates a boolean query with a should clause for each term on all fields of the specified index reader. + */ + public static Query createQueryTermsQuery(IndexReader indexReader, String queryMetadataField, String unknownQueryField) throws IOException { + List<Term> extractedTerms = new ArrayList<>(); + extractedTerms.add(new Term(unknownQueryField)); + Fields fields = MultiFields.getFields(indexReader); + for (String field : fields) { + Terms terms = fields.terms(field); + if (terms == null) { + continue; + } + + BytesRef fieldBr = new BytesRef(field); + TermsEnum tenum = terms.iterator(); + for (BytesRef term = tenum.next(); term != null; term = tenum.next()) { + BytesRefBuilder builder = new BytesRefBuilder(); + builder.append(fieldBr); + builder.append(FIELD_VALUE_SEPARATOR); + builder.append(term); + extractedTerms.add(new Term(queryMetadataField, builder.toBytesRef())); + } + } + return new TermsQuery(extractedTerms); + } + + /** + * Exception indicating that some or all query terms couldn't be extracted from a percolator query. + */ + public static class UnsupportedQueryException extends RuntimeException { + + private final Query unsupportedQuery; + + public UnsupportedQueryException(Query unsupportedQuery) { + super(LoggerMessageFormat.format("no query terms can be extracted from query [{}]", unsupportedQuery)); + this.unsupportedQuery = unsupportedQuery; + } + + /** + * The actual Lucene query that was unsupported and caused this exception to be thrown. + */ + public Query getUnsupportedQuery() { + return unsupportedQuery; + } + } + +} diff --git a/core/src/main/java/org/elasticsearch/index/percolator/PercolatorFieldMapper.java b/core/src/main/java/org/elasticsearch/index/percolator/PercolatorFieldMapper.java new file mode 100644 index 00000000000..9a57ea57764 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/index/percolator/PercolatorFieldMapper.java @@ -0,0 +1,150 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
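A worked example of the selectTermListWithTheLongestShortestTerm heuristic above, with invented terms: it keeps the clause whose shortest extracted term is longest, because even that clause's worst term stays selective (the method is package-private, so this would live in the same package):

    // clause A extracted {"ab", "abcdef"}  -> shortest term has length 2
    // clause B extracted {"abcd", "abcde"} -> shortest term has length 4
    Set<Term> a = new HashSet<>(Arrays.asList(new Term("f", "ab"), new Term("f", "abcdef")));
    Set<Term> b = new HashSet<>(Arrays.asList(new Term("f", "abcd"), new Term("f", "abcde")));
    // B wins: 4 > 2, so clause B is remembered for candidate matching.
    assert ExtractQueryTermsService.selectTermListWithTheLongestShortestTerm(a, b).equals(b);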
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.index.percolator; + +import org.apache.lucene.document.Field; +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.search.Query; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.mapper.FieldMapper; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.Mapper; +import org.elasticsearch.index.mapper.MapperBuilders; +import org.elasticsearch.index.mapper.MapperParsingException; +import org.elasticsearch.index.mapper.ParseContext; +import org.elasticsearch.index.mapper.core.StringFieldMapper; +import org.elasticsearch.index.query.QueryShardContext; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +public class PercolatorFieldMapper extends FieldMapper { + + public static final String NAME = "query"; + public static final String CONTENT_TYPE = "percolator"; + public static final PercolatorFieldType FIELD_TYPE = new PercolatorFieldType(); + + private static final String EXTRACTED_TERMS_FIELD_NAME = "extracted_terms"; + private static final String UNKNOWN_QUERY_FIELD_NAME = "unknown_query"; + public static final String EXTRACTED_TERMS_FULL_FIELD_NAME = NAME + "." + EXTRACTED_TERMS_FIELD_NAME; + public static final String UNKNOWN_QUERY_FULL_FIELD_NAME = NAME + "." 
+ UNKNOWN_QUERY_FIELD_NAME; + + public static class Builder extends FieldMapper.Builder<Builder, PercolatorFieldMapper> { + + private final QueryShardContext queryShardContext; + + public Builder(QueryShardContext queryShardContext) { + super(NAME, FIELD_TYPE, FIELD_TYPE); + this.queryShardContext = queryShardContext; + } + + @Override + public PercolatorFieldMapper build(BuilderContext context) { + context.path().add(name); + StringFieldMapper extractedTermsField = createStringFieldBuilder(EXTRACTED_TERMS_FIELD_NAME).build(context); + StringFieldMapper unknownQueryField = createStringFieldBuilder(UNKNOWN_QUERY_FIELD_NAME).build(context); + context.path().remove(); + return new PercolatorFieldMapper(name(), fieldType, defaultFieldType, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo, queryShardContext, extractedTermsField, unknownQueryField); + } + + static StringFieldMapper.Builder createStringFieldBuilder(String name) { + StringFieldMapper.Builder queryMetaDataFieldBuilder = MapperBuilders.stringField(name); + queryMetaDataFieldBuilder.docValues(false); + queryMetaDataFieldBuilder.store(false); + queryMetaDataFieldBuilder.tokenized(false); + queryMetaDataFieldBuilder.indexOptions(IndexOptions.DOCS); + return queryMetaDataFieldBuilder; + } + } + + public static class TypeParser implements FieldMapper.TypeParser { + + @Override + public Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException { + return new Builder(parserContext.queryShardContext()); + } + } + + public static final class PercolatorFieldType extends MappedFieldType { + + public PercolatorFieldType() { + setName(NAME); + setIndexOptions(IndexOptions.NONE); + setDocValuesType(DocValuesType.NONE); + setStored(false); + } + + public PercolatorFieldType(MappedFieldType ref) { + super(ref); + } + + @Override + public MappedFieldType clone() { + return new PercolatorFieldType(this); + } + + @Override + public String typeName() { + return CONTENT_TYPE; + } + } + + private final boolean mapUnmappedFieldAsString; + private final QueryShardContext queryShardContext; + private final StringFieldMapper queryTermsField; + private final StringFieldMapper unknownQueryField; + + public PercolatorFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings, MultiFields multiFields, CopyTo copyTo, QueryShardContext queryShardContext, StringFieldMapper queryTermsField, StringFieldMapper unknownQueryField) { + super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo); + this.queryShardContext = queryShardContext; + this.queryTermsField = queryTermsField; + this.unknownQueryField = unknownQueryField; + this.mapUnmappedFieldAsString = indexSettings.getAsBoolean(PercolatorQueriesRegistry.MAP_UNMAPPED_FIELDS_AS_STRING, false); + } + + @Override + public Mapper parse(ParseContext context) throws IOException { + QueryShardContext queryShardContext = new QueryShardContext(this.queryShardContext); + Query query = PercolatorQueriesRegistry.parseQuery(queryShardContext, mapUnmappedFieldAsString, context.parser()); + if (context.flyweight() == false) { + ExtractQueryTermsService.extractQueryTerms(query, context.doc(), queryTermsField.name(), unknownQueryField.name(), queryTermsField.fieldType()); + } + return null; + } + + @Override + public Iterator<Mapper> iterator() { + return Arrays.<Mapper>asList(queryTermsField, unknownQueryField).iterator(); + } + + @Override + protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException { + throw 
new UnsupportedOperationException("should not be invoked"); + } + + @Override + protected String contentType() { + return CONTENT_TYPE; + } + +} diff --git a/core/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java b/core/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java index eaf562e2127..143616b7084 100644 --- a/core/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java +++ b/core/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java @@ -31,19 +31,15 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.fielddata.IndexFieldDataService; -import org.elasticsearch.index.indexing.IndexingOperationListener; -import org.elasticsearch.index.indexing.ShardIndexingService; -import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.DocumentTypeListener; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.mapper.internal.TypeFieldMapper; +import org.elasticsearch.index.mapper.internal.UidFieldMapper; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.AbstractIndexShardComponent; import org.elasticsearch.index.shard.ShardId; @@ -54,7 +50,6 @@ import java.io.IOException; import java.util.Map; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; /** * Each shard will have a percolator registry even if there isn't a {@link PercolatorService#TYPE_NAME} document type in the index. 
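Reviewer note: the PercolatorFieldMapper above is the index-time half of this change. It parses each registered percolator query and hands it to ExtractQueryTermsService.extractQueryTerms(...), which indexes the query's terms into the two hidden sub-fields built in Builder#build. As a mental model only, here is a heavily simplified, hypothetical sketch of that extraction (the class name and the field '\0' value encoding are illustrative assumptions, not the actual service, which handles many more query types):

    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.FieldType;
    import org.apache.lucene.index.IndexOptions;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.BooleanClause;
    import org.apache.lucene.search.BooleanQuery;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.TermQuery;

    import java.util.ArrayList;
    import java.util.List;

    final class QueryTermExtractionSketch {

        // Produce the fields to index for one percolator query document: one entry
        // per extracted term, or a single marker in unknownField when no terms can
        // be extracted from the query.
        static List<Field> extract(Query query, String termsField, String unknownField) {
            FieldType fieldType = new FieldType();
            fieldType.setTokenized(false);
            fieldType.setIndexOptions(IndexOptions.DOCS); // mirrors createStringFieldBuilder(...) above
            List<Field> fields = new ArrayList<>();
            if (query instanceof TermQuery) {
                Term term = ((TermQuery) query).getTerm();
                // encode as field + '\0' + value so lookups stay per mapped field
                fields.add(new Field(termsField, term.field() + '\u0000' + term.text(), fieldType));
            } else if (query instanceof BooleanQuery) {
                for (BooleanClause clause : (BooleanQuery) query) {
                    fields.addAll(extract(clause.getQuery(), termsField, unknownField));
                }
            } else {
                // unsupported query shape: mark the document so it always remains a candidate
                fields.add(new Field(unknownField, "", fieldType));
            }
            return fields;
        }
    }

At search time, PercolatorQuery (added later in this diff) builds a filter from the same service's createQueryTermsQuery(...), so only registered queries whose extracted terms match the percolated document, plus the "unknown query" markers, are actually executed.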
@@ -65,45 +60,27 @@ import java.util.concurrent.atomic.AtomicBoolean; */ public final class PercolatorQueriesRegistry extends AbstractIndexShardComponent implements Closeable { - public final String MAP_UNMAPPED_FIELDS_AS_STRING = "index.percolator.map_unmapped_fields_as_string"; - - // This is a shard level service, but these below are index level service: - private final MapperService mapperService; - private final IndexFieldDataService indexFieldDataService; - - private final ShardIndexingService indexingService; + public final static String MAP_UNMAPPED_FIELDS_AS_STRING = "index.percolator.map_unmapped_fields_as_string"; private final ConcurrentMap percolateQueries = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency(); - private final RealTimePercolatorOperationListener realTimePercolatorOperationListener = new RealTimePercolatorOperationListener(); - private final PercolateTypeListener percolateTypeListener = new PercolateTypeListener(); - private final AtomicBoolean realTimePercolatorEnabled = new AtomicBoolean(false); private final QueryShardContext queryShardContext; private boolean mapUnmappedFieldsAsString; private final MeanMetric percolateMetric = new MeanMetric(); private final CounterMetric currentMetric = new CounterMetric(); private final CounterMetric numberOfQueries = new CounterMetric(); - public PercolatorQueriesRegistry(ShardId shardId, IndexSettings indexSettings, - ShardIndexingService indexingService, MapperService mapperService, - QueryShardContext queryShardContext, - IndexFieldDataService indexFieldDataService) { + public PercolatorQueriesRegistry(ShardId shardId, IndexSettings indexSettings, QueryShardContext queryShardContext) { super(shardId, indexSettings); - this.mapperService = mapperService; - this.indexingService = indexingService; this.queryShardContext = queryShardContext; - this.indexFieldDataService = indexFieldDataService; this.mapUnmappedFieldsAsString = this.indexSettings.getSettings().getAsBoolean(MAP_UNMAPPED_FIELDS_AS_STRING, false); - mapperService.addTypeListener(percolateTypeListener); } - public ConcurrentMap percolateQueries() { + public ConcurrentMap getPercolateQueries() { return percolateQueries; } @Override public void close() { - mapperService.removeTypeListener(percolateTypeListener); - indexingService.removeListener(realTimePercolatorOperationListener); clear(); } @@ -111,11 +88,6 @@ public final class PercolatorQueriesRegistry extends AbstractIndexShardComponent percolateQueries.clear(); } - public void enableRealTimePercolator() { - if (realTimePercolatorEnabled.compareAndSet(false, true)) { - indexingService.addListener(realTimePercolatorOperationListener); - } - } public void addPercolateQuery(String idAsString, BytesReference source) { Query newquery = parsePercolatorDocument(idAsString, source); @@ -133,9 +105,7 @@ public final class PercolatorQueriesRegistry extends AbstractIndexShardComponent } } - Query parsePercolatorDocument(String id, BytesReference source) { - String type = null; - BytesReference querySource = null; + public Query parsePercolatorDocument(String id, BytesReference source) { try (XContentParser sourceParser = XContentHelper.createParser(source)) { String currentFieldName = null; XContentParser.Token token = sourceParser.nextToken(); // move the START_OBJECT @@ -147,38 +117,21 @@ public final class PercolatorQueriesRegistry extends AbstractIndexShardComponent currentFieldName = sourceParser.currentName(); } else if (token == XContentParser.Token.START_OBJECT) { if 
("query".equals(currentFieldName)) { - if (type != null) { - return parseQuery(type, sourceParser); - } else { - XContentBuilder builder = XContentFactory.contentBuilder(sourceParser.contentType()); - builder.copyCurrentStructure(sourceParser); - querySource = builder.bytes(); - builder.close(); - } + return parseQuery(queryShardContext, mapUnmappedFieldsAsString, sourceParser); } else { sourceParser.skipChildren(); } } else if (token == XContentParser.Token.START_ARRAY) { sourceParser.skipChildren(); - } else if (token.isValue()) { - if ("type".equals(currentFieldName)) { - type = sourceParser.text(); - } } } - try (XContentParser queryParser = XContentHelper.createParser(querySource)) { - return parseQuery(type, queryParser); - } } catch (Exception e) { throw new PercolatorException(shardId().index(), "failed to parse query [" + id + "]", e); } + return null; } - private Query parseQuery(String type, XContentParser parser) { - String[] previousTypes = null; - if (type != null) { - previousTypes = QueryShardContext.setTypesWithPrevious(type); - } + public static Query parseQuery(QueryShardContext queryShardContext, boolean mapUnmappedFieldsAsString, XContentParser parser) { QueryShardContext context = new QueryShardContext(queryShardContext); try { context.reset(parser); @@ -200,29 +153,16 @@ public final class PercolatorQueriesRegistry extends AbstractIndexShardComponent } catch (IOException e) { throw new ParsingException(parser.getTokenLocation(), "Failed to parse", e); } finally { - if (type != null) { - QueryShardContext.setTypes(previousTypes); - } context.reset(null); } } - private class PercolateTypeListener implements DocumentTypeListener { - - @Override - public void beforeCreate(DocumentMapper mapper) { - if (PercolatorService.TYPE_NAME.equals(mapper.type())) { - enableRealTimePercolator(); - } - } - } - public void loadQueries(IndexReader reader) { logger.trace("loading percolator queries..."); final int loadedQueries; try { Query query = new TermQuery(new Term(TypeFieldMapper.NAME, PercolatorService.TYPE_NAME)); - QueriesLoaderCollector queryCollector = new QueriesLoaderCollector(PercolatorQueriesRegistry.this, logger, mapperService, indexFieldDataService); + QueriesLoaderCollector queryCollector = new QueriesLoaderCollector(PercolatorQueriesRegistry.this, logger); IndexSearcher indexSearcher = new IndexSearcher(reader); indexSearcher.setQueryCache(null); indexSearcher.search(query, queryCollector); @@ -238,30 +178,26 @@ public final class PercolatorQueriesRegistry extends AbstractIndexShardComponent logger.debug("done loading [{}] percolator queries", loadedQueries); } - private class RealTimePercolatorOperationListener extends IndexingOperationListener { - - @Override - public Engine.Index preIndex(Engine.Index operation) { - // validate the query here, before we index - if (PercolatorService.TYPE_NAME.equals(operation.type())) { - parsePercolatorDocument(operation.id(), operation.source()); - } - return operation; + public boolean isPercolatorQuery(Engine.Index operation) { + if (PercolatorService.TYPE_NAME.equals(operation.type())) { + parsePercolatorDocument(operation.id(), operation.source()); + return true; } + return false; + } - @Override - public void postIndexUnderLock(Engine.Index index) { - // add the query under a doc lock - if (PercolatorService.TYPE_NAME.equals(index.type())) { - addPercolateQuery(index.id(), index.source()); - } - } + public boolean isPercolatorQuery(Engine.Delete operation) { + return PercolatorService.TYPE_NAME.equals(operation.type()); + } 
- @Override - public void postDeleteUnderLock(Engine.Delete delete) { - // remove the query under a lock - if (PercolatorService.TYPE_NAME.equals(delete.type())) { - removePercolateQuery(delete.id()); + public synchronized void updatePercolateQuery(Engine engine, String id) { + // this can be called out of order as long as it is invoked once for every change to a percolator document. This will always + // fetch the latest change, but it might fetch the same change twice if updates / deletes happen concurrently. + try (Engine.GetResult getResult = engine.get(new Engine.Get(true, new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(PercolatorService.TYPE_NAME, id))))) { + if (getResult.exists()) { + addPercolateQuery(id, getResult.source().source); + } else { + removePercolateQuery(id); } } } diff --git a/core/src/main/java/org/elasticsearch/index/percolator/QueriesLoaderCollector.java b/core/src/main/java/org/elasticsearch/index/percolator/QueriesLoaderCollector.java index c79c7d7da25..1bea43e4ea1 100644 --- a/core/src/main/java/org/elasticsearch/index/percolator/QueriesLoaderCollector.java +++ b/core/src/main/java/org/elasticsearch/index/percolator/QueriesLoaderCollector.java @@ -45,17 +45,13 @@ final class QueriesLoaderCollector extends SimpleCollector { private final Map<BytesRef, Query> queries = new HashMap<>(); private final FieldsVisitor fieldsVisitor = new FieldsVisitor(true); private final PercolatorQueriesRegistry percolator; - private final IndexFieldData uidFieldData; private final ESLogger logger; - private SortedBinaryDocValues uidValues; private LeafReader reader; - QueriesLoaderCollector(PercolatorQueriesRegistry percolator, ESLogger logger, MapperService mapperService, IndexFieldDataService indexFieldDataService) { + QueriesLoaderCollector(PercolatorQueriesRegistry percolator, ESLogger logger) { this.percolator = percolator; this.logger = logger; - final MappedFieldType uidMapper = mapperService.fullName(UidFieldMapper.NAME); - this.uidFieldData = indexFieldDataService.getForField(uidMapper); } public Map<BytesRef, Query> queries() { @@ -64,35 +60,27 @@ @Override public void collect(int doc) throws IOException { - // the _source is the query + fieldsVisitor.reset(); + reader.document(doc, fieldsVisitor); + final Uid uid = fieldsVisitor.uid(); - uidValues.setDocument(doc); - if (uidValues.count() > 0) { - assert uidValues.count() == 1; - final BytesRef uid = uidValues.valueAt(0); - final BytesRef id = Uid.splitUidIntoTypeAndId(uid)[1]; - fieldsVisitor.reset(); - reader.document(doc, fieldsVisitor); - - try { - // id is only used for logging, if we fail we log the id in the catch statement - final Query parseQuery = percolator.parsePercolatorDocument(null, fieldsVisitor.source()); - if (parseQuery != null) { - queries.put(BytesRef.deepCopyOf(id), parseQuery); - } else { - logger.warn("failed to add query [{}] - parser returned null", id); - } - - } catch (Exception e) { - logger.warn("failed to add query [{}]", e, id.utf8ToString()); + try { + // id is only used for logging, if we fail we log the id in the catch statement + final Query parseQuery = percolator.parsePercolatorDocument(null, fieldsVisitor.source()); + if (parseQuery != null) { + queries.put(new BytesRef(uid.id()), parseQuery); + } else { + logger.warn("failed to add query [{}] - parser returned null", uid); } + + } catch (Exception e) { + logger.warn("failed to add query [{}]", e, uid); } } @Override protected void doSetNextReader(LeafReaderContext context) throws IOException { reader =
context.reader(); - uidValues = uidFieldData.load(context).getBytesValues(); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index e3deb6d4476..5851d089b37 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -81,8 +81,6 @@ import org.elasticsearch.index.fielddata.ShardFieldData; import org.elasticsearch.index.flush.FlushStats; import org.elasticsearch.index.get.GetStats; import org.elasticsearch.index.get.ShardGetService; -import org.elasticsearch.index.indexing.IndexingStats; -import org.elasticsearch.index.indexing.ShardIndexingService; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperForType; import org.elasticsearch.index.mapper.MapperService; @@ -115,7 +113,7 @@ import org.elasticsearch.index.warmer.ShardIndexWarmerService; import org.elasticsearch.index.warmer.WarmerStats; import org.elasticsearch.indices.IndicesWarmer; import org.elasticsearch.indices.cache.query.IndicesQueryCache; -import org.elasticsearch.indices.memory.IndexingMemoryController; +import org.elasticsearch.indices.IndexingMemoryController; import org.elasticsearch.indices.recovery.RecoveryFailedException; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.percolator.PercolatorService; @@ -127,6 +125,8 @@ import java.io.IOException; import java.io.PrintStream; import java.nio.channels.ClosedByInterruptException; import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; import java.util.EnumSet; import java.util.List; import java.util.Map; @@ -145,7 +145,7 @@ public class IndexShard extends AbstractIndexShardComponent { private final IndexCache indexCache; private final Store store; private final MergeSchedulerConfig mergeSchedulerConfig; - private final ShardIndexingService indexingService; + private final InternalIndexingStats internalIndexingStats; private final ShardSearchStats searchService; private final ShardGetService getService; private final ShardIndexWarmerService shardWarmerService; @@ -169,7 +169,6 @@ public class IndexShard extends AbstractIndexShardComponent { private final IndexEventListener indexEventListener; private final IndexSettings idxSettings; private final NodeServicesProvider provider; - private TimeValue refreshInterval; private volatile ScheduledFuture refreshScheduledFuture; @@ -178,6 +177,8 @@ public class IndexShard extends AbstractIndexShardComponent { protected final AtomicReference currentEngineReference = new AtomicReference<>(); protected final EngineFactory engineFactory; + private final IndexingOperationListener indexingOperationListeners; + @Nullable private RecoveryState recoveryState; @@ -217,7 +218,7 @@ public class IndexShard extends AbstractIndexShardComponent { public IndexShard(ShardId shardId, IndexSettings indexSettings, ShardPath path, Store store, IndexCache indexCache, MapperService mapperService, SimilarityService similarityService, IndexFieldDataService indexFieldDataService, @Nullable EngineFactory engineFactory, - IndexEventListener indexEventListener, IndexSearcherWrapper indexSearcherWrapper, NodeServicesProvider provider) { + IndexEventListener indexEventListener, IndexSearcherWrapper indexSearcherWrapper, NodeServicesProvider provider, IndexingOperationListener... 
listeners) { super(shardId, indexSettings); final Settings settings = indexSettings.getSettings(); this.inactiveTime = settings.getAsTime(INDEX_SHARD_INACTIVE_TIME_SETTING, settings.getAsTime(INDICES_INACTIVE_TIME_SETTING, TimeValue.timeValueMinutes(5))); @@ -234,7 +235,10 @@ public class IndexShard extends AbstractIndexShardComponent { this.threadPool = provider.getThreadPool(); this.mapperService = mapperService; this.indexCache = indexCache; - this.indexingService = new ShardIndexingService(shardId, indexSettings); + this.internalIndexingStats = new InternalIndexingStats(); + final List listenersList = new ArrayList<>(Arrays.asList(listeners)); + listenersList.add(internalIndexingStats); + this.indexingOperationListeners = new IndexingOperationListener.CompositeListener(listenersList, logger); this.getService = new ShardGetService(indexSettings, this, mapperService); this.termVectorsService = provider.getTermVectorsService(); this.searchService = new ShardSearchStats(settings); @@ -270,12 +274,7 @@ public class IndexShard extends AbstractIndexShardComponent { this.indexShardOperationCounter = new IndexShardOperationCounter(logger, shardId); this.provider = provider; this.searcherWrapper = indexSearcherWrapper; - this.percolatorQueriesRegistry = new PercolatorQueriesRegistry(shardId, indexSettings, indexingService, mapperService, newQueryShardContext(), indexFieldDataService); - if (mapperService.hasMapping(PercolatorService.TYPE_NAME)) { - percolatorQueriesRegistry.enableRealTimePercolator(); - } - - + this.percolatorQueriesRegistry = new PercolatorQueriesRegistry(shardId, indexSettings, newQueryShardContext()); // We start up inactive active.set(false); } @@ -293,10 +292,6 @@ public class IndexShard extends AbstractIndexShardComponent { return true; } - public ShardIndexingService indexingService() { - return this.indexingService; - } - public ShardGetService getService() { return this.getService; } @@ -510,19 +505,24 @@ public class IndexShard extends AbstractIndexShardComponent { public boolean index(Engine.Index index) { ensureWriteAllowed(index); markLastWrite(); - index = indexingService.preIndex(index); + index = indexingOperationListeners.preIndex(index); final boolean created; try { if (logger.isTraceEnabled()) { logger.trace("index [{}][{}]{}", index.type(), index.id(), index.docs()); } - created = getEngine().index(index); + final boolean isPercolatorQuery = percolatorQueriesRegistry.isPercolatorQuery(index); + Engine engine = getEngine(); + created = engine.index(index); + if (isPercolatorQuery) { + percolatorQueriesRegistry.updatePercolateQuery(engine, index.id()); + } index.endTime(System.nanoTime()); } catch (Throwable ex) { - indexingService.postIndex(index, ex); + indexingOperationListeners.postIndex(index, ex); throw ex; } - indexingService.postIndex(index); + indexingOperationListeners.postIndex(index); return created; } @@ -553,18 +553,23 @@ public class IndexShard extends AbstractIndexShardComponent { public void delete(Engine.Delete delete) { ensureWriteAllowed(delete); markLastWrite(); - delete = indexingService.preDelete(delete); + delete = indexingOperationListeners.preDelete(delete); try { if (logger.isTraceEnabled()) { logger.trace("delete [{}]", delete.uid().text()); } - getEngine().delete(delete); + final boolean isPercolatorQuery = percolatorQueriesRegistry.isPercolatorQuery(delete); + Engine engine = getEngine(); + engine.delete(delete); + if (isPercolatorQuery) { + percolatorQueriesRegistry.updatePercolateQuery(engine, delete.id()); + } 
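// Reviewer note (comment-only sketch, hypothetical listener): the pre/post calls
// above and below target the new IndexingOperationListener contract introduced in
// the next file, so a plugin could observe the same write lifecycle with e.g.:
//
//     class OperationCounter implements IndexingOperationListener {
//         final AtomicLong indexed = new AtomicLong(), deleted = new AtomicLong();
//         @Override public void postIndex(Engine.Index index) { indexed.incrementAndGet(); }
//         @Override public void postDelete(Engine.Delete delete) { deleted.incrementAndGet(); }
//     }
//
// passed to the IndexShard constructor via the new varargs listeners parameter.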
delete.endTime(System.nanoTime()); } catch (Throwable ex) { - indexingService.postDelete(delete, ex); + indexingOperationListeners.postDelete(delete, ex); throw ex; } - indexingService.postDelete(delete); + indexingOperationListeners.postDelete(delete); } public Engine.GetResult get(Engine.Get get) { @@ -615,7 +620,17 @@ public class IndexShard extends AbstractIndexShardComponent { } public IndexingStats indexingStats(String... types) { - return indexingService.stats(types); + Engine engine = getEngineOrNull(); + final boolean throttled; + final long throttleTimeInMillis; + if (engine == null) { + throttled = false; + throttleTimeInMillis = 0; + } else { + throttled = engine.isThrottled(); + throttleTimeInMillis = engine.getIndexThrottleTimeInMillis(); + } + return internalIndexingStats.stats(throttled, throttleTimeInMillis, types); } public SearchStats searchStats(String... groups) { @@ -1241,7 +1256,6 @@ public class IndexShard extends AbstractIndexShardComponent { } mergePolicyConfig.onRefreshSettings(settings); searchService.onRefreshSettings(settings); - indexingService.onRefreshSettings(settings); if (change) { getEngine().onSettingsChanged(); } @@ -1277,6 +1291,14 @@ public class IndexShard extends AbstractIndexShardComponent { return inactiveTime; } + /** + * Should be called for each no-op update operation to increment relevant statistics. + * @param type the doc type of the update + */ + public void noopUpdate(String type) { + internalIndexingStats.noopUpdate(type); + } + class EngineRefresher implements Runnable { @Override public void run() { @@ -1501,7 +1523,7 @@ public class IndexShard extends AbstractIndexShardComponent { }; final Engine.Warmer engineWarmer = (searcher, toLevel) -> warmer.warm(searcher, this, idxSettings, toLevel); return new EngineConfig(shardId, - threadPool, indexingService, indexSettings, engineWarmer, store, deletionPolicy, mergePolicyConfig.getMergePolicy(), mergeSchedulerConfig, + threadPool, indexSettings, engineWarmer, store, deletionPolicy, mergePolicyConfig.getMergePolicy(), mergeSchedulerConfig, mapperService.indexAnalyzer(), similarityService.similarity(mapperService), codecService, shardEventListener, translogRecoveryPerformer, indexCache.query(), cachingPolicy, translogConfig, inactiveTime); } diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexingOperationListener.java b/core/src/main/java/org/elasticsearch/index/shard/IndexingOperationListener.java new file mode 100644 index 00000000000..e5d3574223a --- /dev/null +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexingOperationListener.java @@ -0,0 +1,152 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.index.shard; + +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.index.engine.Engine; + +import java.util.List; + +/** + * A listener for indexing and delete events. + */ +public interface IndexingOperationListener { + + /** + * Called before the indexing occurs. + */ + default Engine.Index preIndex(Engine.Index operation) { + return operation; + } + + /** + * Called after the indexing operation occurred. + */ + default void postIndex(Engine.Index index) {} + + /** + * Called after the indexing operation failed with an exception. + */ + default void postIndex(Engine.Index index, Throwable ex) {} + + /** + * Called before the delete occurs. + */ + default Engine.Delete preDelete(Engine.Delete delete) { + return delete; + } + + /** + * Called after the delete operation occurred. + */ + default void postDelete(Engine.Delete delete) {} + + /** + * Called after the delete operation failed with an exception. + */ + default void postDelete(Engine.Delete delete, Throwable ex) {} + + /** + * A composite listener that multiplexes calls to each of the registered listeners' methods. + */ + final class CompositeListener implements IndexingOperationListener { + private final List<IndexingOperationListener> listeners; + private final ESLogger logger; + + public CompositeListener(List<IndexingOperationListener> listeners, ESLogger logger) { + this.listeners = listeners; + this.logger = logger; + } + + @Override + public Engine.Index preIndex(Engine.Index operation) { + assert operation != null; + for (IndexingOperationListener listener : listeners) { + try { + listener.preIndex(operation); + } catch (Throwable t) { + logger.warn("preIndex listener [{}] failed", t, listener); + } + } + return operation; + } + + @Override + public void postIndex(Engine.Index index) { + assert index != null; + for (IndexingOperationListener listener : listeners) { + try { + listener.postIndex(index); + } catch (Throwable t) { + logger.warn("postIndex listener [{}] failed", t, listener); + } + } + } + + @Override + public void postIndex(Engine.Index index, Throwable ex) { + assert index != null && ex != null; + for (IndexingOperationListener listener : listeners) { + try { + listener.postIndex(index, ex); + } catch (Throwable t) { + logger.warn("postIndex listener [{}] failed", t, listener); + } + } + } + + @Override + public Engine.Delete preDelete(Engine.Delete delete) { + assert delete != null; + for (IndexingOperationListener listener : listeners) { + try { + listener.preDelete(delete); + } catch (Throwable t) { + logger.warn("preDelete listener [{}] failed", t, listener); + } + } + return delete; + } + + @Override + public void postDelete(Engine.Delete delete) { + assert delete != null; + for (IndexingOperationListener listener : listeners) { + try { + listener.postDelete(delete); + } catch (Throwable t) { + logger.warn("postDelete listener [{}] failed", t, listener); + } + } + } + + @Override + public void postDelete(Engine.Delete delete, Throwable ex) { + assert delete != null && ex != null; + for (IndexingOperationListener listener : listeners) { + try { + listener.postDelete(delete, ex); + } catch (Throwable t) { + logger.warn("postDelete listener [{}] failed", t, listener); + } + } + } + } +} diff --git a/core/src/main/java/org/elasticsearch/index/indexing/IndexingStats.java b/core/src/main/java/org/elasticsearch/index/shard/IndexingStats.java similarity index 99% rename from core/src/main/java/org/elasticsearch/index/indexing/IndexingStats.java rename to
core/src/main/java/org/elasticsearch/index/shard/IndexingStats.java index 07ca8af17e3..27cda2ca1c8 100644 --- a/core/src/main/java/org/elasticsearch/index/indexing/IndexingStats.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexingStats.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.index.indexing; +package org.elasticsearch.index.shard; import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; diff --git a/core/src/main/java/org/elasticsearch/index/shard/InternalIndexingStats.java b/core/src/main/java/org/elasticsearch/index/shard/InternalIndexingStats.java new file mode 100644 index 00000000000..ce8c8140ce0 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/index/shard/InternalIndexingStats.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.shard; + +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.metrics.CounterMetric; +import org.elasticsearch.common.metrics.MeanMetric; +import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.index.engine.Engine; + +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static java.util.Collections.emptyMap; + +/** + * Internal class that maintains relevant indexing statistics / metrics. + * @see IndexShard + */ +final class InternalIndexingStats implements IndexingOperationListener { + private final StatsHolder totalStats = new StatsHolder(); + private volatile Map typesStats = emptyMap(); + + /** + * Returns the stats, including type specific stats. If the types are null/0 length, then nothing + * is returned for them. If they are set, then only types provided will be returned, or + * _all for all types. + */ + IndexingStats stats(boolean isThrottled, long currentThrottleInMillis, String... 
types) { + IndexingStats.Stats total = totalStats.stats(isThrottled, currentThrottleInMillis); + Map typesSt = null; + if (types != null && types.length > 0) { + typesSt = new HashMap<>(typesStats.size()); + if (types.length == 1 && types[0].equals("_all")) { + for (Map.Entry entry : typesStats.entrySet()) { + typesSt.put(entry.getKey(), entry.getValue().stats(isThrottled, currentThrottleInMillis)); + } + } else { + for (Map.Entry entry : typesStats.entrySet()) { + if (Regex.simpleMatch(types, entry.getKey())) { + typesSt.put(entry.getKey(), entry.getValue().stats(isThrottled, currentThrottleInMillis)); + } + } + } + } + return new IndexingStats(total, typesSt); + } + + @Override + public Engine.Index preIndex(Engine.Index operation) { + totalStats.indexCurrent.inc(); + typeStats(operation.type()).indexCurrent.inc(); + return operation; + } + + @Override + public void postIndex(Engine.Index index) { + long took = index.endTime() - index.startTime(); + totalStats.indexMetric.inc(took); + totalStats.indexCurrent.dec(); + StatsHolder typeStats = typeStats(index.type()); + typeStats.indexMetric.inc(took); + typeStats.indexCurrent.dec(); + } + + @Override + public void postIndex(Engine.Index index, Throwable ex) { + totalStats.indexCurrent.dec(); + typeStats(index.type()).indexCurrent.dec(); + totalStats.indexFailed.inc(); + typeStats(index.type()).indexFailed.inc(); + } + + @Override + public Engine.Delete preDelete(Engine.Delete delete) { + totalStats.deleteCurrent.inc(); + typeStats(delete.type()).deleteCurrent.inc(); + return delete; + } + + @Override + public void postDelete(Engine.Delete delete) { + long took = delete.endTime() - delete.startTime(); + totalStats.deleteMetric.inc(took); + totalStats.deleteCurrent.dec(); + StatsHolder typeStats = typeStats(delete.type()); + typeStats.deleteMetric.inc(took); + typeStats.deleteCurrent.dec(); + } + + @Override + public void postDelete(Engine.Delete delete, Throwable ex) { + totalStats.deleteCurrent.dec(); + typeStats(delete.type()).deleteCurrent.dec(); + } + + public void noopUpdate(String type) { + totalStats.noopUpdates.inc(); + typeStats(type).noopUpdates.inc(); + } + + private StatsHolder typeStats(String type) { + StatsHolder stats = typesStats.get(type); + if (stats == null) { + synchronized (this) { + stats = typesStats.get(type); + if (stats == null) { + stats = new StatsHolder(); + typesStats = MapBuilder.newMapBuilder(typesStats).put(type, stats).immutableMap(); + } + } + } + return stats; + } + + static class StatsHolder { + private final MeanMetric indexMetric = new MeanMetric(); + private final MeanMetric deleteMetric = new MeanMetric(); + private final CounterMetric indexCurrent = new CounterMetric(); + private final CounterMetric indexFailed = new CounterMetric(); + private final CounterMetric deleteCurrent = new CounterMetric(); + private final CounterMetric noopUpdates = new CounterMetric(); + + IndexingStats.Stats stats(boolean isThrottled, long currentThrottleMillis) { + return new IndexingStats.Stats( + indexMetric.count(), TimeUnit.NANOSECONDS.toMillis(indexMetric.sum()), indexCurrent.count(), indexFailed.count(), + deleteMetric.count(), TimeUnit.NANOSECONDS.toMillis(deleteMetric.sum()), deleteCurrent.count(), + noopUpdates.count(), isThrottled, TimeUnit.MILLISECONDS.toMillis(currentThrottleMillis)); + } + + void clear() { + indexMetric.clear(); + deleteMetric.clear(); + } + } +} diff --git a/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java 
b/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java index 1438d5f49c3..64667a2013a 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java +++ b/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java @@ -70,6 +70,7 @@ public class TranslogRecoveryPerformer { performRecoveryOperation(engine, operation, false); numOps++; } + engine.getTranslog().sync(); } catch (Throwable t) { throw new BatchOperationException(shardId, "failed to apply batch translog operation", numOps, t); } diff --git a/core/src/main/java/org/elasticsearch/index/translog/Translog.java b/core/src/main/java/org/elasticsearch/index/translog/Translog.java index 3c29e0b216f..1faeac2414c 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/core/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -51,6 +51,7 @@ import org.elasticsearch.index.shard.IndexShardComponent; import java.io.Closeable; import java.io.EOFException; import java.io.IOException; +import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.nio.file.DirectoryStream; import java.nio.file.Files; @@ -163,6 +164,21 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC try { if (translogGeneration != null) { final Checkpoint checkpoint = readCheckpoint(); + final Path nextTranslogFile = location.resolve(getFilename(checkpoint.generation + 1)); + final Path currentCheckpointFile = location.resolve(getCommitCheckpointFileName(checkpoint.generation)); + // this is special handling for error condition when we create a new writer but we fail to bake + // the newly written file (generation+1) into the checkpoint. This is still a valid state + // we just need to cleanup before we continue + // we hit this before and then blindly deleted the new generation even though we managed to bake it in and then hit this: + // https://discuss.elastic.co/t/cannot-recover-index-because-of-missing-tanslog-files/38336 as an example + // + // For this to happen we must have already copied the translog.ckp file into translog-gen.ckp so we first check if that file exists + // if not we don't even try to clean it up and wait until we fail creating it + assert Files.exists(nextTranslogFile) == false || Files.size(nextTranslogFile) <= TranslogWriter.getHeaderLength(translogUUID) : "unexpected translog file: [" + nextTranslogFile + "]"; + if (Files.exists(currentCheckpointFile) // current checkpoint is already copied + && Files.deleteIfExists(nextTranslogFile)) { // delete it and log a warning + logger.warn("deleted previously created, but not yet committed, next generation [{}]. 
This can happen due to a tragic exception when creating a new generation", nextTranslogFile.getFileName()); + } this.recoveredTranslogs = recoverFromFiles(translogGeneration, checkpoint); if (recoveredTranslogs.isEmpty()) { throw new IllegalStateException("at least one reader must be recovered"); @@ -425,7 +441,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC if (config.isSyncOnEachOperation()) { current.sync(); } - assert current.assertBytesAtLocation(location, bytes); + assert assertBytesAtLocation(location, bytes); return location; } } catch (AlreadyClosedException | IOException ex) { @@ -439,6 +455,13 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC } } + boolean assertBytesAtLocation(Translog.Location location, BytesReference expectedBytes) throws IOException { + // tests can override this + ByteBuffer buffer = ByteBuffer.allocate(location.size); + current.readBytes(buffer, location.translogLocation); + return new BytesArray(buffer.array()).equals(expectedBytes); + } + /** * Snapshots the current transaction log allowing to safely iterate over the snapshot. * Snapshots are fixed in time and will not be updated with future operations. diff --git a/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java b/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java index 026aac4515e..6a4d40ec545 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java +++ b/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java @@ -69,9 +69,17 @@ public class TranslogWriter extends TranslogReader { totalOffset = lastSyncedOffset; } + static int getHeaderLength(String translogUUID) { + return getHeaderLength(new BytesRef(translogUUID).length); + } + + private static int getHeaderLength(int uuidLength) { + return CodecUtil.headerLength(TRANSLOG_CODEC) + uuidLength + RamUsageEstimator.NUM_BYTES_INT; + } + public static TranslogWriter create(ShardId shardId, String translogUUID, long fileGeneration, Path file, Callback onClose, ChannelFactory channelFactory, ByteSizeValue bufferSize) throws IOException { final BytesRef ref = new BytesRef(translogUUID); - final int headerLength = CodecUtil.headerLength(TRANSLOG_CODEC) + ref.length + RamUsageEstimator.NUM_BYTES_INT; + final int headerLength = getHeaderLength(ref.length); final FileChannel channel = channelFactory.open(file); try { // This OutputStreamDataOutput is intentionally not closed because @@ -80,17 +88,14 @@ public class TranslogWriter extends TranslogReader { CodecUtil.writeHeader(out, TRANSLOG_CODEC, VERSION); out.writeInt(ref.length); out.writeBytes(ref.bytes, ref.offset, ref.length); - channel.force(false); + channel.force(true); writeCheckpoint(headerLength, 0, file.getParent(), fileGeneration, StandardOpenOption.WRITE); final TranslogWriter writer = new TranslogWriter(shardId, fileGeneration, new ChannelReference(file, fileGeneration, channel, onClose), bufferSize); return writer; } catch (Throwable throwable){ + // if we fail to bake the file-generation into the checkpoint we stick with the file and once we recover and that + // file exists we remove it. 
We only apply this logic to the checkpoint.generation+1 any other file with a higher generation is an error condition IOUtils.closeWhileHandlingException(channel); - try { - Files.delete(file); // remove the file as well - } catch (IOException ex) { - throwable.addSuppressed(ex); - } throw throwable; } } @@ -213,11 +218,6 @@ public class TranslogWriter extends TranslogReader { } } - boolean assertBytesAtLocation(Translog.Location location, BytesReference expectedBytes) throws IOException { - ByteBuffer buffer = ByteBuffer.allocate(location.size); - readBytes(buffer, location.translogLocation); - return new BytesArray(buffer.array()).equals(expectedBytes); - } private long getWrittenOffset() throws IOException { return channelReference.getChannel().position(); diff --git a/core/src/main/java/org/elasticsearch/indices/memory/IndexingMemoryController.java b/core/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java similarity index 89% rename from core/src/main/java/org/elasticsearch/indices/memory/IndexingMemoryController.java rename to core/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java index a72c115835c..d84c3b00255 100644 --- a/core/src/main/java/org/elasticsearch/indices/memory/IndexingMemoryController.java +++ b/core/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java @@ -17,10 +17,9 @@ * under the License. */ -package org.elasticsearch.indices.memory; +package org.elasticsearch.indices; -import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -32,16 +31,16 @@ import org.elasticsearch.index.engine.FlushNotAllowedEngineException; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardState; -import org.elasticsearch.indices.IndicesService; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.threadpool.ThreadPool; +import java.io.Closeable; import java.util.ArrayList; import java.util.EnumSet; import java.util.List; import java.util.concurrent.ScheduledFuture; -public class IndexingMemoryController extends AbstractLifecycleComponent implements IndexEventListener { +public class IndexingMemoryController extends AbstractComponent implements IndexEventListener, Closeable { /** How much heap (% or bytes) we will share across all actively indexing shards on this node (default: 10%). 
*/ public static final String INDEX_BUFFER_SIZE_SETTING = "indices.memory.index_buffer_size"; @@ -70,10 +69,6 @@ public class IndexingMemoryController extends AbstractLifecycleComponent CAN_UPDATE_INDEX_BUFFER_STATES = EnumSet.of( IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED, IndexShardState.RELOCATED); private final ShardsIndicesStatusChecker statusChecker; - @Inject - public IndexingMemoryController(Settings settings, ThreadPool threadPool, IndicesService indicesService) { + IndexingMemoryController(Settings settings, ThreadPool threadPool, IndicesService indicesService) { this(settings, threadPool, indicesService, JvmInfo.jvmInfo().getMem().getHeapMax().bytes()); } // for testing - protected IndexingMemoryController(Settings settings, ThreadPool threadPool, IndicesService indicesService, long jvmMemoryInBytes) { + IndexingMemoryController(Settings settings, ThreadPool threadPool, IndicesService indicesService, long jvmMemoryInBytes) { super(settings); - this.threadPool = threadPool; this.indicesService = indicesService; ByteSizeValue indexingBuffer; @@ -131,29 +124,24 @@ public class IndexingMemoryController extends AbstractLifecycleComponent scheduleTask(ThreadPool threadPool) { // it's fine to run it on the scheduler thread, no busy work - this.scheduler = threadPool.scheduleWithFixedDelay(statusChecker, interval); + return threadPool.scheduleWithFixedDelay(statusChecker, interval); } @Override - protected void doStop() { + public void close() { FutureUtils.cancel(scheduler); - scheduler = null; - } - - @Override - protected void doClose() { } /** * returns the current budget for the total amount of indexing buffers of * active shards on this node */ - public ByteSizeValue indexingBufferSize() { + ByteSizeValue indexingBufferSize() { return indexingBuffer; } @@ -188,7 +176,7 @@ public class IndexingMemoryController extends AbstractLifecycleComponent i private final OldShardsStats oldShardsStats = new OldShardsStats(); private final IndexStoreConfig indexStoreConfig; private final MapperRegistry mapperRegistry; + private final IndexingMemoryController indexingMemoryController; @Override protected void doStart() { @@ -114,7 +116,7 @@ public class IndicesService extends AbstractLifecycleComponent i public IndicesService(Settings settings, PluginsService pluginsService, NodeEnvironment nodeEnv, ClusterSettings clusterSettings, AnalysisRegistry analysisRegistry, IndicesQueriesRegistry indicesQueriesRegistry, IndexNameExpressionResolver indexNameExpressionResolver, - ClusterService clusterService, MapperRegistry mapperRegistry) { + ClusterService clusterService, MapperRegistry mapperRegistry, ThreadPool threadPool) { super(settings); this.pluginsService = pluginsService; this.nodeEnv = nodeEnv; @@ -127,7 +129,7 @@ public class IndicesService extends AbstractLifecycleComponent i this.mapperRegistry = mapperRegistry; clusterSettings.addSettingsUpdateConsumer(IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING, indexStoreConfig::setRateLimitingType); clusterSettings.addSettingsUpdateConsumer(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING, indexStoreConfig::setRateLimitingThrottle); - + indexingMemoryController = new IndexingMemoryController(settings, threadPool, this); } @Override @@ -161,7 +163,7 @@ public class IndicesService extends AbstractLifecycleComponent i @Override protected void doClose() { - IOUtils.closeWhileHandlingException(analysisRegistry); + IOUtils.closeWhileHandlingException(analysisRegistry, 
indexingMemoryController); } /** @@ -293,6 +295,7 @@ public class IndicesService extends AbstractLifecycleComponent i final IndexModule indexModule = new IndexModule(idxSettings, indexStoreConfig, analysisRegistry); pluginsService.onIndexModule(indexModule); + indexModule.addIndexEventListener(indexingMemoryController); for (IndexEventListener listener : builtInListeners) { indexModule.addIndexEventListener(listener); } diff --git a/core/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java b/core/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java index c8142f3d37a..0a036cbd801 100644 --- a/core/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java +++ b/core/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java @@ -36,7 +36,7 @@ import org.elasticsearch.index.engine.SegmentsStats; import org.elasticsearch.index.fielddata.FieldDataStats; import org.elasticsearch.index.flush.FlushStats; import org.elasticsearch.index.get.GetStats; -import org.elasticsearch.index.indexing.IndexingStats; +import org.elasticsearch.index.shard.IndexingStats; import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.percolator.PercolateStats; import org.elasticsearch.index.recovery.RecoveryStats; diff --git a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index 8a213898b6c..9357de7b1eb 100644 --- a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -63,7 +63,6 @@ import org.elasticsearch.index.shard.ShardNotFoundException; import org.elasticsearch.index.snapshots.IndexShardRepository; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.flush.SyncedFlushService; -import org.elasticsearch.indices.memory.IndexingMemoryController; import org.elasticsearch.indices.recovery.RecoveryFailedException; import org.elasticsearch.indices.recovery.RecoverySource; import org.elasticsearch.indices.recovery.RecoveryState; @@ -130,9 +129,9 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent percolateQueries() { - return percolateQueryRegistry.percolateQueries(); - } - public Query percolateQuery() { return percolateQuery; } @@ -196,6 +208,14 @@ public class PercolateContext extends SearchContext { return hitContext; } + public boolean isOnlyCount() { + return onlyCount; + } + + public Query percolatorTypeFilter(){ + return indexService().mapperService().documentMapper(PercolatorService.TYPE_NAME).typeFilter(); + } + @Override public SearchContextHighlight highlight() { return highlight; @@ -230,7 +250,7 @@ public class PercolateContext extends SearchContext { @Override public MapperService mapperService() { - return indexService.mapperService(); + return mapperService; } @Override @@ -531,7 +551,6 @@ public class PercolateContext extends SearchContext { @Override public SearchContext size(int size) { this.size = size; - this.limit = true; return this; } diff --git a/core/src/main/java/org/elasticsearch/percolator/PercolateDocumentParser.java b/core/src/main/java/org/elasticsearch/percolator/PercolateDocumentParser.java new file mode 100644 index 00000000000..6733ebd0b3a --- /dev/null +++ b/core/src/main/java/org/elasticsearch/percolator/PercolateDocumentParser.java @@ -0,0 +1,224 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.percolator; + +import org.apache.lucene.search.ConstantScoreQuery; +import org.apache.lucene.search.Query; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.action.percolate.PercolateShardRequest; +import org.elasticsearch.cluster.action.index.MappingUpdatedAction; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.mapper.DocumentMapperForType; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.search.SearchParseElement; +import org.elasticsearch.search.aggregations.AggregationPhase; +import org.elasticsearch.search.highlight.HighlightPhase; +import org.elasticsearch.search.sort.SortParseElement; + +import java.util.Map; + +import static org.elasticsearch.index.mapper.SourceToParse.source; + +public class PercolateDocumentParser { + + private final HighlightPhase highlightPhase; + private final SortParseElement sortParseElement; + private final AggregationPhase aggregationPhase; + private final MappingUpdatedAction mappingUpdatedAction; + + @Inject + public PercolateDocumentParser(HighlightPhase highlightPhase, SortParseElement sortParseElement, AggregationPhase aggregationPhase, MappingUpdatedAction mappingUpdatedAction) { + this.highlightPhase = highlightPhase; + this.sortParseElement = sortParseElement; + this.aggregationPhase = aggregationPhase; + this.mappingUpdatedAction = mappingUpdatedAction; + } + + public ParsedDocument parse(PercolateShardRequest request, PercolateContext context, MapperService mapperService, QueryShardContext queryShardContext) { + BytesReference source = request.source(); + if (source == null || source.length() == 0) { + if (request.docSource() != null && request.docSource().length() != 0) { + return parseFetchedDoc(context, request.docSource(), mapperService, request.shardId().getIndex(), request.documentType()); + } else { + return null; + } + } + + // TODO: combine all feature parse elements into one map + Map hlElements = highlightPhase.parseElements(); + Map aggregationElements = aggregationPhase.parseElements(); + + ParsedDocument doc = null; + // Some queries (function_score query when for decay functions) rely on a SearchContext being set: + // We switch types because this context needs to be in the context of the percolate queries in the shard and + // not the in memory percolate 
doc + String[] previousTypes = context.types(); + context.types(new String[]{PercolatorService.TYPE_NAME}); + try (XContentParser parser = XContentFactory.xContent(source).createParser(source)) { + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + // we need to check for "doc" here, so the next token will be START_OBJECT, which is + // the start of the actual document + if ("doc".equals(currentFieldName)) { + if (doc != null) { + throw new ElasticsearchParseException("Either specify doc or get, not both"); + } + + DocumentMapperForType docMapper = mapperService.documentMapperWithAutoCreate(request.documentType()); + String index = context.shardTarget().index(); + doc = docMapper.getDocumentMapper().parse(source(parser).index(index).type(request.documentType()).flyweight(true)); + if (docMapper.getMapping() != null) { + doc.addDynamicMappingsUpdate(docMapper.getMapping()); + } + if (doc.dynamicMappingsUpdate() != null) { + mappingUpdatedAction.updateMappingOnMasterSynchronously(request.shardId().getIndex(), request.documentType(), doc.dynamicMappingsUpdate()); + } + // the document parsing exits the "doc" object, so we need to set the new current field. + currentFieldName = parser.currentName(); + } + } else if (token == XContentParser.Token.START_OBJECT) { + SearchParseElement element = hlElements.get(currentFieldName); + if (element == null) { + element = aggregationElements.get(currentFieldName); + } + + if ("query".equals(currentFieldName)) { + if (context.percolateQuery() != null) { + throw new ElasticsearchParseException("Either specify query or filter, not both"); + } + context.percolateQuery(queryShardContext.parse(parser).query()); + } else if ("filter".equals(currentFieldName)) { + if (context.percolateQuery() != null) { + throw new ElasticsearchParseException("Either specify query or filter, not both"); + } + Query filter = queryShardContext.parseInnerFilter(parser).query(); + context.percolateQuery(new ConstantScoreQuery(filter)); + } else if ("sort".equals(currentFieldName)) { + parseSort(parser, context); + } else if (element != null) { + element.parse(parser, context); + } + } else if (token == XContentParser.Token.START_ARRAY) { + if ("sort".equals(currentFieldName)) { + parseSort(parser, context); + } + } else if (token == null) { + break; + } else if (token.isValue()) { + if ("size".equals(currentFieldName)) { + context.size(parser.intValue()); + if (context.size() < 0) { + throw new ElasticsearchParseException("size is set to [{}] and is expected to be higher or equal to 0", context.size()); + } + } else if ("sort".equals(currentFieldName)) { + parseSort(parser, context); + } else if ("track_scores".equals(currentFieldName) || "trackScores".equals(currentFieldName)) { + context.trackScores(parser.booleanValue()); + } + } + } + + // We need to get the actual source from the request body for highlighting, so parse the request body again + // and only get the doc source.
+ if (context.highlight() != null) { + parser.close(); + currentFieldName = null; + try (XContentParser parserForHighlighter = XContentFactory.xContent(source).createParser(source)) { + token = parserForHighlighter.nextToken(); + assert token == XContentParser.Token.START_OBJECT; + while ((token = parserForHighlighter.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parserForHighlighter.currentName(); + } else if (token == XContentParser.Token.START_OBJECT) { + if ("doc".equals(currentFieldName)) { + BytesStreamOutput bStream = new BytesStreamOutput(); + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.SMILE, bStream); + builder.copyCurrentStructure(parserForHighlighter); + builder.close(); + doc.setSource(bStream.bytes()); + break; + } else { + parserForHighlighter.skipChildren(); + } + } else if (token == null) { + break; + } + } + } + } + + } catch (Throwable e) { + throw new ElasticsearchParseException("failed to parse request", e); + } finally { + context.types(previousTypes); + } + + if (request.docSource() != null && request.docSource().length() != 0) { + if (doc != null) { + throw new IllegalArgumentException("Can't specify the document to percolate in the source of the request and as document id"); + } + + doc = parseFetchedDoc(context, request.docSource(), mapperService, request.shardId().getIndex(), request.documentType()); + } + + if (doc == null) { + throw new IllegalArgumentException("Nothing to percolate"); + } + + return doc; + } + + private void parseSort(XContentParser parser, PercolateContext context) throws Exception { + context.trackScores(true); + sortParseElement.parse(parser, context); + // null, means default sorting by relevancy + if (context.sort() != null) { + throw new ElasticsearchParseException("Only _score desc is supported"); + } + } + + private ParsedDocument parseFetchedDoc(PercolateContext context, BytesReference fetchedDoc, MapperService mapperService, String index, String type) { + try (XContentParser parser = XContentFactory.xContent(fetchedDoc).createParser(fetchedDoc)) { + DocumentMapperForType docMapper = mapperService.documentMapperWithAutoCreate(type); + ParsedDocument doc = docMapper.getDocumentMapper().parse(source(parser).index(index).type(type).flyweight(true)); + if (doc == null) { + throw new ElasticsearchParseException("No doc to percolate in the request"); + } + if (context.highlight() != null) { + doc.setSource(fetchedDoc); + } + return doc; + } catch (Throwable e) { + throw new ElasticsearchParseException("failed to parse request", e); + } + } + +} diff --git a/core/src/main/java/org/elasticsearch/percolator/PercolatorModule.java b/core/src/main/java/org/elasticsearch/percolator/PercolatorModule.java index fb18c467388..68b8db55e31 100644 --- a/core/src/main/java/org/elasticsearch/percolator/PercolatorModule.java +++ b/core/src/main/java/org/elasticsearch/percolator/PercolatorModule.java @@ -27,6 +27,7 @@ public class PercolatorModule extends AbstractModule { @Override protected void configure() { + bind(PercolateDocumentParser.class).asEagerSingleton(); bind(PercolatorService.class).asEagerSingleton(); } } diff --git a/core/src/main/java/org/elasticsearch/percolator/PercolatorQuery.java b/core/src/main/java/org/elasticsearch/percolator/PercolatorQuery.java new file mode 100644 index 00000000000..b3208b4133c --- /dev/null +++ b/core/src/main/java/org/elasticsearch/percolator/PercolatorQuery.java @@ -0,0 +1,250 @@ +/* + * Licensed to 
Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.percolator; + +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.TwoPhaseIterator; +import org.apache.lucene.search.Weight; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.index.fieldvisitor.SingleFieldsVisitor; +import org.elasticsearch.index.mapper.internal.UidFieldMapper; +import org.elasticsearch.index.percolator.ExtractQueryTermsService; + +import java.io.IOException; +import java.util.Map; +import java.util.Set; + +import static org.apache.lucene.search.BooleanClause.Occur.FILTER; +import static org.apache.lucene.search.BooleanClause.Occur.MUST; + +final class PercolatorQuery extends Query { + + public static final float MATCH_COST = + (1 << 14) // stored field access cost, approximated by the number of bytes in a block + + 1000; // cost of matching the query against the document, arbitrary as it would be really complex to estimate + + static class Builder { + + private final IndexSearcher percolatorIndexSearcher; + private final Map<BytesRef, Query> percolatorQueries; + + private Query percolateQuery; + private Query queriesMetaDataQuery; + private final Query percolateTypeQuery; + + /** + * @param percolatorIndexSearcher The index searcher on top of the in-memory index that holds the document being percolated + * @param percolatorQueries All the registered percolator queries + * @param percolateTypeQuery A query that identifies all documents containing percolator queries + */ + Builder(IndexSearcher percolatorIndexSearcher, Map<BytesRef, Query> percolatorQueries, Query percolateTypeQuery) { + this.percolatorIndexSearcher = percolatorIndexSearcher; + this.percolatorQueries = percolatorQueries; + this.percolateTypeQuery = percolateTypeQuery; + } + + /** + * Optionally sets a query that reduces the number of queries to percolate based on custom metadata attached + * to the percolator documents. + */ + void setPercolateQuery(Query percolateQuery) { + this.percolateQuery = percolateQuery; + } + + /** + * Optionally sets a query that reduces the number of queries to percolate based on extracted terms from + * the document to be percolated.
+ * + * @param extractedTermsFieldName The name of the field to get the extracted terms from + * @param unknownQueryFieldname The field used to mark documents whose queries couldn't all get extracted + */ + void extractQueryTermsQuery(String extractedTermsFieldName, String unknownQueryFieldname) throws IOException { + this.queriesMetaDataQuery = ExtractQueryTermsService.createQueryTermsQuery(percolatorIndexSearcher.getIndexReader(), extractedTermsFieldName, unknownQueryFieldname); + } + + PercolatorQuery build() { + BooleanQuery.Builder builder = new BooleanQuery.Builder(); + builder.add(percolateTypeQuery, FILTER); + if (queriesMetaDataQuery != null) { + builder.add(queriesMetaDataQuery, FILTER); + } + if (percolateQuery != null) { + builder.add(percolateQuery, MUST); + } + return new PercolatorQuery(builder.build(), percolatorIndexSearcher, percolatorQueries); + } + + } + + private final Query percolatorQueriesQuery; + private final IndexSearcher percolatorIndexSearcher; + private final Map<BytesRef, Query> percolatorQueries; + + private PercolatorQuery(Query percolatorQueriesQuery, IndexSearcher percolatorIndexSearcher, Map<BytesRef, Query> percolatorQueries) { + this.percolatorQueriesQuery = percolatorQueriesQuery; + this.percolatorIndexSearcher = percolatorIndexSearcher; + this.percolatorQueries = percolatorQueries; + } + + @Override + public Query rewrite(IndexReader reader) throws IOException { + if (getBoost() != 1f) { + return super.rewrite(reader); + } + + Query rewritten = percolatorQueriesQuery.rewrite(reader); + if (rewritten != percolatorQueriesQuery) { + return new PercolatorQuery(rewritten, percolatorIndexSearcher, percolatorQueries); + } else { + return this; + } + } + + @Override + public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException { + final Weight innerWeight = percolatorQueriesQuery.createWeight(searcher, needsScores); + return new Weight(this) { + @Override + public void extractTerms(Set<Term> set) { + } + + @Override + public Explanation explain(LeafReaderContext leafReaderContext, int docId) throws IOException { + Scorer scorer = scorer(leafReaderContext); + if (scorer != null) { + int result = scorer.iterator().advance(docId); + if (result == docId) { + return Explanation.match(scorer.score(), "PercolatorQuery"); + } + } + return Explanation.noMatch("PercolatorQuery"); + } + + @Override + public float getValueForNormalization() throws IOException { + return innerWeight.getValueForNormalization(); + } + + @Override + public void normalize(float v, float v1) { + innerWeight.normalize(v, v1); + } + + @Override + public Scorer scorer(LeafReaderContext leafReaderContext) throws IOException { + final Scorer approximation = innerWeight.scorer(leafReaderContext); + if (approximation == null) { + return null; + } + + final LeafReader leafReader = leafReaderContext.reader(); + return new Scorer(this) { + + @Override + public DocIdSetIterator iterator() { + return TwoPhaseIterator.asDocIdSetIterator(twoPhaseIterator()); + } + + @Override + public TwoPhaseIterator twoPhaseIterator() { + return new TwoPhaseIterator(approximation.iterator()) { + @Override + public boolean matches() throws IOException { + return matchDocId(approximation.docID(), leafReader); + } + + @Override + public float matchCost() { + return MATCH_COST; + } + }; + } + + @Override + public float score() throws IOException { + return approximation.score(); + } + + @Override + public int freq() throws IOException { + return approximation.freq(); + } + + @Override + public int docID() { + return
approximation.docID(); + } + + boolean matchDocId(int docId, LeafReader leafReader) throws IOException { + SingleFieldsVisitor singleFieldsVisitor = new SingleFieldsVisitor(UidFieldMapper.NAME); + leafReader.document(docId, singleFieldsVisitor); + BytesRef percolatorQueryId = new BytesRef(singleFieldsVisitor.uid().id()); + return matchQuery(percolatorQueryId); + } + }; + } + }; + } + + boolean matchQuery(BytesRef percolatorQueryId) throws IOException { + Query percolatorQuery = percolatorQueries.get(percolatorQueryId); + if (percolatorQuery != null) { + return Lucene.exists(percolatorIndexSearcher, percolatorQuery); + } else { + return false; + } + } + + private final Object instance = new Object(); + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + + PercolatorQuery that = (PercolatorQuery) o; + + return instance.equals(that.instance); + + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + instance.hashCode(); + return result; + } + + @Override + public String toString(String s) { + return "PercolatorQuery{inner={" + percolatorQueriesQuery.toString(s) + "}}"; + } +} diff --git a/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java b/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java index 8cc691b866b..e6ffa313e83 100644 --- a/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java +++ b/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java @@ -18,134 +18,110 @@ */ package org.elasticsearch.percolator; -import com.carrotsearch.hppc.IntObjectHashMap; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.ReaderUtil; import org.apache.lucene.index.memory.ExtendedMemoryIndex; import org.apache.lucene.index.memory.MemoryIndex; -import org.apache.lucene.search.BooleanClause; -import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; -import org.apache.lucene.search.ConstantScoreQuery; -import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.MultiCollector; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.TopScoreDocCollector; +import org.apache.lucene.search.TotalHitCountCollector; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.CloseableThreadLocal; -import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; import org.elasticsearch.action.percolate.PercolateResponse; import org.elasticsearch.action.percolate.PercolateShardRequest; import org.elasticsearch.action.percolate.PercolateShardResponse; import org.elasticsearch.cache.recycler.PageCacheRecycler; import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.action.index.MappingUpdatedAction; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.common.HasContextAndHeaders; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseFieldMatcher; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; -import 
org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.lucene.Lucene; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.fielddata.IndexFieldData; -import org.elasticsearch.index.fielddata.SortedBinaryDocValues; -import org.elasticsearch.index.mapper.DocumentMapperForType; -import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.fieldvisitor.SingleFieldsVisitor; import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.mapper.internal.UidFieldMapper; +import org.elasticsearch.index.percolator.PercolatorFieldMapper; import org.elasticsearch.index.percolator.PercolatorQueriesRegistry; import org.elasticsearch.index.query.ParsedQuery; -import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.percolator.QueryCollector.Count; -import org.elasticsearch.percolator.QueryCollector.Match; -import org.elasticsearch.percolator.QueryCollector.MatchAndScore; -import org.elasticsearch.percolator.QueryCollector.MatchAndSort; import org.elasticsearch.script.ScriptService; -import org.elasticsearch.search.SearchParseElement; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.aggregations.AggregationPhase; +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.BucketCollector; import org.elasticsearch.search.aggregations.InternalAggregation; -import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregator; import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator; +import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.highlight.HighlightField; import org.elasticsearch.search.highlight.HighlightPhase; import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.search.sort.SortParseElement; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.stream.Collectors; import java.util.stream.StreamSupport; -import static org.elasticsearch.index.mapper.SourceToParse.source; -import static org.elasticsearch.percolator.QueryCollector.count; -import static org.elasticsearch.percolator.QueryCollector.match; -import static org.elasticsearch.percolator.QueryCollector.matchAndScore; +import static org.apache.lucene.search.BooleanClause.Occur.FILTER; +import static org.apache.lucene.search.BooleanClause.Occur.MUST; public class PercolatorService extends AbstractComponent { 
public final static float NO_SCORE = Float.NEGATIVE_INFINITY; public final static String TYPE_NAME = ".percolator"; - private final IndexNameExpressionResolver indexNameExpressionResolver; - private final IndicesService indicesService; - private final IntObjectHashMap<PercolatorType> percolatorTypes; - private final PageCacheRecycler pageCacheRecycler; private final BigArrays bigArrays; + private final ScriptService scriptService; + private final IndicesService indicesService; private final ClusterService clusterService; + private final HighlightPhase highlightPhase; + private final AggregationPhase aggregationPhase; + private final PageCacheRecycler pageCacheRecycler; + private final ParseFieldMatcher parseFieldMatcher; + private final CloseableThreadLocal<MemoryIndex> cache; + private final IndexNameExpressionResolver indexNameExpressionResolver; + private final PercolateDocumentParser percolateDocumentParser; private final PercolatorIndex single; private final PercolatorIndex multi; - private final HighlightPhase highlightPhase; - private final AggregationPhase aggregationPhase; - private final SortParseElement sortParseElement; - private final ScriptService scriptService; - private final MappingUpdatedAction mappingUpdatedAction; - - private final CloseableThreadLocal<MemoryIndex> cache; - - private final ParseFieldMatcher parseFieldMatcher; - @Inject public PercolatorService(Settings settings, IndexNameExpressionResolver indexNameExpressionResolver, IndicesService indicesService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, HighlightPhase highlightPhase, ClusterService clusterService, AggregationPhase aggregationPhase, ScriptService scriptService, - MappingUpdatedAction mappingUpdatedAction) { + PercolateDocumentParser percolateDocumentParser) { super(settings); this.indexNameExpressionResolver = indexNameExpressionResolver; + this.percolateDocumentParser = percolateDocumentParser; this.parseFieldMatcher = new ParseFieldMatcher(settings); this.indicesService = indicesService; this.pageCacheRecycler = pageCacheRecycler; this.bigArrays = bigArrays; this.clusterService = clusterService; - this.highlightPhase = highlightPhase; - this.aggregationPhase = aggregationPhase; this.scriptService = scriptService; - this.mappingUpdatedAction = mappingUpdatedAction; - this.sortParseElement = new SortParseElement(); + this.aggregationPhase = aggregationPhase; + this.highlightPhase = highlightPhase; final long maxReuseBytes = settings.getAsBytesSize("indices.memory.memory_index.size_per_thread", new ByteSizeValue(1, ByteSizeUnit.MB)).bytes(); cache = new CloseableThreadLocal<MemoryIndex>() { @@ -157,23 +133,41 @@ public class PercolatorService extends AbstractComponent { }; single = new SingleDocumentPercolatorIndex(cache); multi = new MultiDocumentPercolatorIndex(cache); - - percolatorTypes = new IntObjectHashMap<>(6); - percolatorTypes.put(countPercolator.id(), countPercolator); - percolatorTypes.put(queryCountPercolator.id(), queryCountPercolator); - percolatorTypes.put(matchPercolator.id(), matchPercolator); - percolatorTypes.put(queryPercolator.id(), queryPercolator); - percolatorTypes.put(scoringPercolator.id(), scoringPercolator); - percolatorTypes.put(topMatchingPercolator.id(), topMatchingPercolator); } + public ReduceResult reduce(boolean onlyCount, List<PercolateShardResponse> shardResponses, HasContextAndHeaders headersContext) throws IOException { + if (onlyCount) { + long finalCount = 0; + for (PercolateShardResponse shardResponse : shardResponses) { + finalCount += shardResponse.topDocs().totalHits; + } - public ReduceResult reduce(byte
percolatorTypeId, List<PercolateShardResponse> shardResults, HasContextAndHeaders headersContext) { - PercolatorType percolatorType = percolatorTypes.get(percolatorTypeId); - return percolatorType.reduce(shardResults, headersContext); + InternalAggregations reducedAggregations = reduceAggregations(shardResponses, headersContext); + return new PercolatorService.ReduceResult(finalCount, reducedAggregations); + } else { + int requestedSize = shardResponses.get(0).requestedSize(); + TopDocs[] shardResults = new TopDocs[shardResponses.size()]; + long foundMatches = 0; + for (int i = 0; i < shardResults.length; i++) { + TopDocs shardResult = shardResponses.get(i).topDocs(); + foundMatches += shardResult.totalHits; + shardResults[i] = shardResult; + } + TopDocs merged = TopDocs.merge(requestedSize, shardResults); + PercolateResponse.Match[] matches = new PercolateResponse.Match[merged.scoreDocs.length]; + for (int i = 0; i < merged.scoreDocs.length; i++) { + ScoreDoc doc = merged.scoreDocs[i]; + PercolateShardResponse shardResponse = shardResponses.get(doc.shardIndex); + String id = shardResponse.ids().get(doc.doc); + Map<String, HighlightField> hl = shardResponse.hls().get(doc.doc); + matches[i] = new PercolateResponse.Match(new Text(shardResponse.getIndex()), new Text(id), doc.score, hl); + } + InternalAggregations reducedAggregations = reduceAggregations(shardResponses, headersContext); + return new PercolatorService.ReduceResult(foundMatches, matches, reducedAggregations); + } } - public PercolateShardResponse percolate(PercolateShardRequest request) { + public PercolateShardResponse percolate(PercolateShardRequest request) throws IOException { IndexService percolateIndexService = indicesService.indexServiceSafe(request.shardId().getIndex()); IndexShard indexShard = percolateIndexService.getShard(request.shardId().id()); indexShard.readAllowed(); // check if we can read the shard...
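The new reduce() above delegates cross-shard merging to Lucene instead of interleaving hits by hand. A minimal sketch of that call with made-up shard results; the merged hits carry a shardIndex, which is exactly what the shardResponses.get(doc.shardIndex) lookup above relies on:

    TopDocs shard0 = new TopDocs(2, new ScoreDoc[] { new ScoreDoc(0, 2.0f), new ScoreDoc(3, 0.5f) }, 2.0f);
    TopDocs shard1 = new TopDocs(1, new ScoreDoc[] { new ScoreDoc(7, 1.5f) }, 1.5f);
    // keeps the 10 best hits overall, ordered by score:
    // doc 0 (shard 0), then doc 7 (shard 1), then doc 3 (shard 0)
    TopDocs merged = TopDocs.merge(10, new TopDocs[] { shard0, shard1 });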
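percolate() below rests on the percolator's core inversion: instead of running one query over many stored documents, the single incoming document is indexed into a throwaway in-memory index and the registered queries are run against it. A self-contained sketch of that idea with Lucene's MemoryIndex (field name, text and analyzer are illustrative, not taken from this change):

    MemoryIndex memoryIndex = new MemoryIndex();
    memoryIndex.addField("message", "A new bonsai tree in the office", new WhitespaceAnalyzer());
    // a registered query matches the percolated document iff it scores > 0
    // against this one-document index:
    boolean matches = memoryIndex.search(new TermQuery(new Term("message", "bonsai"))) > 0.0f;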
@@ -196,29 +190,11 @@ public class PercolatorService extends AbstractComponent { ); SearchContext.setCurrent(context); try { - ParsedDocument parsedDocument = parseRequest(indexShard, request, context, request.shardId().getIndex()); - if (context.percolateQueries().isEmpty()) { - return new PercolateShardResponse(context, request.shardId()); - } + ParsedDocument parsedDocument = percolateDocumentParser.parse(request, context, percolateIndexService.mapperService(), percolateIndexService.getQueryShardContext()); - if (request.docSource() != null && request.docSource().length() != 0) { - parsedDocument = parseFetchedDoc(context, request.docSource(), percolateIndexService, request.shardId().getIndex(), request.documentType()); - } else if (parsedDocument == null) { - throw new IllegalArgumentException("Nothing to percolate"); + if (context.searcher().getIndexReader().maxDoc() == 0) { + return new PercolateShardResponse(Lucene.EMPTY_TOP_DOCS, Collections.emptyMap(), Collections.emptyMap(), context); } - - if (context.percolateQuery() == null && (context.trackScores() || context.doSort || context.aggregations() != null) || context.aliasFilter() != null) { - context.percolateQuery(new MatchAllDocsQuery()); - } - - if (context.doSort && !context.limit) { - throw new IllegalArgumentException("Can't sort if size isn't specified"); - } - - if (context.highlight() != null && !context.limit) { - throw new IllegalArgumentException("Can't highlight if size isn't specified"); - } - if (context.size() < 0) { context.size(0); } @@ -232,23 +208,27 @@ public class PercolatorService extends AbstractComponent { } else { percolatorIndex = single; } - - PercolatorType action; - if (request.onlyCount()) { - action = context.percolateQuery() != null ? queryCountPercolator : countPercolator; - } else { - if (context.doSort) { - action = topMatchingPercolator; - } else if (context.percolateQuery() != null) { - action = context.trackScores() ? 
scoringPercolator : queryPercolator; - } else { - action = matchPercolator; - } - } - context.percolatorTypeId = action.id(); - - percolatorIndex.prepare(context, parsedDocument); - return action.doPercolate(request, context, isNested); + + BucketCollector aggregatorCollector = null; + if (context.aggregations() != null) { + AggregationContext aggregationContext = new AggregationContext(context); + context.aggregations().aggregationContext(aggregationContext); + + Aggregator[] aggregators = context.aggregations().factories().createTopLevelAggregators(aggregationContext); + List<Aggregator> aggregatorCollectors = new ArrayList<>(aggregators.length); + for (int i = 0; i < aggregators.length; i++) { + if (!(aggregators[i] instanceof GlobalAggregator)) { + Aggregator aggregator = aggregators[i]; + aggregatorCollectors.add(aggregator); + } + } + context.aggregations().aggregators(aggregators); + aggregatorCollector = BucketCollector.wrap(aggregatorCollectors); + aggregatorCollector.preCollection(); + } + PercolatorQueriesRegistry queriesRegistry = indexShard.percolateRegistry(); + return doPercolate(context, queriesRegistry, aggregationPhase, aggregatorCollector, highlightPhase); } finally { SearchContext.removeCurrent(); context.close(); @@ -256,566 +236,101 @@ } } - private ParsedDocument parseRequest(IndexShard shard, PercolateShardRequest request, PercolateContext context, String index) { - BytesReference source = request.source(); - if (source == null || source.length() == 0) { - return null; + // moved the core percolation logic to a package-protected method to make testing easier: + static PercolateShardResponse doPercolate(PercolateContext context, PercolatorQueriesRegistry queriesRegistry, AggregationPhase aggregationPhase, @Nullable BucketCollector aggregatorCollector, HighlightPhase highlightPhase) throws IOException { + PercolatorQuery.Builder builder = new PercolatorQuery.Builder(context.docSearcher(), queriesRegistry.getPercolateQueries(), context.percolatorTypeFilter()); + if (queriesRegistry.indexSettings().getSettings().getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null).onOrAfter(Version.V_3_0_0)) { + builder.extractQueryTermsQuery(PercolatorFieldMapper.EXTRACTED_TERMS_FULL_FIELD_NAME, PercolatorFieldMapper.UNKNOWN_QUERY_FULL_FIELD_NAME); } - - // TODO: combine all feature parse elements into one map - Map<String, ? extends SearchParseElement> hlElements = highlightPhase.parseElements(); - Map<String, ? extends SearchParseElement> aggregationElements = aggregationPhase.parseElements(); - - ParsedDocument doc = null; - XContentParser parser = null; - - // Some queries (function_score query when for decay functions) rely on a SearchContext being set: - // We switch types because this context needs to be in the context of the percolate queries in the shard and - // not the in memory percolate doc - String[] previousTypes = context.types(); - context.types(new String[]{TYPE_NAME}); - QueryShardContext queryShardContext = shard.getQueryShardContext(); - try { - parser = XContentFactory.xContent(source).createParser(source); - String currentFieldName = null; - XContentParser.Token token; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - // we need to check the "doc" here, so the next token will be START_OBJECT which is - // the actual document starting - if ("doc".equals(currentFieldName)) { - if (doc != null) { - throw new ElasticsearchParseException("Either specify doc or get, not both"); -
} - - MapperService mapperService = shard.mapperService(); - DocumentMapperForType docMapper = mapperService.documentMapperWithAutoCreate(request.documentType()); - doc = docMapper.getDocumentMapper().parse(source(parser).index(index).type(request.documentType()).flyweight(true)); - if (docMapper.getMapping() != null) { - doc.addDynamicMappingsUpdate(docMapper.getMapping()); - } - if (doc.dynamicMappingsUpdate() != null) { - mappingUpdatedAction.updateMappingOnMasterSynchronously(request.shardId().getIndex(), request.documentType(), doc.dynamicMappingsUpdate()); - } - // the document parsing exists the "doc" object, so we need to set the new current field. - currentFieldName = parser.currentName(); - } - } else if (token == XContentParser.Token.START_OBJECT) { - SearchParseElement element = hlElements.get(currentFieldName); - if (element == null) { - element = aggregationElements.get(currentFieldName); - } - - if ("query".equals(currentFieldName)) { - if (context.percolateQuery() != null) { - throw new ElasticsearchParseException("Either specify query or filter, not both"); - } - context.percolateQuery(queryShardContext.parse(parser).query()); - } else if ("filter".equals(currentFieldName)) { - if (context.percolateQuery() != null) { - throw new ElasticsearchParseException("Either specify query or filter, not both"); - } - Query filter = queryShardContext.parseInnerFilter(parser).query(); - context.percolateQuery(new ConstantScoreQuery(filter)); - } else if ("sort".equals(currentFieldName)) { - parseSort(parser, context); - } else if (element != null) { - element.parse(parser, context); - } - } else if (token == XContentParser.Token.START_ARRAY) { - if ("sort".equals(currentFieldName)) { - parseSort(parser, context); - } - } else if (token == null) { - break; - } else if (token.isValue()) { - if ("size".equals(currentFieldName)) { - context.size(parser.intValue()); - if (context.size() < 0) { - throw new ElasticsearchParseException("size is set to [{}] and is expected to be higher or equal to 0", context.size()); - } - } else if ("sort".equals(currentFieldName)) { - parseSort(parser, context); - } else if ("track_scores".equals(currentFieldName) || "trackScores".equals(currentFieldName)) { - context.trackScores(parser.booleanValue()); - } - } + if (context.percolateQuery() != null || context.aliasFilter() != null) { + BooleanQuery.Builder bq = new BooleanQuery.Builder(); + if (context.percolateQuery() != null) { + bq.add(context.percolateQuery(), MUST); } - - // We need to get the actual source from the request body for highlighting, so parse the request body again - // and only get the doc source. 
- if (context.highlight() != null) { - parser.close(); - currentFieldName = null; - parser = XContentFactory.xContent(source).createParser(source); - token = parser.nextToken(); - assert token == XContentParser.Token.START_OBJECT; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token == XContentParser.Token.START_OBJECT) { - if ("doc".equals(currentFieldName)) { - BytesStreamOutput bStream = new BytesStreamOutput(); - XContentBuilder builder = XContentFactory.contentBuilder(XContentType.SMILE, bStream); - builder.copyCurrentStructure(parser); - builder.close(); - doc.setSource(bStream.bytes()); - break; - } else { - parser.skipChildren(); - } - } else if (token == null) { - break; - } - } - } - - } catch (Throwable e) { - throw new ElasticsearchParseException("failed to parse request", e); - } finally { - context.types(previousTypes); - if (parser != null) { - parser.close(); + if (context.aliasFilter() != null) { + bq.add(context.aliasFilter(), FILTER); } + builder.setPercolateQuery(bq.build()); } + PercolatorQuery percolatorQuery = builder.build(); - return doc; - } - - private void parseSort(XContentParser parser, PercolateContext context) throws Exception { - sortParseElement.parse(parser, context); - // null, means default sorting by relevancy - if (context.sort() == null) { - context.doSort = true; + if (context.isOnlyCount() || context.size() == 0) { + TotalHitCountCollector collector = new TotalHitCountCollector(); + context.searcher().search(percolatorQuery, MultiCollector.wrap(collector, aggregatorCollector)); + if (aggregatorCollector != null) { + aggregatorCollector.postCollection(); + aggregationPhase.execute(context); + } + return new PercolateShardResponse(new TopDocs(collector.getTotalHits(), Lucene.EMPTY_SCORE_DOCS, 0f), Collections.emptyMap(), Collections.emptyMap(), context); } else { - throw new ElasticsearchParseException("Only _score desc is supported"); - } - } - - private ParsedDocument parseFetchedDoc(PercolateContext context, BytesReference fetchedDoc, IndexService documentIndexService, String index, String type) { - ParsedDocument doc = null; - XContentParser parser = null; - try { - parser = XContentFactory.xContent(fetchedDoc).createParser(fetchedDoc); - MapperService mapperService = documentIndexService.mapperService(); - DocumentMapperForType docMapper = mapperService.documentMapperWithAutoCreate(type); - doc = docMapper.getDocumentMapper().parse(source(parser).index(index).type(type).flyweight(true)); - - if (context.highlight() != null) { - doc.setSource(fetchedDoc); + int size = context.size(); + if (size > context.searcher().getIndexReader().maxDoc()) { + // prevent easy OOM if more than the total number of docs that exist is requested... 
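A note on the boolean query assembled just above: the user-supplied percolate query is added as a scoring MUST clause, while the alias filter (like the type and extracted-terms queries inside the builder) is a FILTER clause, i.e. it restricts the candidate set without contributing to the score. The distinction in isolation, reusing the names from the code above:

    BooleanQuery.Builder bq = new BooleanQuery.Builder();
    bq.add(percolateQuery, BooleanClause.Occur.MUST);   // must match and is scored
    bq.add(aliasFilter, BooleanClause.Occur.FILTER);    // must match, score is ignored
    Query combined = bq.build();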
+ size = context.searcher().getIndexReader().maxDoc(); + } - } catch (Throwable e) { - throw new ElasticsearchParseException("failed to parse request", e); - } finally { - if (parser != null) { - parser.close(); + TopScoreDocCollector collector = TopScoreDocCollector.create(size); + context.searcher().search(percolatorQuery, MultiCollector.wrap(collector, aggregatorCollector)); + if (aggregatorCollector != null) { + aggregatorCollector.postCollection(); + aggregationPhase.execute(context); } - } - if (doc == null) { - throw new ElasticsearchParseException("No doc to percolate in the request"); - } + TopDocs topDocs = collector.topDocs(); + Map<Integer, String> ids = new HashMap<>(topDocs.scoreDocs.length); + Map<Integer, Map<String, HighlightField>> hls = new HashMap<>(topDocs.scoreDocs.length); + for (ScoreDoc scoreDoc : topDocs.scoreDocs) { + if (context.trackScores() == false) { + // No sort or tracking scores was provided, so use special value to indicate to not show the scores: + scoreDoc.score = NO_SCORE; + } - return doc; + int segmentIdx = ReaderUtil.subIndex(scoreDoc.doc, context.searcher().getIndexReader().leaves()); + LeafReaderContext atomicReaderContext = context.searcher().getIndexReader().leaves().get(segmentIdx); + final int segmentDocId = scoreDoc.doc - atomicReaderContext.docBase; + SingleFieldsVisitor fieldsVisitor = new SingleFieldsVisitor(UidFieldMapper.NAME); + atomicReaderContext.reader().document(segmentDocId, fieldsVisitor); + String id = fieldsVisitor.uid().id(); + ids.put(scoreDoc.doc, id); + if (context.highlight() != null) { + Query query = queriesRegistry.getPercolateQueries().get(new BytesRef(id)); + context.parsedQuery(new ParsedQuery(query)); + context.hitContext().cache().clear(); + highlightPhase.hitExecute(context, context.hitContext()); + hls.put(scoreDoc.doc, context.hitContext().hit().getHighlightFields()); + } + } + return new PercolateShardResponse(topDocs, ids, hls, context); + } } public void close() { cache.close(); } - interface PercolatorType { - - // 0x00 is reserved for empty type.
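A note on the uid lookup in the new code above: collectors report index-wide doc ids, while stored fields are read per segment, hence the subIndex/docBase translation before the SingleFieldsVisitor runs. The pattern in isolation (reader, scoreDoc and fieldsVisitor as in the surrounding code):

    int segmentIdx = ReaderUtil.subIndex(scoreDoc.doc, reader.leaves());   // which leaf holds the hit
    LeafReaderContext leaf = reader.leaves().get(segmentIdx);
    int segmentDocId = scoreDoc.doc - leaf.docBase;                        // make the id leaf-relative
    leaf.reader().document(segmentDocId, fieldsVisitor);                   // stored fields are per leaf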
- byte id(); - - ReduceResult reduce(List<PercolateShardResponse> shardResults, HasContextAndHeaders headersContext); - - PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context, boolean isNested); - - } - - private final PercolatorType countPercolator = new PercolatorType() { - - @Override - public byte id() { - return 0x01; + private InternalAggregations reduceAggregations(List<PercolateShardResponse> shardResults, HasContextAndHeaders headersContext) { + if (shardResults.get(0).aggregations() == null) { + return null; } - @Override - public ReduceResult reduce(List<PercolateShardResponse> shardResults, HasContextAndHeaders headersContext) { - long finalCount = 0; - for (PercolateShardResponse shardResponse : shardResults) { - finalCount += shardResponse.count(); - } - - assert !shardResults.isEmpty(); - InternalAggregations reducedAggregations = reduceAggregations(shardResults, headersContext); - return new ReduceResult(finalCount, reducedAggregations); + List<InternalAggregations> aggregationsList = new ArrayList<>(shardResults.size()); + for (PercolateShardResponse shardResult : shardResults) { + aggregationsList.add(shardResult.aggregations()); } - - @Override - public PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context, boolean isNested) { - long count = 0; - for (Map.Entry<BytesRef, Query> entry : context.percolateQueries().entrySet()) { - try { - Query existsQuery = entry.getValue(); - if (isNested) { - existsQuery = new BooleanQuery.Builder() - .add(existsQuery, Occur.MUST) - .add(Queries.newNonNestedFilter(), Occur.FILTER) - .build(); - } - if (Lucene.exists(context.docSearcher(), existsQuery)) { - count ++; - } - } catch (Throwable e) { - logger.debug("[" + entry.getKey() + "] failed to execute query", e); - throw new PercolateException(context.indexShard().shardId(), "failed to execute", e); + InternalAggregations aggregations = InternalAggregations.reduce(aggregationsList, new InternalAggregation.ReduceContext(bigArrays, scriptService, headersContext)); + if (aggregations != null) { + List<SiblingPipelineAggregator> pipelineAggregators = shardResults.get(0).pipelineAggregators(); + if (pipelineAggregators != null) { + List<InternalAggregation> newAggs = StreamSupport.stream(aggregations.spliterator(), false).map((p) -> { + return (InternalAggregation) p; + }).collect(Collectors.toList()); + for (SiblingPipelineAggregator pipelineAggregator : pipelineAggregators) { + InternalAggregation newAgg = pipelineAggregator.doReduce(new InternalAggregations(newAggs), new InternalAggregation.ReduceContext(bigArrays, scriptService, headersContext)); + newAggs.add(newAgg); } - } - return new PercolateShardResponse(count, context, request.shardId()); - } - - }; - - private final PercolatorType queryCountPercolator = new PercolatorType() { - - @Override - public byte id() { - return 0x02; - } - - @Override - public ReduceResult reduce(List<PercolateShardResponse> shardResults, HasContextAndHeaders headersContext) { - return countPercolator.reduce(shardResults, headersContext); - } - - @Override - public PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context, boolean isNested) { - long count = 0; - Engine.Searcher percolatorSearcher = context.indexShard().acquireSearcher("percolate"); - try { - Count countCollector = count(logger, context, isNested); - queryBasedPercolating(percolatorSearcher, context, countCollector); - count = countCollector.counter(); - } catch (Throwable e) { - logger.warn("failed to execute", e); - } finally { - percolatorSearcher.close(); - } - return new PercolateShardResponse(count, context, request.shardId()); - } - - }; - - private final
PercolatorType matchPercolator = new PercolatorType() { - - @Override - public byte id() { - return 0x03; - } - - @Override - public ReduceResult reduce(List shardResults, HasContextAndHeaders headersContext) { - long foundMatches = 0; - int numMatches = 0; - for (PercolateShardResponse response : shardResults) { - foundMatches += response.count(); - numMatches += response.matches().length; - } - int requestedSize = shardResults.get(0).requestedSize(); - - // Use a custom impl of AbstractBigArray for Object[]? - List finalMatches = new ArrayList<>(requestedSize == 0 ? numMatches : requestedSize); - outer: - for (PercolateShardResponse response : shardResults) { - Text index = new Text(response.getIndex()); - for (int i = 0; i < response.matches().length; i++) { - float score = response.scores().length == 0 ? NO_SCORE : response.scores()[i]; - Text match = new Text(new BytesArray(response.matches()[i])); - Map hl = response.hls().isEmpty() ? null : response.hls().get(i); - finalMatches.add(new PercolateResponse.Match(index, match, score, hl)); - if (requestedSize != 0 && finalMatches.size() == requestedSize) { - break outer; - } - } - } - - assert !shardResults.isEmpty(); - InternalAggregations reducedAggregations = reduceAggregations(shardResults, headersContext); - return new ReduceResult(foundMatches, finalMatches.toArray(new PercolateResponse.Match[finalMatches.size()]), reducedAggregations); - } - - @Override - public PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context, boolean isNested) { - long count = 0; - List matches = new ArrayList<>(); - List> hls = new ArrayList<>(); - - for (Map.Entry entry : context.percolateQueries().entrySet()) { - if (context.highlight() != null) { - context.parsedQuery(new ParsedQuery(entry.getValue())); - context.hitContext().cache().clear(); - } - try { - Query existsQuery = entry.getValue(); - if (isNested) { - existsQuery = new BooleanQuery.Builder() - .add(existsQuery, Occur.MUST) - .add(Queries.newNonNestedFilter(), Occur.FILTER) - .build(); - } - if (Lucene.exists(context.docSearcher(), existsQuery)) { - if (!context.limit || count < context.size()) { - matches.add(entry.getKey()); - if (context.highlight() != null) { - highlightPhase.hitExecute(context, context.hitContext()); - hls.add(context.hitContext().hit().getHighlightFields()); - } - } - count++; - } - } catch (Throwable e) { - logger.debug("[" + entry.getKey() + "] failed to execute query", e); - throw new PercolateException(context.indexShard().shardId(), "failed to execute", e); - } - } - - BytesRef[] finalMatches = matches.toArray(new BytesRef[matches.size()]); - return new PercolateShardResponse(finalMatches, hls, count, context, request.shardId()); - } - }; - - private final PercolatorType queryPercolator = new PercolatorType() { - - @Override - public byte id() { - return 0x04; - } - - @Override - public ReduceResult reduce(List shardResults, HasContextAndHeaders headersContext) { - return matchPercolator.reduce(shardResults, headersContext); - } - - @Override - public PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context, boolean isNested) { - Engine.Searcher percolatorSearcher = context.indexShard().acquireSearcher("percolate"); - try { - Match match = match(logger, context, highlightPhase, isNested); - queryBasedPercolating(percolatorSearcher, context, match); - List matches = match.matches(); - List> hls = match.hls(); - long count = match.counter(); - - BytesRef[] finalMatches = matches.toArray(new 
BytesRef[matches.size()]); - return new PercolateShardResponse(finalMatches, hls, count, context, request.shardId()); - } catch (Throwable e) { - logger.debug("failed to execute", e); - throw new PercolateException(context.indexShard().shardId(), "failed to execute", e); - } finally { - percolatorSearcher.close(); + aggregations = new InternalAggregations(newAggs); } } - }; - - private final PercolatorType scoringPercolator = new PercolatorType() { - - @Override - public byte id() { - return 0x05; - } - - @Override - public ReduceResult reduce(List shardResults, HasContextAndHeaders headersContext) { - return matchPercolator.reduce(shardResults, headersContext); - } - - @Override - public PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context, boolean isNested) { - Engine.Searcher percolatorSearcher = context.indexShard().acquireSearcher("percolate"); - try { - MatchAndScore matchAndScore = matchAndScore(logger, context, highlightPhase, isNested); - queryBasedPercolating(percolatorSearcher, context, matchAndScore); - List matches = matchAndScore.matches(); - List> hls = matchAndScore.hls(); - float[] scores = matchAndScore.scores().toArray(); - long count = matchAndScore.counter(); - - BytesRef[] finalMatches = matches.toArray(new BytesRef[matches.size()]); - return new PercolateShardResponse(finalMatches, hls, count, scores, context, request.shardId()); - } catch (Throwable e) { - logger.debug("failed to execute", e); - throw new PercolateException(context.indexShard().shardId(), "failed to execute", e); - } finally { - percolatorSearcher.close(); - } - } - }; - - private final PercolatorType topMatchingPercolator = new PercolatorType() { - - @Override - public byte id() { - return 0x06; - } - - @Override - public ReduceResult reduce(List shardResults, HasContextAndHeaders headersContext) { - long foundMatches = 0; - int nonEmptyResponses = 0; - int firstNonEmptyIndex = 0; - for (int i = 0; i < shardResults.size(); i++) { - PercolateShardResponse response = shardResults.get(i); - foundMatches += response.count(); - if (response.matches().length != 0) { - if (firstNonEmptyIndex == 0) { - firstNonEmptyIndex = i; - } - nonEmptyResponses++; - } - } - - int requestedSize = shardResults.get(0).requestedSize(); - - // Use a custom impl of AbstractBigArray for Object[]? - List finalMatches = new ArrayList<>(requestedSize); - if (nonEmptyResponses == 1) { - PercolateShardResponse response = shardResults.get(firstNonEmptyIndex); - Text index = new Text(response.getIndex()); - for (int i = 0; i < response.matches().length; i++) { - float score = response.scores().length == 0 ? Float.NaN : response.scores()[i]; - Text match = new Text(new BytesArray(response.matches()[i])); - if (!response.hls().isEmpty()) { - Map hl = response.hls().get(i); - finalMatches.add(new PercolateResponse.Match(index, match, score, hl)); - } else { - finalMatches.add(new PercolateResponse.Match(index, match, score)); - } - } - } else { - int[] slots = new int[shardResults.size()]; - while (true) { - float lowestScore = Float.NEGATIVE_INFINITY; - int requestIndex = -1; - int itemIndex = -1; - for (int i = 0; i < shardResults.size(); i++) { - int scoreIndex = slots[i]; - float[] scores = shardResults.get(i).scores(); - if (scoreIndex >= scores.length) { - continue; - } - - float score = scores[scoreIndex]; - int cmp = Float.compare(lowestScore, score); - // TODO: Maybe add a tie? 
- if (cmp < 0) { - requestIndex = i; - itemIndex = scoreIndex; - lowestScore = score; - } - } - - // This means the shard matches have been exhausted and we should bail - if (requestIndex == -1) { - break; - } - - slots[requestIndex]++; - - PercolateShardResponse shardResponse = shardResults.get(requestIndex); - Text index = new Text(shardResponse.getIndex()); - Text match = new Text(new BytesArray(shardResponse.matches()[itemIndex])); - float score = shardResponse.scores()[itemIndex]; - if (!shardResponse.hls().isEmpty()) { - Map hl = shardResponse.hls().get(itemIndex); - finalMatches.add(new PercolateResponse.Match(index, match, score, hl)); - } else { - finalMatches.add(new PercolateResponse.Match(index, match, score)); - } - if (finalMatches.size() == requestedSize) { - break; - } - } - } - - assert !shardResults.isEmpty(); - InternalAggregations reducedAggregations = reduceAggregations(shardResults, headersContext); - return new ReduceResult(foundMatches, finalMatches.toArray(new PercolateResponse.Match[finalMatches.size()]), reducedAggregations); - } - - @Override - public PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context, boolean isNested) { - Engine.Searcher percolatorSearcher = context.indexShard().acquireSearcher("percolate"); - try { - MatchAndSort matchAndSort = QueryCollector.matchAndSort(logger, context, isNested); - queryBasedPercolating(percolatorSearcher, context, matchAndSort); - TopDocs topDocs = matchAndSort.topDocs(); - long count = topDocs.totalHits; - List matches = new ArrayList<>(topDocs.scoreDocs.length); - float[] scores = new float[topDocs.scoreDocs.length]; - List> hls = null; - if (context.highlight() != null) { - hls = new ArrayList<>(topDocs.scoreDocs.length); - } - - final MappedFieldType uidMapper = context.mapperService().fullName(UidFieldMapper.NAME); - final IndexFieldData uidFieldData = context.fieldData().getForField(uidMapper); - int i = 0; - for (ScoreDoc scoreDoc : topDocs.scoreDocs) { - int segmentIdx = ReaderUtil.subIndex(scoreDoc.doc, percolatorSearcher.reader().leaves()); - LeafReaderContext atomicReaderContext = percolatorSearcher.reader().leaves().get(segmentIdx); - SortedBinaryDocValues values = uidFieldData.load(atomicReaderContext).getBytesValues(); - final int localDocId = scoreDoc.doc - atomicReaderContext.docBase; - values.setDocument(localDocId); - final int numValues = values.count(); - assert numValues == 1; - BytesRef bytes = Uid.splitUidIntoTypeAndId(values.valueAt(0))[1]; - matches.add(BytesRef.deepCopyOf(bytes)); - if (hls != null) { - Query query = context.percolateQueries().get(bytes); - context.parsedQuery(new ParsedQuery(query)); - context.hitContext().cache().clear(); - highlightPhase.hitExecute(context, context.hitContext()); - hls.add(i, context.hitContext().hit().getHighlightFields()); - } - scores[i++] = scoreDoc.score; - } - if (hls != null) { - return new PercolateShardResponse(matches.toArray(new BytesRef[matches.size()]), hls, count, scores, context, request.shardId()); - } else { - return new PercolateShardResponse(matches.toArray(new BytesRef[matches.size()]), count, scores, context, request.shardId()); - } - } catch (Throwable e) { - logger.debug("failed to execute", e); - throw new PercolateException(context.indexShard().shardId(), "failed to execute", e); - } finally { - percolatorSearcher.close(); - } - } - - }; - - private void queryBasedPercolating(Engine.Searcher percolatorSearcher, PercolateContext context, QueryCollector percolateCollector) throws IOException { - 
Query percolatorTypeFilter = context.indexService().mapperService().documentMapper(TYPE_NAME).typeFilter(); - - final Query filter; - if (context.aliasFilter() != null) { - BooleanQuery.Builder booleanFilter = new BooleanQuery.Builder(); - booleanFilter.add(context.aliasFilter(), BooleanClause.Occur.MUST); - booleanFilter.add(percolatorTypeFilter, BooleanClause.Occur.MUST); - filter = booleanFilter.build(); - } else { - filter = percolatorTypeFilter; - } - - Query query = Queries.filtered(context.percolateQuery(), filter); - percolatorSearcher.searcher().search(query, percolateCollector); - percolateCollector.aggregatorCollector.postCollection(); - if (context.aggregations() != null) { - aggregationPhase.execute(context); - } + return aggregations; } public final static class ReduceResult { @@ -849,32 +364,5 @@ public class PercolatorService extends AbstractComponent { } } - private InternalAggregations reduceAggregations(List shardResults, HasContextAndHeaders headersContext) { - if (shardResults.get(0).aggregations() == null) { - return null; - } - - List aggregationsList = new ArrayList<>(shardResults.size()); - for (PercolateShardResponse shardResult : shardResults) { - aggregationsList.add(shardResult.aggregations()); - } - InternalAggregations aggregations = InternalAggregations.reduce(aggregationsList, new ReduceContext(bigArrays, scriptService, - headersContext)); - if (aggregations != null) { - List pipelineAggregators = shardResults.get(0).pipelineAggregators(); - if (pipelineAggregators != null) { - List newAggs = StreamSupport.stream(aggregations.spliterator(), false).map((p) -> { - return (InternalAggregation) p; - }).collect(Collectors.toList()); - for (SiblingPipelineAggregator pipelineAggregator : pipelineAggregators) { - InternalAggregation newAgg = pipelineAggregator.doReduce(new InternalAggregations(newAggs), new ReduceContext( - bigArrays, scriptService, headersContext)); - newAggs.add(newAgg); - } - aggregations = new InternalAggregations(newAggs); - } - } - return aggregations; - } } diff --git a/core/src/main/java/org/elasticsearch/percolator/QueryCollector.java b/core/src/main/java/org/elasticsearch/percolator/QueryCollector.java deleted file mode 100644 index 828ff4f08e4..00000000000 --- a/core/src/main/java/org/elasticsearch/percolator/QueryCollector.java +++ /dev/null @@ -1,403 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.percolator; - -import com.carrotsearch.hppc.FloatArrayList; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.BooleanClause.Occur; -import org.apache.lucene.search.BooleanQuery; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.LeafCollector; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.Scorer; -import org.apache.lucene.search.SimpleCollector; -import org.apache.lucene.search.TopDocs; -import org.apache.lucene.search.TopScoreDocCollector; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.lucene.Lucene; -import org.elasticsearch.common.lucene.search.Queries; -import org.elasticsearch.index.fielddata.IndexFieldData; -import org.elasticsearch.index.fielddata.SortedBinaryDocValues; -import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.Uid; -import org.elasticsearch.index.mapper.internal.UidFieldMapper; -import org.elasticsearch.index.query.ParsedQuery; -import org.elasticsearch.search.aggregations.Aggregator; -import org.elasticsearch.search.aggregations.BucketCollector; -import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregator; -import org.elasticsearch.search.aggregations.support.AggregationContext; -import org.elasticsearch.search.highlight.HighlightField; -import org.elasticsearch.search.highlight.HighlightPhase; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ConcurrentMap; - -/** - */ -abstract class QueryCollector extends SimpleCollector { - - final IndexFieldData uidFieldData; - final IndexSearcher searcher; - final ConcurrentMap queries; - final ESLogger logger; - boolean isNestedDoc = false; - - BytesRef current; - - SortedBinaryDocValues values; - - final BucketCollector aggregatorCollector; - LeafCollector aggregatorLeafCollector; - - QueryCollector(ESLogger logger, PercolateContext context, boolean isNestedDoc) throws IOException { - this.logger = logger; - this.queries = context.percolateQueries(); - this.searcher = context.docSearcher(); - final MappedFieldType uidMapper = context.mapperService().fullName(UidFieldMapper.NAME); - this.uidFieldData = context.fieldData().getForField(uidMapper); - this.isNestedDoc = isNestedDoc; - - List aggregatorCollectors = new ArrayList<>(); - - if (context.aggregations() != null) { - AggregationContext aggregationContext = new AggregationContext(context); - context.aggregations().aggregationContext(aggregationContext); - - Aggregator[] aggregators = context.aggregations().factories().createTopLevelAggregators(aggregationContext); - for (int i = 0; i < aggregators.length; i++) { - if (!(aggregators[i] instanceof GlobalAggregator)) { - Aggregator aggregator = aggregators[i]; - aggregatorCollectors.add(aggregator); - } - } - context.aggregations().aggregators(aggregators); - } - aggregatorCollector = BucketCollector.wrap(aggregatorCollectors); - aggregatorCollector.preCollection(); - } - - public void postMatch(int doc) throws IOException { - aggregatorLeafCollector.collect(doc); - } - - @Override - public void setScorer(Scorer scorer) throws IOException { - aggregatorLeafCollector.setScorer(scorer); - } - - @Override - public boolean needsScores() { - return aggregatorCollector.needsScores(); - } - - @Override - public void doSetNextReader(LeafReaderContext context) throws IOException { - // we use the 
UID because id might not be indexed - values = uidFieldData.load(context).getBytesValues(); - aggregatorLeafCollector = aggregatorCollector.getLeafCollector(context); - } - - static Match match(ESLogger logger, PercolateContext context, HighlightPhase highlightPhase, boolean isNestedDoc) throws IOException { - return new Match(logger, context, highlightPhase, isNestedDoc); - } - - static Count count(ESLogger logger, PercolateContext context, boolean isNestedDoc) throws IOException { - return new Count(logger, context, isNestedDoc); - } - - static MatchAndScore matchAndScore(ESLogger logger, PercolateContext context, HighlightPhase highlightPhase, boolean isNestedDoc) throws IOException { - return new MatchAndScore(logger, context, highlightPhase, isNestedDoc); - } - - static MatchAndSort matchAndSort(ESLogger logger, PercolateContext context, boolean isNestedDoc) throws IOException { - return new MatchAndSort(logger, context, isNestedDoc); - } - - - protected final Query getQuery(int doc) { - values.setDocument(doc); - final int numValues = values.count(); - if (numValues == 0) { - return null; - } - assert numValues == 1; - current = Uid.splitUidIntoTypeAndId(values.valueAt(0))[1]; - return queries.get(current); - } - - - - final static class Match extends QueryCollector { - - final PercolateContext context; - final HighlightPhase highlightPhase; - - final List matches = new ArrayList<>(); - final List> hls = new ArrayList<>(); - final boolean limit; - final int size; - long counter = 0; - - Match(ESLogger logger, PercolateContext context, HighlightPhase highlightPhase, boolean isNestedDoc) throws IOException { - super(logger, context, isNestedDoc); - this.limit = context.limit; - this.size = context.size(); - this.context = context; - this.highlightPhase = highlightPhase; - } - - @Override - public void collect(int doc) throws IOException { - final Query query = getQuery(doc); - if (query == null) { - // log??? - return; - } - Query existsQuery = query; - if (isNestedDoc) { - existsQuery = new BooleanQuery.Builder() - .add(existsQuery, Occur.MUST) - .add(Queries.newNonNestedFilter(), Occur.FILTER) - .build(); - } - // run the query - try { - if (context.highlight() != null) { - context.parsedQuery(new ParsedQuery(query)); - context.hitContext().cache().clear(); - } - - if (Lucene.exists(searcher, existsQuery)) { - if (!limit || counter < size) { - matches.add(BytesRef.deepCopyOf(current)); - if (context.highlight() != null) { - highlightPhase.hitExecute(context, context.hitContext()); - hls.add(context.hitContext().hit().getHighlightFields()); - } - } - counter++; - postMatch(doc); - } - } catch (IOException e) { - logger.warn("[" + current.utf8ToString() + "] failed to execute query", e); - } - } - - long counter() { - return counter; - } - - List matches() { - return matches; - } - - List> hls() { - return hls; - } - } - - final static class MatchAndSort extends QueryCollector { - - private final TopScoreDocCollector topDocsCollector; - private LeafCollector topDocsLeafCollector; - - MatchAndSort(ESLogger logger, PercolateContext context, boolean isNestedDoc) throws IOException { - super(logger, context, isNestedDoc); - // TODO: Use TopFieldCollector.create(...) for ascending and descending scoring? 
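MatchAndSort, deleted here, shows the old pattern of embedding a stock Lucene collector inside a custom one: every doc whose percolator query matches is forwarded to a TopScoreDocCollector through its per-leaf collector, so "top matches by score" falls out of standard machinery. The delegation reduced to its skeleton (leaf, scorer and docId as handed to the enclosing collector):

    TopScoreDocCollector topDocsCollector = TopScoreDocCollector.create(10);
    LeafCollector leafCollector = topDocsCollector.getLeafCollector(leaf);
    leafCollector.setScorer(scorer);
    leafCollector.collect(docId);   // called only for matching percolator queries
    TopDocs topDocs = topDocsCollector.topDocs();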
- topDocsCollector = TopScoreDocCollector.create(context.size()); - } - - @Override - public boolean needsScores() { - return super.needsScores() || topDocsCollector.needsScores(); - } - - @Override - public void collect(int doc) throws IOException { - final Query query = getQuery(doc); - if (query == null) { - // log??? - return; - } - Query existsQuery = query; - if (isNestedDoc) { - existsQuery = new BooleanQuery.Builder() - .add(existsQuery, Occur.MUST) - .add(Queries.newNonNestedFilter(), Occur.FILTER) - .build(); - } - // run the query - try { - if (Lucene.exists(searcher, existsQuery)) { - topDocsLeafCollector.collect(doc); - postMatch(doc); - } - } catch (IOException e) { - logger.warn("[" + current.utf8ToString() + "] failed to execute query", e); - } - } - - @Override - public void doSetNextReader(LeafReaderContext context) throws IOException { - super.doSetNextReader(context); - topDocsLeafCollector = topDocsCollector.getLeafCollector(context); - } - - @Override - public void setScorer(Scorer scorer) throws IOException { - topDocsLeafCollector.setScorer(scorer); - } - - TopDocs topDocs() { - return topDocsCollector.topDocs(); - } - - } - - final static class MatchAndScore extends QueryCollector { - - final PercolateContext context; - final HighlightPhase highlightPhase; - - final List matches = new ArrayList<>(); - final List> hls = new ArrayList<>(); - // TODO: Use thread local in order to cache the scores lists? - final FloatArrayList scores = new FloatArrayList(); - final boolean limit; - final int size; - long counter = 0; - - private Scorer scorer; - - MatchAndScore(ESLogger logger, PercolateContext context, HighlightPhase highlightPhase, boolean isNestedDoc) throws IOException { - super(logger, context, isNestedDoc); - this.limit = context.limit; - this.size = context.size(); - this.context = context; - this.highlightPhase = highlightPhase; - } - - @Override - public boolean needsScores() { - return true; - } - - @Override - public void collect(int doc) throws IOException { - final Query query = getQuery(doc); - if (query == null) { - // log??? 
- return; - } - Query existsQuery = query; - if (isNestedDoc) { - existsQuery = new BooleanQuery.Builder() - .add(existsQuery, Occur.MUST) - .add(Queries.newNonNestedFilter(), Occur.FILTER) - .build(); - } - // run the query - try { - if (context.highlight() != null) { - context.parsedQuery(new ParsedQuery(query)); - context.hitContext().cache().clear(); - } - if (Lucene.exists(searcher, existsQuery)) { - if (!limit || counter < size) { - matches.add(BytesRef.deepCopyOf(current)); - scores.add(scorer.score()); - if (context.highlight() != null) { - highlightPhase.hitExecute(context, context.hitContext()); - hls.add(context.hitContext().hit().getHighlightFields()); - } - } - counter++; - postMatch(doc); - } - } catch (IOException e) { - logger.warn("[" + current.utf8ToString() + "] failed to execute query", e); - } - } - - @Override - public void setScorer(Scorer scorer) throws IOException { - this.scorer = scorer; - } - - long counter() { - return counter; - } - - List matches() { - return matches; - } - - FloatArrayList scores() { - return scores; - } - - List> hls() { - return hls; - } - } - - final static class Count extends QueryCollector { - - private long counter = 0; - - Count(ESLogger logger, PercolateContext context, boolean isNestedDoc) throws IOException { - super(logger, context, isNestedDoc); - } - - @Override - public void collect(int doc) throws IOException { - final Query query = getQuery(doc); - if (query == null) { - // log??? - return; - } - Query existsQuery = query; - if (isNestedDoc) { - existsQuery = new BooleanQuery.Builder() - .add(existsQuery, Occur.MUST) - .add(Queries.newNonNestedFilter(), Occur.FILTER) - .build(); - } - // run the query - try { - if (Lucene.exists(searcher, existsQuery)) { - counter++; - postMatch(doc); - } - } catch (IOException e) { - logger.warn("[" + current.utf8ToString() + "] failed to execute query", e); - } - } - - long counter() { - return counter; - } - - } - -} diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/get/RestGetIndicesAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/get/RestGetIndicesAction.java index 86336ccf971..b7371f7b80e 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/get/RestGetIndicesAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/get/RestGetIndicesAction.java @@ -40,7 +40,6 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.action.support.RestBuilderListener; -import org.elasticsearch.search.warmer.IndexWarmersMetaData; import java.io.IOException; import java.util.List; @@ -100,9 +99,6 @@ public class RestGetIndicesAction extends BaseRestHandler { case SETTINGS: writeSettings(response.settings().get(index), builder, request); break; - case WARMERS: - writeWarmers(response.warmers().get(index), builder, request); - break; default: throw new IllegalStateException("feature [" + feature + "] is not valid"); } @@ -142,15 +138,6 @@ public class RestGetIndicesAction extends BaseRestHandler { builder.endObject(); } - private void writeWarmers(List warmers, XContentBuilder builder, Params params) throws IOException { - builder.startObject(Fields.WARMERS); - if (warmers != null) { - for (IndexWarmersMetaData.Entry warmer : warmers) { - IndexWarmersMetaData.toXContent(warmer, builder, params); - } - } - builder.endObject(); - } }); } diff --git 
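[Note] Each of the deleted QueryCollector subclasses above repeats one shape: the percolator query registered for a document is first restricted to top-level (non-nested) Lucene documents when the percolated document produced nested docs, and is then run purely as a yes/no existence check. A minimal stand-alone sketch of that shape, assuming a plain Lucene 5.x IndexSearcher; nonNestedFilter stands in for Queries.newNonNestedFilter(), and the counting collector is a simplification of the real Lucene.exists helper, which short-circuits after the first hit:

import java.io.IOException;

import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TotalHitCountCollector;

class PercolateExistsSketch {

    // Restrict a registered percolator query to root documents;
    // nonNestedFilter is a stand-in for Queries.newNonNestedFilter().
    static Query topLevelOnly(Query query, Query nonNestedFilter) {
        return new BooleanQuery.Builder()
                .add(query, Occur.MUST)             // must still match the query
                .add(nonNestedFilter, Occur.FILTER) // non-scoring restriction
                .build();
    }

    // Yes/no match check; counts all hits for simplicity where the real
    // helper stops at the first one.
    static boolean exists(IndexSearcher searcher, Query query) throws IOException {
        TotalHitCountCollector collector = new TotalHitCountCollector();
        searcher.search(query, collector);
        return collector.getTotalHits() > 0;
    }
}

The subclasses differ only in what they do on a match: Match and MatchAndScore record ids (and scores), MatchAndSort routes the doc into a TopScoreDocCollector, and Count just increments a counter.
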
a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/delete/RestDeleteWarmerAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/delete/RestDeleteWarmerAction.java deleted file mode 100644 index 4fe07564031..00000000000 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/delete/RestDeleteWarmerAction.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.rest.action.admin.indices.warmer.delete; - -import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerRequest; -import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; -import org.elasticsearch.rest.RestController; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.support.AcknowledgedRestListener; - -import static org.elasticsearch.rest.RestRequest.Method.DELETE; - -/** - */ -public class RestDeleteWarmerAction extends BaseRestHandler { - - @Inject - public RestDeleteWarmerAction(Settings settings, RestController controller, Client client) { - super(settings, controller, client); - controller.registerHandler(DELETE, "/{index}/_warmer", this); - controller.registerHandler(DELETE, "/{index}/_warmer/{name}", this); - controller.registerHandler(DELETE, "/{index}/_warmers", this); - controller.registerHandler(DELETE, "/{index}/_warmers/{name}", this); - } - - @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { - DeleteWarmerRequest deleteWarmerRequest = new DeleteWarmerRequest(Strings.splitStringByCommaToArray(request.param("name"))) - .indices(Strings.splitStringByCommaToArray(request.param("index"))); - deleteWarmerRequest.timeout(request.paramAsTime("timeout", deleteWarmerRequest.timeout())); - deleteWarmerRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteWarmerRequest.masterNodeTimeout())); - deleteWarmerRequest.indicesOptions(IndicesOptions.fromRequest(request, deleteWarmerRequest.indicesOptions())); - client.admin().indices().deleteWarmer(deleteWarmerRequest, new AcknowledgedRestListener(channel)); - } -} diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/get/RestGetWarmerAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/get/RestGetWarmerAction.java deleted file mode 100644 index 26f1186f550..00000000000 --- 
a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/get/RestGetWarmerAction.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.rest.action.admin.indices.warmer.get; - -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequest; -import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; -import org.elasticsearch.rest.RestChannel; -import org.elasticsearch.rest.RestController; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.support.RestBuilderListener; -import org.elasticsearch.search.warmer.IndexWarmersMetaData; - -import java.util.List; - -import static org.elasticsearch.rest.RestRequest.Method.GET; -import static org.elasticsearch.rest.RestStatus.OK; - -/** - * - */ -public class RestGetWarmerAction extends BaseRestHandler { - - @Inject - public RestGetWarmerAction(Settings settings, RestController controller, Client client) { - super(settings, controller, client); - controller.registerHandler(GET, "/_warmer/{name}", this); - controller.registerHandler(GET, "/{index}/_warmer/{name}", this); - controller.registerHandler(GET, "/{index}/_warmers/{name}", this); - controller.registerHandler(GET, "/{index}/{type}/_warmer/{name}", this); - } - - @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { - final String[] indices = Strings.splitStringByCommaToArray(request.param("index")); - final String[] types = Strings.splitStringByCommaToArray(request.param("type")); - final String[] names = request.paramAsStringArray("name", Strings.EMPTY_ARRAY); - - GetWarmersRequest getWarmersRequest = new GetWarmersRequest(); - getWarmersRequest.indices(indices).types(types).warmers(names); - getWarmersRequest.local(request.paramAsBoolean("local", getWarmersRequest.local())); - getWarmersRequest.indicesOptions(IndicesOptions.fromRequest(request, getWarmersRequest.indicesOptions())); - client.admin().indices().getWarmers(getWarmersRequest, new RestBuilderListener(channel) { - - @Override - public RestResponse buildResponse(GetWarmersResponse response, XContentBuilder builder) throws Exception { - if (indices.length > 0 && response.warmers().isEmpty()) { - return new 
BytesRestResponse(OK, builder.startObject().endObject()); - } - - builder.startObject(); - for (ObjectObjectCursor> entry : response.warmers()) { - builder.startObject(entry.key, XContentBuilder.FieldCaseConversion.NONE); - builder.startObject(IndexWarmersMetaData.TYPE, XContentBuilder.FieldCaseConversion.NONE); - for (IndexWarmersMetaData.Entry warmerEntry : entry.value) { - IndexWarmersMetaData.toXContent(warmerEntry, builder, request); - } - builder.endObject(); - builder.endObject(); - } - builder.endObject(); - - return new BytesRestResponse(OK, builder); - } - }); - } -} diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/put/RestPutWarmerAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/put/RestPutWarmerAction.java deleted file mode 100644 index b47c2542abf..00000000000 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/put/RestPutWarmerAction.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.rest.action.admin.indices.warmer.put; - -import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerRequest; -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.indices.query.IndicesQueriesRegistry; -import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; -import org.elasticsearch.rest.RestController; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.support.AcknowledgedRestListener; -import org.elasticsearch.rest.action.support.RestActions; -import org.elasticsearch.search.builder.SearchSourceBuilder; - -import java.io.IOException; - -import static org.elasticsearch.rest.RestRequest.Method.POST; -import static org.elasticsearch.rest.RestRequest.Method.PUT; - -/** - */ -public class RestPutWarmerAction extends BaseRestHandler { - - private final IndicesQueriesRegistry queryRegistry; - - @Inject - public RestPutWarmerAction(Settings settings, RestController controller, Client client, IndicesQueriesRegistry queryRegistry) { - super(settings, controller, client); - this.queryRegistry = queryRegistry; - controller.registerHandler(PUT, "/_warmer/{name}", this); - controller.registerHandler(PUT, "/{index}/_warmer/{name}", this); - controller.registerHandler(PUT, "/{index}/{type}/_warmer/{name}", this); - - controller.registerHandler(PUT, "/_warmers/{name}", this); - controller.registerHandler(PUT, "/{index}/_warmers/{name}", this); - controller.registerHandler(PUT, "/{index}/{type}/_warmers/{name}", this); - - controller.registerHandler(POST, "/_warmer/{name}", this); - controller.registerHandler(POST, "/{index}/_warmer/{name}", this); - controller.registerHandler(POST, "/{index}/{type}/_warmer/{name}", this); - - controller.registerHandler(POST, "/_warmers/{name}", this); - controller.registerHandler(POST, "/{index}/_warmers/{name}", this); - controller.registerHandler(POST, "/{index}/{type}/_warmers/{name}", this); - } - - @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws IOException { - PutWarmerRequest putWarmerRequest = new PutWarmerRequest(request.param("name")); - - BytesReference sourceBytes = RestActions.getRestContent(request); - SearchSourceBuilder source = RestActions.getRestSearchSource(sourceBytes, queryRegistry, parseFieldMatcher); - SearchRequest searchRequest = new SearchRequest(Strings.splitStringByCommaToArray(request.param("index"))) - .types(Strings.splitStringByCommaToArray(request.param("type"))) - .requestCache(request.paramAsBoolean("request_cache", null)).source(source); - searchRequest.indicesOptions(IndicesOptions.fromRequest(request, searchRequest.indicesOptions())); - putWarmerRequest.searchRequest(searchRequest); - putWarmerRequest.timeout(request.paramAsTime("timeout", putWarmerRequest.timeout())); - putWarmerRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putWarmerRequest.masterNodeTimeout())); - client.admin().indices().putWarmer(putWarmerRequest, new AcknowledgedRestListener<>(channel)); - } -} diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java 
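[Note] An aside on the registration block in the deleted RestPutWarmerAction above: one handler serves every method/path variant, with placeholders such as {index} and {name} bound as request parameters. A toy sketch of template matching under that convention (illustrative only, not the RestController implementation):

import java.util.HashMap;
import java.util.Map;

class RouteSketch {

    // Segments starting with '{' bind the matching path segment as a param;
    // all other segments must match literally. Returns null on no match.
    static Map<String, String> match(String template, String path) {
        String[] want = template.split("/");
        String[] got = path.split("/");
        if (want.length != got.length) {
            return null;
        }
        Map<String, String> params = new HashMap<>();
        for (int i = 0; i < want.length; i++) {
            if (want[i].startsWith("{")) {
                params.put(want[i].substring(1, want[i].length() - 1), got[i]);
            } else if (want[i].equals(got[i]) == false) {
                return null;
            }
        }
        return params;
    }

    public static void main(String[] args) {
        // prints {index=idx, name=warmer1} (map order may vary)
        System.out.println(match("/{index}/_warmer/{name}", "/idx/_warmer/warmer1"));
    }
}

This is why the deleted class could register both the "_warmer" and deprecated "_warmers" forms, for PUT and POST alike, against a single handleRequest implementation.
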
index e86132a909e..110aa90047f 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java @@ -41,7 +41,7 @@ import org.elasticsearch.index.engine.SegmentsStats; import org.elasticsearch.index.fielddata.FieldDataStats; import org.elasticsearch.index.flush.FlushStats; import org.elasticsearch.index.get.GetStats; -import org.elasticsearch.index.indexing.IndexingStats; +import org.elasticsearch.index.shard.IndexingStats; import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.percolator.PercolateStats; import org.elasticsearch.index.refresh.RefreshStats; diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java index 02efa373ab0..473282a5777 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchService.java +++ b/core/src/main/java/org/elasticsearch/search/SearchService.java @@ -28,7 +28,6 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.search.TopDocs; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.action.search.SearchType; import org.elasticsearch.cache.recycler.PageCacheRecycler; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -94,7 +93,6 @@ import org.elasticsearch.search.internal.InternalScrollSearchRequest; import org.elasticsearch.search.internal.ScrollContext; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.SearchContext.Lifetime; -import org.elasticsearch.search.internal.ShardSearchLocalRequest; import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.profile.Profilers; import org.elasticsearch.search.query.QueryPhase; @@ -102,7 +100,6 @@ import org.elasticsearch.search.query.QuerySearchRequest; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.query.QuerySearchResultProvider; import org.elasticsearch.search.query.ScrollQuerySearchResult; -import org.elasticsearch.search.warmer.IndexWarmersMetaData; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; @@ -201,7 +198,6 @@ public class SearchService extends AbstractLifecycleComponent imp this.indicesWarmer.addListener(new NormsWarmer(indicesWarmer)); this.indicesWarmer.addListener(new FieldDataWarmer(indicesWarmer)); - this.indicesWarmer.addListener(new SearchWarmer()); defaultSearchTimeout = DEFAULT_SEARCH_TIMEOUT_SETTING.get(settings); clusterSettings.addSettingsUpdateConsumer(DEFAULT_SEARCH_TIMEOUT_SETTING, this::setDefaultSearchTimeout); @@ -1163,76 +1159,6 @@ public class SearchService extends AbstractLifecycleComponent imp } } - class SearchWarmer implements IndicesWarmer.Listener { - - @Override - public TerminationHandle warmNewReaders(IndexShard indexShard, final Engine.Searcher searcher) { - return internalWarm(indexShard, searcher, false); - } - - @Override - public TerminationHandle warmTopReader(IndexShard indexShard, final Engine.Searcher searcher) { - return internalWarm(indexShard, searcher, true); - } - - public TerminationHandle internalWarm(final IndexShard indexShard, final Engine.Searcher searcher, final boolean top) { - IndexWarmersMetaData custom = indexShard.getIndexSettings().getIndexMetaData().custom(IndexWarmersMetaData.TYPE); - if (custom == null) { - return TerminationHandle.NO_WAIT; - 
} - final Executor executor = indicesWarmer.getExecutor(); - final CountDownLatch latch = new CountDownLatch(custom.entries().size()); - for (final IndexWarmersMetaData.Entry entry : custom.entries()) { - executor.execute(() -> { - SearchContext context = null; - try { - long now = System.nanoTime(); - final IndexService indexService = indicesService.indexServiceSafe(indexShard.shardId().index().name()); - QueryParseContext queryParseContext = new QueryParseContext(indicesService.getIndicesQueryRegistry()); - queryParseContext.parseFieldMatcher(indexService.getIndexSettings().getParseFieldMatcher()); - ShardSearchRequest request = new ShardSearchLocalRequest(indexShard.shardId(), indexShard.getIndexSettings() - .getNumberOfShards(), - SearchType.QUERY_THEN_FETCH, entry.source().build(queryParseContext), entry.types(), entry.requestCache()); - context = createContext(request, searcher); - // if we use sort, we need to do query to sort on - // it and load relevant field data - // if not, we might as well set size=0 (and cache - // if needed) - if (context.sort() == null) { - context.size(0); - } - boolean canCache = indicesQueryCache.canCache(request, context); - // early terminate when we can cache, since we - // can only do proper caching on top level searcher - // also, if we can't cache, and its top, we don't - // need to execute it, since we already did when its - // not top - if (canCache != top) { - return; - } - loadOrExecuteQueryPhase(request, context, queryPhase); - long took = System.nanoTime() - now; - if (indexShard.warmerService().logger().isTraceEnabled()) { - indexShard.warmerService().logger().trace("warmed [{}], took [{}]", entry.name(), TimeValue.timeValueNanos(took)); - } - } catch (Throwable t) { - indexShard.warmerService().logger().warn("warmer [{}] failed", t, entry.name()); - } finally { - try { - if (context != null) { - freeContext(context.id()); - cleanContext(context); - } - } finally { - latch.countDown(); - } - } - }); - } - return () -> latch.await(); - } - } - class Reaper implements Runnable { @Override public void run() { diff --git a/core/src/main/java/org/elasticsearch/search/warmer/IndexWarmerMissingException.java b/core/src/main/java/org/elasticsearch/search/warmer/IndexWarmerMissingException.java deleted file mode 100644 index 1253a24544d..00000000000 --- a/core/src/main/java/org/elasticsearch/search/warmer/IndexWarmerMissingException.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.search.warmer; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.rest.RestStatus; - -import java.io.IOException; -import java.util.Arrays; - -/** - * - */ -public class IndexWarmerMissingException extends ElasticsearchException { - - private final String[] names; - - public IndexWarmerMissingException(String... names) { - super("index_warmer " + Arrays.toString(names) + " missing"); - this.names = names; - } - - public String[] names() { - return this.names; - } - - - public IndexWarmerMissingException(StreamInput in) throws IOException{ - super(in); - names = in.readStringArray(); - } - - @Override - public RestStatus status() { - return RestStatus.NOT_FOUND; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeStringArray(names); - } -} diff --git a/core/src/main/java/org/elasticsearch/search/warmer/IndexWarmersMetaData.java b/core/src/main/java/org/elasticsearch/search/warmer/IndexWarmersMetaData.java deleted file mode 100644 index 1ce27f97cff..00000000000 --- a/core/src/main/java/org/elasticsearch/search/warmer/IndexWarmersMetaData.java +++ /dev/null @@ -1,354 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.search.warmer; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.support.ToXContentToBytes; -import org.elasticsearch.cluster.AbstractDiffable; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentGenerator; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.query.QueryParseContext; -import org.elasticsearch.search.builder.SearchSourceBuilder; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Map; -import java.util.Objects; - -/** - */ -public class IndexWarmersMetaData extends AbstractDiffable implements IndexMetaData.Custom { - - public static final String TYPE = "warmers"; - - public static final IndexWarmersMetaData PROTO = new IndexWarmersMetaData(); - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - IndexWarmersMetaData that = (IndexWarmersMetaData) o; - - return entries.equals(that.entries); - - } - - @Override - public int hashCode() { - return entries.hashCode(); - } - - public static class Entry { - private final String name; - private final String[] types; - private final SearchSource source; - private final Boolean requestCache; - - public Entry(String name, String[] types, Boolean requestCache, SearchSource source) { - this.name = name; - this.types = types == null ? Strings.EMPTY_ARRAY : types; - this.source = source; - this.requestCache = requestCache; - } - - public String name() { - return this.name; - } - - public String[] types() { - return this.types; - } - - @Nullable - public SearchSource source() { - return this.source; - } - - @Nullable - public Boolean requestCache() { - return this.requestCache; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - Entry entry = (Entry) o; - - if (!name.equals(entry.name)) return false; - if (!Arrays.equals(types, entry.types)) return false; - if (!source.equals(entry.source)) return false; - return Objects.equals(requestCache, entry.requestCache); - - } - - @Override - public int hashCode() { - int result = name.hashCode(); - result = 31 * result + Arrays.hashCode(types); - result = 31 * result + source.hashCode(); - result = 31 * result + (requestCache != null ? requestCache.hashCode() : 0); - return result; - } - } - - private final List entries; - - - public IndexWarmersMetaData(Entry... 
entries) { - this.entries = Arrays.asList(entries); - } - - public List entries() { - return this.entries; - } - - @Override - public String type() { - return TYPE; - } - - @Override - public IndexWarmersMetaData readFrom(StreamInput in) throws IOException { - Entry[] entries = new Entry[in.readVInt()]; - for (int i = 0; i < entries.length; i++) { - String name = in.readString(); - String[] types = in.readStringArray(); - SearchSource source = null; - if (in.readBoolean()) { - source = new SearchSource(in); - } - Boolean queryCache; - queryCache = in.readOptionalBoolean(); - entries[i] = new Entry(name, types, queryCache, source); - } - return new IndexWarmersMetaData(entries); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(entries().size()); - for (Entry entry : entries()) { - out.writeString(entry.name()); - out.writeStringArray(entry.types()); - if (entry.source() == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - entry.source.writeTo(out); - } - out.writeOptionalBoolean(entry.requestCache()); - } - } - - @Override - public IndexWarmersMetaData fromMap(Map map) throws IOException { - // if it starts with the type, remove it - if (map.size() == 1 && map.containsKey(TYPE)) { - map = (Map) map.values().iterator().next(); - } - XContentBuilder builder = XContentFactory.smileBuilder().map(map); - try (XContentParser parser = XContentFactory.xContent(XContentType.SMILE).createParser(builder.bytes())) { - // move to START_OBJECT - parser.nextToken(); - return fromXContent(parser); - } - } - - @Override - public IndexWarmersMetaData fromXContent(XContentParser parser) throws IOException { - // we get here after we are at warmers token - String currentFieldName = null; - XContentParser.Token token; - List entries = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token == XContentParser.Token.START_OBJECT) { - String name = currentFieldName; - List types = new ArrayList<>(2); - SearchSource source = null; - Boolean queryCache = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token == XContentParser.Token.START_ARRAY) { - if ("types".equals(currentFieldName)) { - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - types.add(parser.text()); - } - } - } else if (token == XContentParser.Token.START_OBJECT) { - if ("source".equals(currentFieldName)) { - ByteArrayOutputStream out = new ByteArrayOutputStream(); - try (XContentGenerator generator = XContentType.JSON.xContent().createGenerator(out)) { - generator.copyCurrentStructure(parser); - } - source = new SearchSource(new BytesArray(out.toByteArray())); - } - } else if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) { - if ("source".equals(currentFieldName)) { - source = new SearchSource(new BytesArray(parser.binaryValue())); - } - } else if (token.isValue()) { - if ("requestCache".equals(currentFieldName) || "request_cache".equals(currentFieldName)) { - queryCache = parser.booleanValue(); - } - } - } - entries.add(new Entry(name, types.size() == 0 ? 
Strings.EMPTY_ARRAY : types.toArray(new String[types.size()]), queryCache, source)); - } - } - return new IndexWarmersMetaData(entries.toArray(new Entry[entries.size()])); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { - //No need, IndexMetaData already writes it - //builder.startObject(TYPE, XContentBuilder.FieldCaseConversion.NONE); - for (Entry entry : entries()) { - toXContent(entry, builder, params); - } - //No need, IndexMetaData already writes it - //builder.endObject(); - return builder; - } - - public static void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.startObject(entry.name(), XContentBuilder.FieldCaseConversion.NONE); - builder.field("types", entry.types()); - if (entry.requestCache() != null) { - builder.field("requestCache", entry.requestCache()); - } - builder.field("source", entry.source()); - builder.endObject(); - } - - @Override - public IndexMetaData.Custom mergeWith(IndexMetaData.Custom other) { - IndexWarmersMetaData second = (IndexWarmersMetaData) other; - List entries = new ArrayList<>(); - entries.addAll(entries()); - for (Entry secondEntry : second.entries()) { - boolean found = false; - for (Entry firstEntry : entries()) { - if (firstEntry.name().equals(secondEntry.name())) { - found = true; - break; - } - } - if (!found) { - entries.add(secondEntry); - } - } - return new IndexWarmersMetaData(entries.toArray(new Entry[entries.size()])); - } - - public static class SearchSource extends ToXContentToBytes implements Writeable { - private final BytesReference binary; - private SearchSourceBuilder cached; - - public SearchSource(BytesReference bytesArray) { - if (bytesArray == null) { - throw new IllegalArgumentException("bytesArray must not be null"); - } - this.binary = bytesArray; - } - - public SearchSource(StreamInput input) throws IOException { - this(input.readBytesReference()); - } - - public SearchSource(SearchSourceBuilder source) { - try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { - source.toXContent(builder, ToXContent.EMPTY_PARAMS); - binary = builder.bytes(); - } catch (IOException ex) { - throw new ElasticsearchException("failed to generate XContent", ex); - } - } - - public SearchSourceBuilder build(QueryParseContext ctx) throws IOException { - if (cached == null) { - try (XContentParser parser = XContentFactory.xContent(binary).createParser(binary)) { - ctx.reset(parser); - cached = SearchSourceBuilder.parseSearchSource(parser, ctx); - } - } - return cached; - } - - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - if (binary == null) { - cached.toXContent(builder, params); - } else { - try (XContentParser parser = XContentFactory.xContent(binary).createParser(binary)) { - builder.copyCurrentStructure(parser); - } - } - return builder; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeBytesReference(binary); - } - - @Override - public SearchSource readFrom(StreamInput in) throws IOException { - return new SearchSource(in); - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - SearchSource that = (SearchSource) o; - - return binary.equals(that.binary); - - } - - @Override - public int hashCode() { - return binary.hashCode(); - } - } -} diff --git 
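[Note] Before leaving IndexWarmersMetaData: the deleted SearchSource class kept the warmer body as raw bytes (cheap to store, diff, and stream) and parsed it into a SearchSourceBuilder at most once, caching the result. A generic sketch of that lazy parse-and-cache shape; the Parser interface here is a hypothetical stand-in for the XContent parsing in the real class:

import java.io.IOException;

final class LazySource<T> {

    interface Parser<P> {
        P parse(byte[] raw) throws IOException;
    }

    private final byte[] raw; // cheap to store and to write to the wire
    private T cached;         // parsed at most once

    LazySource(byte[] raw) {
        if (raw == null) {
            throw new IllegalArgumentException("raw must not be null");
        }
        this.raw = raw;
    }

    T build(Parser<T> parser) throws IOException {
        if (cached == null) {
            cached = parser.parse(raw);
        }
        return cached;
    }
}

As in the original, build is not synchronized; the worst case under concurrent use is a redundant re-parse, which is harmless when parsing is pure.
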
a/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java index 5d0c814a285..0e6204ddd10 100644 --- a/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java +++ b/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java @@ -357,7 +357,7 @@ public class ThreadPool extends AbstractComponent { if (!Names.SAME.equals(name)) { command = new ThreadedRunnable(command, executor(name)); } - return scheduler.schedule(command, delay.millis(), TimeUnit.MILLISECONDS); + return scheduler.schedule(new LoggingRunnable(command), delay.millis(), TimeUnit.MILLISECONDS); } public void shutdown() { @@ -458,7 +458,7 @@ public class ThreadPool extends AbstractComponent { if (ThreadPoolType.FIXED == previousInfo.getThreadPoolType()) { SizeValue updatedQueueSize = getAsSizeOrUnbounded(settings, "capacity", getAsSizeOrUnbounded(settings, "queue", getAsSizeOrUnbounded(settings, "queue_size", previousInfo.getQueueSize()))); if (Objects.equals(previousInfo.getQueueSize(), updatedQueueSize)) { - int updatedSize = settings.getAsInt("size", previousInfo.getMax()); + int updatedSize = applyHardSizeLimit(name, settings.getAsInt("size", previousInfo.getMax())); if (previousInfo.getMax() != updatedSize) { logger.debug("updating thread_pool [{}], type [{}], size [{}], queue_size [{}]", name, type, updatedSize, updatedQueueSize); // if you think this code is crazy: that's because it is! @@ -480,7 +480,7 @@ public class ThreadPool extends AbstractComponent { defaultQueueSize = previousInfo.getQueueSize(); } - int size = settings.getAsInt("size", defaultSize); + int size = applyHardSizeLimit(name, settings.getAsInt("size", defaultSize)); SizeValue queueSize = getAsSizeOrUnbounded(settings, "capacity", getAsSizeOrUnbounded(settings, "queue", getAsSizeOrUnbounded(settings, "queue_size", defaultQueueSize))); logger.debug("creating thread_pool [{}], type [{}], size [{}], queue_size [{}]", name, type, size, queueSize); Executor executor = EsExecutors.newFixed(name, size, queueSize == null ? -1 : (int) queueSize.singles(), threadFactory); @@ -533,6 +533,21 @@ public class ThreadPool extends AbstractComponent { throw new IllegalArgumentException("No type found [" + type + "], for [" + name + "]"); } + private int applyHardSizeLimit(String name, int size) { + int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings); + if ((name.equals(Names.BULK) || name.equals(Names.INDEX)) && size > availableProcessors) { + // We use a hard max size for the indexing pools, because if too many threads enter Lucene's IndexWriter, it means + // too many segments written, too frequently, too much merging, etc: + // TODO: I would love to be loud here (throw an exception if you ask for a too-big size), but I think this is dangerous + // because on upgrade this setting could be in cluster state and hard for the user to correct? 
+ logger.warn("requested thread pool size [{}] for [{}] is too large; setting to maximum [{}] instead", + size, name, availableProcessors); + size = availableProcessors; + } + + return size; + } + private void updateSettings(Settings settings) { Map groupSettings = settings.getAsGroups(); if (groupSettings.isEmpty()) { @@ -633,6 +648,7 @@ public class ThreadPool extends AbstractComponent { runnable.run(); } catch (Throwable t) { logger.warn("failed to run {}", t, runnable.toString()); + throw t; } } diff --git a/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index 3d1cd56ba71..6a528194bfa 100644 --- a/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -71,7 +71,6 @@ import org.elasticsearch.search.SearchException; import org.elasticsearch.search.SearchParseException; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.search.warmer.IndexWarmerMissingException; import org.elasticsearch.snapshots.SnapshotException; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.TestSearchContext; @@ -494,12 +493,6 @@ public class ExceptionSerializationTests extends ESTestCase { assertEquals("[_na] msg", ex.getMessage()); } - public void testIndexWarmerMissingException() throws IOException { - IndexWarmerMissingException ex = serialize(new IndexWarmerMissingException("w1", "w2")); - assertEquals("index_warmer [w1, w2] missing", ex.getMessage()); - assertArrayEquals(new String[]{"w1", "w2"}, ex.names()); - } - public void testIndexTemplateMissingException() throws IOException { IndexTemplateMissingException ex = serialize(new IndexTemplateMissingException("name")); assertEquals("index_template [name] missing", ex.getMessage()); @@ -735,7 +728,6 @@ public class ExceptionSerializationTests extends ESTestCase { ids.put(90, org.elasticsearch.index.engine.RefreshFailedEngineException.class); ids.put(91, org.elasticsearch.search.aggregations.AggregationInitializationException.class); ids.put(92, org.elasticsearch.indices.recovery.DelayRecoveryException.class); - ids.put(93, org.elasticsearch.search.warmer.IndexWarmerMissingException.class); ids.put(94, org.elasticsearch.client.transport.NoNodeAvailableException.class); ids.put(95, null); ids.put(96, org.elasticsearch.snapshots.InvalidSnapshotNameException.class); diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java index e878a3df45c..74416742d12 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java @@ -26,7 +26,6 @@ import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexNotFoundException; -import org.elasticsearch.search.warmer.IndexWarmersMetaData.Entry; import org.elasticsearch.test.ESIntegTestCase; import java.util.ArrayList; @@ -52,7 +51,6 @@ public class GetIndexIT extends ESIntegTestCase { assertAcked(prepareCreate("idx").addAlias(new Alias("alias_idx")).addMapping("type1", "{\"type1\":{}}") .setSettings(Settings.builder().put("number_of_shards", 1)).get()); ensureSearchable("idx"); - 
assertAcked(client().admin().indices().preparePutWarmer("warmer1").setSearchRequest(client().prepareSearch("idx")).get()); createIndex("empty_idx"); ensureSearchable("idx", "empty_idx"); } @@ -66,7 +64,6 @@ public class GetIndexIT extends ESIntegTestCase { assertAliases(response, "idx"); assertMappings(response, "idx"); assertSettings(response, "idx"); - assertWarmers(response, "idx"); } public void testSimpleUnknownIndex() { @@ -87,7 +84,6 @@ public class GetIndexIT extends ESIntegTestCase { assertEmptyAliases(response); assertEmptyOrOnlyDefaultMappings(response, "empty_idx"); assertNonEmptySettings(response, "empty_idx"); - assertEmptyWarmers(response); } public void testSimpleMapping() { @@ -100,7 +96,6 @@ public class GetIndexIT extends ESIntegTestCase { assertMappings(response, "idx"); assertEmptyAliases(response); assertEmptySettings(response); - assertEmptyWarmers(response); } public void testSimpleAlias() { @@ -113,7 +108,6 @@ public class GetIndexIT extends ESIntegTestCase { assertAliases(response, "idx"); assertEmptyMappings(response); assertEmptySettings(response); - assertEmptyWarmers(response); } public void testSimpleSettings() { @@ -126,20 +120,6 @@ public class GetIndexIT extends ESIntegTestCase { assertSettings(response, "idx"); assertEmptyAliases(response); assertEmptyMappings(response); - assertEmptyWarmers(response); - } - - public void testSimpleWarmer() { - GetIndexResponse response = runWithRandomFeatureMethod(client().admin().indices().prepareGetIndex().addIndices("idx"), - Feature.WARMERS); - String[] indices = response.indices(); - assertThat(indices, notNullValue()); - assertThat(indices.length, equalTo(1)); - assertThat(indices[0], equalTo("idx")); - assertWarmers(response, "idx"); - assertEmptyAliases(response); - assertEmptyMappings(response); - assertEmptySettings(response); } public void testSimpleMixedFeatures() { @@ -169,11 +149,6 @@ public class GetIndexIT extends ESIntegTestCase { } else { assertEmptySettings(response); } - if (features.contains(Feature.WARMERS)) { - assertWarmers(response, "idx"); - } else { - assertEmptyWarmers(response); - } } public void testEmptyMixedFeatures() { @@ -199,7 +174,6 @@ public class GetIndexIT extends ESIntegTestCase { } else { assertEmptySettings(response); } - assertEmptyWarmers(response); } public void testGetIndexWithBlocks() { @@ -235,18 +209,6 @@ public class GetIndexIT extends ESIntegTestCase { } } - private void assertWarmers(GetIndexResponse response, String indexName) { - ImmutableOpenMap<String, List<Entry>> warmers = response.warmers(); - assertThat(warmers, notNullValue()); - assertThat(warmers.size(), equalTo(1)); - List<Entry> indexWarmers = warmers.get(indexName); - assertThat(indexWarmers, notNullValue()); - assertThat(indexWarmers.size(), equalTo(1)); - Entry warmer = indexWarmers.get(0); - assertThat(warmer, notNullValue()); - assertThat(warmer.name(), equalTo("warmer1")); - } - private void assertSettings(GetIndexResponse response, String indexName) { ImmutableOpenMap<String, Settings> settings = response.settings(); assertThat(settings, notNullValue()); @@ -305,11 +267,6 @@ public class GetIndexIT extends ESIntegTestCase { assertThat(alias.alias(), equalTo("alias_idx")); } - private void assertEmptyWarmers(GetIndexResponse response) { - assertThat(response.warmers(), notNullValue()); - assertThat(response.warmers().isEmpty(), equalTo(true)); - } - private void assertEmptySettings(GetIndexResponse response) { assertThat(response.settings(), notNullValue()); assertThat(response.settings().isEmpty(), equalTo(true)); diff --git
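[Note] Returning to the ThreadPool change a few files up: applyHardSizeLimit deliberately warns and clamps rather than throws, because an oversized value may already be persisted in cluster state, where a hard failure would be difficult for the user to correct on upgrade. The policy reduces to a small guard, sketched here with assumed names (boundedProcessors standing in for EsExecutors.boundedNumberOfProcessors(settings), and the pool-name check for Names.BULK and Names.INDEX):

final class PoolSizePolicy {

    private final int boundedProcessors; // from EsExecutors in the real code

    PoolSizePolicy(int boundedProcessors) {
        this.boundedProcessors = boundedProcessors;
    }

    int apply(String name, int requested) {
        boolean indexing = "bulk".equals(name) || "index".equals(name);
        if (indexing && requested > boundedProcessors) {
            // More threads than cores inside IndexWriter means more, smaller
            // segments written more often, and therefore more merging:
            // warn and cap instead of failing.
            System.err.printf("requested thread pool size [%d] for [%s] is too large; setting to maximum [%d] instead%n",
                    requested, name, boundedProcessors);
            return boundedProcessors;
        }
        return requested;
    }
}
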
a/core/src/test/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerRequestTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerRequestTests.java deleted file mode 100644 index f20564e1712..00000000000 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerRequestTests.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.indices.warmer.put; - -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.test.ESTestCase; - -import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.Matchers.hasSize; - -public class PutWarmerRequestTests extends ESTestCase { - // issue 4196 - public void testThatValidationWithoutSpecifyingSearchRequestFails() { - PutWarmerRequest putWarmerRequest = new PutWarmerRequest("foo"); - ActionRequestValidationException validationException = putWarmerRequest.validate(); - assertThat(validationException.validationErrors(), hasSize(1)); - assertThat(validationException.getMessage(), containsString("search request is missing")); - } -} diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index 357a7a6b7de..2b2fae6dd00 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -65,6 +65,7 @@ import org.junit.BeforeClass; import java.io.IOException; import java.util.ArrayList; +import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.concurrent.CountDownLatch; @@ -75,7 +76,15 @@ import java.util.concurrent.atomic.AtomicInteger; import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.state; import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.stateWithStartedPrimary; -import static org.hamcrest.Matchers.*; +import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.Matchers.arrayWithSize; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; public class TransportReplicationActionTests extends ESTestCase { @@ -289,7 +298,7 @@ public class TransportReplicationActionTests extends ESTestCase { final String index = "test"; final ShardId 
shardId = new ShardId(index, 0); // start with a replica - clusterService.setState(state(index, true, ShardRoutingState.STARTED, randomBoolean() ? ShardRoutingState.INITIALIZING : ShardRoutingState.STARTED)); + clusterService.setState(state(index, true, ShardRoutingState.STARTED, randomBoolean() ? ShardRoutingState.INITIALIZING : ShardRoutingState.STARTED)); logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); final ClusterState stateWithRelocatingReplica = state(index, true, ShardRoutingState.STARTED, ShardRoutingState.RELOCATING); @@ -310,7 +319,7 @@ public class TransportReplicationActionTests extends ESTestCase { primaryPhase.run(); assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true)); ShardRouting relocatingReplicaShard = stateWithRelocatingReplica.getRoutingTable().shardRoutingTable(index, shardId.id()).replicaShards().get(0); - for (String node : new String[] {relocatingReplicaShard.currentNodeId(), relocatingReplicaShard.relocatingNodeId()}) { + for (String node : new String[]{relocatingReplicaShard.currentNodeId(), relocatingReplicaShard.relocatingNodeId()}) { List requests = transport.capturedRequestsByTargetNode().get(node); assertThat(requests, notNullValue()); assertThat(requests.size(), equalTo(1)); @@ -484,7 +493,39 @@ public class TransportReplicationActionTests extends ESTestCase { replicationPhase.run(); final CapturingTransport.CapturedRequest[] capturedRequests = transport.capturedRequests(); transport.clear(); - assertThat(capturedRequests.length, equalTo(assignedReplicas)); + + HashMap nodesSentTo = new HashMap<>(); + boolean executeOnReplica = + action.shouldExecuteReplication(clusterService.state().getMetaData().index(shardId.getIndex()).getSettings()); + for (CapturingTransport.CapturedRequest capturedRequest : capturedRequests) { + // no duplicate requests + Request replicationRequest = (Request) capturedRequest.request; + assertNull(nodesSentTo.put(capturedRequest.node.getId(), replicationRequest)); + // the request is hitting the correct shard + assertEquals(request.shardId, replicationRequest.shardId); + } + + // no request was sent to the local node + assertThat(nodesSentTo.keySet(), not(hasItem(clusterService.state().getNodes().localNodeId()))); + + // requests were sent to the correct shard copies + for (ShardRouting shard : clusterService.state().getRoutingTable().shardRoutingTable(shardId.getIndex(), shardId.id())) { + if (shard.primary() == false && executeOnReplica == false) { + continue; + } + if (shard.unassigned()) { + continue; + } + if (shard.primary() == false) { + nodesSentTo.remove(shard.currentNodeId()); + } + if (shard.relocating()) { + nodesSentTo.remove(shard.relocatingNodeId()); + } + } + + assertThat(nodesSentTo.entrySet(), is(empty())); + if (assignedReplicas > 0) { assertThat("listener is done, but there are outstanding replicas", listener.isDone(), equalTo(false)); } @@ -509,6 +550,12 @@ public class TransportReplicationActionTests extends ESTestCase { transport.clear(); assertEquals(1, shardFailedRequests.length); CapturingTransport.CapturedRequest shardFailedRequest = shardFailedRequests[0]; + // get the shard the request was sent to + ShardRouting routing = clusterService.state().getRoutingNodes().node(capturedRequest.node.id()).get(request.shardId.id()); + // and the shard that was requested to be failed + ShardStateAction.ShardRoutingEntry shardRoutingEntry = (ShardStateAction.ShardRoutingEntry) shardFailedRequest.request; + // the shard the request was 
sent to and the shard to be failed should be the same + assertEquals(shardRoutingEntry.getShardRouting(), routing); failures.add(shardFailedRequest); transport.handleResponse(shardFailedRequest.requestId, TransportResponse.Empty.INSTANCE); } diff --git a/core/src/test/java/org/elasticsearch/bwcompat/GetIndexBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/GetIndexBackwardsCompatibilityIT.java index 9a87c888747..9abe6bfbf44 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/GetIndexBackwardsCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/GetIndexBackwardsCompatibilityIT.java @@ -28,7 +28,6 @@ import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.search.warmer.IndexWarmersMetaData.Entry; import org.elasticsearch.test.ESBackcompatTestCase; import java.util.List; @@ -88,21 +87,4 @@ public class GetIndexBackwardsCompatibilityIT extends ESBackcompatTestCase { assertThat(settings.get("index.number_of_shards"), equalTo("1")); } - public void testGetWarmers() throws Exception { - createIndex("test"); - ensureSearchable("test"); - assertAcked(client().admin().indices().preparePutWarmer("warmer1").setSearchRequest(client().prepareSearch("test")).get()); - ensureSearchable("test"); - GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().addIndices("test").addFeatures(Feature.WARMERS) - .execute().actionGet(); - ImmutableOpenMap> warmersMap = getIndexResponse.warmers(); - assertThat(warmersMap, notNullValue()); - assertThat(warmersMap.size(), equalTo(1)); - List warmersList = warmersMap.get("test"); - assertThat(warmersList, notNullValue()); - assertThat(warmersList.size(), equalTo(1)); - Entry warmer = warmersList.get(0); - assertThat(warmer, notNullValue()); - assertThat(warmer.name(), equalTo("warmer1")); - } } diff --git a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java index 7011b4092e4..667716937da 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java @@ -32,20 +32,25 @@ import org.elasticsearch.action.admin.indices.upgrade.UpgradeIT; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.MultiDataPathUpgrader; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.gateway.MetaDataStateFormat; import org.elasticsearch.index.engine.EngineConfig; import 
org.elasticsearch.index.engine.Segment; import org.elasticsearch.index.mapper.string.StringFieldMapperPositionIncrementGapTests; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.shard.MergePolicyConfig; -import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; @@ -423,4 +428,62 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { UpgradeIT.assertUpgraded(client(), indexName); } + private Path getNodeDir(String indexFile) throws IOException { + Path unzipDir = createTempDir(); + Path unzipDataDir = unzipDir.resolve("data"); + + // decompress the index + Path backwardsIndex = getBwcIndicesPath().resolve(indexFile); + try (InputStream stream = Files.newInputStream(backwardsIndex)) { + TestUtil.unzip(stream, unzipDir); + } + + // check it is unique + assertTrue(Files.exists(unzipDataDir)); + Path[] list = FileSystemUtils.files(unzipDataDir); + if (list.length != 1) { + throw new IllegalStateException("Backwards index must contain exactly one cluster"); + } + + // the bwc scripts packs the indices under this path + return list[0].resolve("nodes/0/"); + } + + public void testOldClusterStates() throws Exception { + // dangling indices do not load the global state, only the per-index states + // so we make sure we can read them separately + MetaDataStateFormat globalFormat = new MetaDataStateFormat(XContentType.JSON, "global-") { + + @Override + public void toXContent(XContentBuilder builder, MetaData state) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public MetaData fromXContent(XContentParser parser) throws IOException { + return MetaData.Builder.fromXContent(parser); + } + }; + MetaDataStateFormat indexFormat = new MetaDataStateFormat(XContentType.JSON, "state-") { + + @Override + public void toXContent(XContentBuilder builder, IndexMetaData state) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public IndexMetaData fromXContent(XContentParser parser) throws IOException { + return IndexMetaData.Builder.fromXContent(parser); + } + }; + Collections.shuffle(indexes, random()); + for (String indexFile : indexes) { + String indexName = indexFile.replace(".zip", "").toLowerCase(Locale.ROOT).replace("unsupported-", "index-"); + Path nodeDir = getNodeDir(indexFile); + logger.info("Parsing cluster state files from index [" + indexName + "]"); + assertNotNull(globalFormat.loadLatestState(logger, nodeDir)); // no exception + Path indexDir = nodeDir.resolve("indices").resolve(indexName); + assertNotNull(indexFormat.loadLatestState(logger, indexDir)); // no exception + } + } } diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java b/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java index 6e7e338d8b9..2d781c866de 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java @@ -27,6 +27,7 @@ import org.elasticsearch.cluster.service.InternalClusterService; import org.elasticsearch.cluster.service.PendingClusterTask; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractLifecycleComponent; import 
org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.inject.Inject; @@ -51,9 +52,12 @@ import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.CyclicBarrier; import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -61,6 +65,7 @@ import java.util.concurrent.atomic.AtomicInteger; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -796,7 +801,92 @@ public class ClusterServiceIT extends ESIntegTestCase { assertTrue(published.get()); } - public void testClusterStateBatchedUpdates() throws InterruptedException { + // test that for a single thread, tasks are executed in the order + // that they are submitted + public void testClusterStateUpdateTasksAreExecutedInOrder() throws BrokenBarrierException, InterruptedException { + Settings settings = settingsBuilder() + .put("discovery.type", "local") + .build(); + internalCluster().startNode(settings); + ClusterService clusterService = internalCluster().getInstance(ClusterService.class); + + class TaskExecutor implements ClusterStateTaskExecutor<Integer> { + List<Integer> tasks = new ArrayList<>(); + + @Override + public BatchResult<Integer> execute(ClusterState currentState, List<Integer> tasks) throws Exception { + this.tasks.addAll(tasks); + return BatchResult.<Integer>builder().successes(tasks).build(ClusterState.builder(currentState).build()); + } + + @Override + public boolean runOnlyOnMaster() { + return false; + } + } + + int numberOfThreads = randomIntBetween(2, 8); + TaskExecutor[] executors = new TaskExecutor[numberOfThreads]; + for (int i = 0; i < numberOfThreads; i++) { + executors[i] = new TaskExecutor(); + } + + int tasksSubmittedPerThread = randomIntBetween(2, 1024); + + CopyOnWriteArrayList<Tuple<String, Throwable>> failures = new CopyOnWriteArrayList<>(); + CountDownLatch updateLatch = new CountDownLatch(numberOfThreads * tasksSubmittedPerThread); + + ClusterStateTaskListener listener = new ClusterStateTaskListener() { + @Override + public void onFailure(String source, Throwable t) { + logger.error("unexpected failure: [{}]", t, source); + failures.add(new Tuple<>(source, t)); + updateLatch.countDown(); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + updateLatch.countDown(); + } + }; + + CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads); + + for (int i = 0; i < numberOfThreads; i++) { + final int index = i; + Thread thread = new Thread(() -> { + try { + barrier.await(); + for (int j = 0; j < tasksSubmittedPerThread; j++) { + clusterService.submitStateUpdateTask("[" + index + "][" + j + "]", j, ClusterStateTaskConfig.build(randomFrom(Priority.values())), executors[index], listener); + } + barrier.await(); + } catch (InterruptedException | BrokenBarrierException e) { + throw new AssertionError(e); + } + }); + thread.start(); + } + + // wait for all threads to be ready + barrier.await(); + // wait for all threads to 
finish + barrier.await(); + + updateLatch.await(); + + assertThat(failures, empty()); + + for (int i = 0; i < numberOfThreads; i++) { + assertEquals(tasksSubmittedPerThread, executors[i].tasks.size()); + for (int j = 0; j < tasksSubmittedPerThread; j++) { + assertNotNull(executors[i].tasks.get(j)); + assertEquals("cluster state update task executed out of order", j, (int)executors[i].tasks.get(j)); + } + } + } + + public void testClusterStateBatchedUpdates() throws BrokenBarrierException, InterruptedException { Settings settings = settingsBuilder() .put("discovery.type", "local") .build(); @@ -884,19 +974,12 @@ public class ClusterServiceIT extends ESIntegTestCase { counts.merge(executor, 1, (previous, one) -> previous + one); } - CountDownLatch startGate = new CountDownLatch(1); - CountDownLatch endGate = new CountDownLatch(numberOfThreads); - AtomicBoolean interrupted = new AtomicBoolean(); + CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads); for (int i = 0; i < numberOfThreads; i++) { final int index = i; Thread thread = new Thread(() -> { try { - try { - startGate.await(); - } catch (InterruptedException e) { - interrupted.set(true); - return; - } + barrier.await(); for (int j = 0; j < tasksSubmittedPerThread; j++) { ClusterStateTaskExecutor executor = assignments.get(index * tasksSubmittedPerThread + j); clusterService.submitStateUpdateTask( @@ -906,16 +989,18 @@ public class ClusterServiceIT extends ESIntegTestCase { executor, listener); } - } finally { - endGate.countDown(); + barrier.await(); + } catch (BrokenBarrierException | InterruptedException e) { + throw new AssertionError(e); } }); thread.start(); } - startGate.countDown(); - endGate.await(); - assertFalse(interrupted.get()); + // wait for all threads to be ready + barrier.await(); + // wait for all threads to finish + barrier.await(); // wait until all the cluster state updates have been processed updateLatch.await(); diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java b/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java index 6f69daeef5c..0a4777e23d4 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java @@ -39,7 +39,6 @@ import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; @@ -49,7 +48,6 @@ import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.search.warmer.IndexWarmersMetaData; import org.elasticsearch.test.ESIntegTestCase; import java.util.Collections; @@ -493,9 +491,6 @@ public class ClusterStateDiffIT extends ESIntegTestCase { builder.settings(settingsBuilder); builder.numberOfShards(randomIntBetween(1, 10)).numberOfReplicas(randomInt(10)); int aliasCount = randomInt(10); - if (randomBoolean()) { - builder.putCustom(IndexWarmersMetaData.TYPE, randomWarmers()); - } for (int i = 0; i < aliasCount; i++) { builder.putAlias(randomAlias()); } @@ -505,7 +500,7 @@ public class ClusterStateDiffIT extends 
ESIntegTestCase { @Override public IndexMetaData randomChange(IndexMetaData part) { IndexMetaData.Builder builder = IndexMetaData.builder(part); - switch (randomIntBetween(0, 3)) { + switch (randomIntBetween(0, 2)) { case 0: builder.settings(Settings.builder().put(part.getSettings()).put(randomSettings(Settings.EMPTY))); break; @@ -519,9 +514,6 @@ public class ClusterStateDiffIT extends ESIntegTestCase { case 2: builder.settings(Settings.builder().put(part.getSettings()).put(IndexMetaData.SETTING_INDEX_UUID, Strings.randomBase64UUID())); break; - case 3: - builder.putCustom(IndexWarmersMetaData.TYPE, randomWarmers()); - break; default: throw new IllegalArgumentException("Shouldn't be here"); } @@ -530,23 +522,6 @@ public class ClusterStateDiffIT extends ESIntegTestCase { }); } - /** - * Generates a random warmer - */ - private IndexWarmersMetaData randomWarmers() { - if (randomBoolean()) { - return new IndexWarmersMetaData( - new IndexWarmersMetaData.Entry( - randomName("warm"), - new String[]{randomName("type")}, - randomBoolean(), - new IndexWarmersMetaData.SearchSource(new BytesArray(randomAsciiOfLength(1000)))) - ); - } else { - return new IndexWarmersMetaData(); - } - } - /** * Randomly adds, deletes or updates index templates in the metadata */ @@ -577,9 +552,6 @@ public class ClusterStateDiffIT extends ESIntegTestCase { for (int i = 0; i < aliasCount; i++) { builder.putAlias(randomAlias()); } - if (randomBoolean()) { - builder.putCustom(IndexWarmersMetaData.TYPE, randomWarmers()); - } return builder.build(); } diff --git a/core/src/test/java/org/elasticsearch/cluster/ack/AckIT.java b/core/src/test/java/org/elasticsearch/cluster/ack/AckIT.java index 13a5cae6ca3..9f646d0df58 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ack/AckIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ack/AckIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.ack; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse; @@ -27,9 +26,6 @@ import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse; -import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse; -import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse; -import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerResponse; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.AliasMetaData; @@ -42,12 +38,9 @@ import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.DiscoverySettings; -import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.search.warmer.IndexWarmersMetaData; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; -import java.util.List; import java.util.concurrent.TimeUnit; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; @@ -88,83 
+81,6 @@ public class AckIT extends ESIntegTestCase { assertThat(updateSettingsResponse.isAcknowledged(), equalTo(false)); } - public void testPutWarmerAcknowledgement() { - createIndex("test"); - // make sure one shard is started so the search during put warmer will not fail - index("test", "type", "1", "f", 1); - - assertAcked(client().admin().indices().preparePutWarmer("custom_warmer") - .setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery()))); - - for (Client client : clients()) { - GetWarmersResponse getWarmersResponse = client.admin().indices().prepareGetWarmers().setLocal(true).get(); - assertThat(getWarmersResponse.warmers().size(), equalTo(1)); - ObjectObjectCursor> entry = getWarmersResponse.warmers().iterator().next(); - assertThat(entry.key, equalTo("test")); - assertThat(entry.value.size(), equalTo(1)); - assertThat(entry.value.get(0).name(), equalTo("custom_warmer")); - } - } - - public void testPutWarmerNoAcknowledgement() throws InterruptedException { - createIndex("test"); - // make sure one shard is started so the search during put warmer will not fail - index("test", "type", "1", "f", 1); - - PutWarmerResponse putWarmerResponse = client().admin().indices().preparePutWarmer("custom_warmer").setTimeout("0s") - .setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery())) - .get(); - assertThat(putWarmerResponse.isAcknowledged(), equalTo(false)); - /* Since we don't wait for the ack here we have to wait until the search request has been executed from the master - * otherwise the test infra might have already deleted the index and the search request fails on all shards causing - * the test to fail too. We simply wait until the the warmer has been installed and also clean it up afterwards.*/ - assertTrue(awaitBusy(() -> { - for (Client client : clients()) { - GetWarmersResponse getWarmersResponse = client.admin().indices().prepareGetWarmers().setLocal(true).get(); - if (getWarmersResponse.warmers().size() != 1) { - return false; - } - } - return true; - })); - assertAcked(client().admin().indices().prepareDeleteWarmer().setIndices("test").setNames("custom_warmer")); - } - - public void testDeleteWarmerAcknowledgement() { - createIndex("test"); - index("test", "type", "1", "f", 1); - - assertAcked(client().admin().indices().preparePutWarmer("custom_warmer") - .setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery()))); - - assertAcked(client().admin().indices().prepareDeleteWarmer().setIndices("test").setNames("custom_warmer")); - - for (Client client : clients()) { - GetWarmersResponse getWarmersResponse = client.admin().indices().prepareGetWarmers().setLocal(true).get(); - assertThat(getWarmersResponse.warmers().size(), equalTo(0)); - } - } - - public void testDeleteWarmerNoAcknowledgement() throws InterruptedException { - createIndex("test"); - index("test", "type", "1", "f", 1); - - assertAcked(client().admin().indices().preparePutWarmer("custom_warmer") - .setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery()))); - - DeleteWarmerResponse deleteWarmerResponse = client().admin().indices().prepareDeleteWarmer().setIndices("test").setNames("custom_warmer").setTimeout("0s").get(); - assertFalse(deleteWarmerResponse.isAcknowledged()); - assertTrue(awaitBusy(() -> { - for (Client client : clients()) { - GetWarmersResponse getWarmersResponse = 
client.admin().indices().prepareGetWarmers().setLocal(true).get(); - if (getWarmersResponse.warmers().size() > 0) { - return false; - } - } - return true; - })); - } - public void testClusterRerouteAcknowledgement() throws InterruptedException { assertAcked(prepareCreate("test").setSettings(Settings.builder() .put(indexSettings()) diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java index 91a421ee420..4076286ce5d 100644 --- a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java @@ -20,9 +20,15 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.Version; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.test.ESTestCase; +import java.io.IOException; + import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -110,4 +116,36 @@ public class MetaDataTests extends ESTestCase { assertThat(ex.getMessage(), is("index/alias [alias2] provided with routing value [1,2] that resolved to several routing values, rejecting operation")); } } + + public void testUnknownFieldClusterMetaData() throws IOException { + BytesReference metadata = JsonXContent.contentBuilder() + .startObject() + .startObject("meta-data") + .field("random", "value") + .endObject() + .endObject().bytes(); + XContentParser parser = JsonXContent.jsonXContent.createParser(metadata); + try { + MetaData.Builder.fromXContent(parser); + fail(); + } catch (IllegalArgumentException e) { + assertEquals("Unexpected field [random]", e.getMessage()); + } + } + + public void testUnknownFieldIndexMetaData() throws IOException { + BytesReference metadata = JsonXContent.contentBuilder() + .startObject() + .startObject("index_name") + .field("random", "value") + .endObject() + .endObject().bytes(); + XContentParser parser = JsonXContent.jsonXContent.createParser(metadata); + try { + IndexMetaData.Builder.fromXContent(parser); + fail(); + } catch (IllegalArgumentException e) { + assertEquals("Unexpected field [random]", e.getMessage()); + } + } } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java index 8df9d1e09a4..6dda699f3d9 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java @@ -381,8 +381,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { assertThat(primary, notNullValue()); String fromId = primary.currentNodeId(); String toId = r.relocatingNodeId(); - logger.error("From: " + fromId + " with Version: " + routingNodes.node(fromId).node().version() + " to: " + toId + " with Version: " + routingNodes.node(toId).node().version()); - logger.error(routingNodes.prettyPrint()); + logger.trace("From: " + fromId + " with Version: " + routingNodes.node(fromId).node().version() + " to: " + toId + " with Version: " + routingNodes.node(toId).node().version()); 
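+ // shards must only be relocated to nodes on the same or a newer version than the node they came from 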
assertTrue(routingNodes.node(toId).node().version().onOrAfter(routingNodes.node(fromId).node().version())); } } diff --git a/core/src/test/java/org/elasticsearch/common/cache/CacheTests.java b/core/src/test/java/org/elasticsearch/common/cache/CacheTests.java index 0985bc4b88e..921c66f7acb 100644 --- a/core/src/test/java/org/elasticsearch/common/cache/CacheTests.java +++ b/core/src/test/java/org/elasticsearch/common/cache/CacheTests.java @@ -31,7 +31,10 @@ import java.util.List; import java.util.Map; import java.util.Random; import java.util.Set; +import java.util.concurrent.BrokenBarrierException; +import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.CyclicBarrier; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; @@ -42,6 +45,8 @@ import java.util.concurrent.atomic.AtomicReferenceArray; import java.util.stream.Collectors; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.is; public class CacheTests extends ESTestCase { private int numberOfEntries; @@ -483,7 +488,7 @@ public class CacheTests extends ESTestCase { return value; }); } catch (ExecutionException e) { - fail(e.getMessage()); + throw new AssertionError(e); } } for (int i = 0; i < numberOfEntries; i++) { @@ -491,25 +496,21 @@ public class CacheTests extends ESTestCase { } } - public void testComputeIfAbsentCallsOnce() throws InterruptedException { + public void testComputeIfAbsentCallsOnce() throws BrokenBarrierException, InterruptedException { int numberOfThreads = randomIntBetween(2, 32); final Cache cache = CacheBuilder.builder().build(); AtomicReferenceArray flags = new AtomicReferenceArray(numberOfEntries); for (int j = 0; j < numberOfEntries; j++) { flags.set(j, false); } - CountDownLatch startGate = new CountDownLatch(1); - CountDownLatch endGate = new CountDownLatch(numberOfThreads); - AtomicBoolean interrupted = new AtomicBoolean(); + + CopyOnWriteArrayList failures = new CopyOnWriteArrayList<>(); + + CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads); for (int i = 0; i < numberOfThreads; i++) { Thread thread = new Thread(() -> { try { - try { - startGate.await(); - } catch (InterruptedException e) { - interrupted.set(true); - return; - } + barrier.await(); for (int j = 0; j < numberOfEntries; j++) { try { cache.computeIfAbsent(j, key -> { @@ -517,18 +518,24 @@ public class CacheTests extends ESTestCase { return Integer.toString(key); }); } catch (ExecutionException e) { - throw new RuntimeException(e); + failures.add(e); + break; } } - } finally { - endGate.countDown(); + barrier.await(); + } catch (BrokenBarrierException | InterruptedException e) { + throw new AssertionError(e); } }); thread.start(); } - startGate.countDown(); - endGate.await(); - assertFalse(interrupted.get()); + + // wait for all threads to be ready + barrier.await(); + // wait for all threads to finish + barrier.await(); + + assertThat(failures, is(empty())); } public void testComputeIfAbsentThrowsExceptionIfLoaderReturnsANullValue() { @@ -541,7 +548,7 @@ public class CacheTests extends ESTestCase { } } - public void testDependentKeyDeadlock() throws InterruptedException { + public void testDependentKeyDeadlock() throws BrokenBarrierException, InterruptedException { class Key { private final int key; @@ -568,18 +575,19 @@ public class CacheTests extends ESTestCase { int numberOfThreads = 
randomIntBetween(2, 32); final Cache cache = CacheBuilder.builder().build(); - CountDownLatch startGate = new CountDownLatch(1); + + CopyOnWriteArrayList failures = new CopyOnWriteArrayList<>(); + + CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads); CountDownLatch deadlockLatch = new CountDownLatch(numberOfThreads); - AtomicBoolean interrupted = new AtomicBoolean(); List threads = new ArrayList<>(); for (int i = 0; i < numberOfThreads; i++) { Thread thread = new Thread(() -> { try { try { - startGate.await(); - } catch (InterruptedException e) { - interrupted.set(true); - return; + barrier.await(); + } catch (BrokenBarrierException | InterruptedException e) { + throw new AssertionError(e); } Random random = new Random(random().nextLong()); for (int j = 0; j < numberOfEntries; j++) { @@ -594,7 +602,8 @@ public class CacheTests extends ESTestCase { } }); } catch (ExecutionException e) { - fail(e.getMessage()); + failures.add(e); + break; } } } finally { @@ -631,7 +640,7 @@ public class CacheTests extends ESTestCase { }, 1, 1, TimeUnit.SECONDS); // everything is setup, release the hounds - startGate.countDown(); + barrier.await(); // wait for either deadlock to be detected or the threads to terminate deadlockLatch.await(); @@ -639,24 +648,21 @@ public class CacheTests extends ESTestCase { // shutdown the watchdog service scheduler.shutdown(); + assertThat(failures, is(empty())); + assertFalse("deadlock", deadlock.get()); } - public void testCachePollution() throws InterruptedException { + public void testCachePollution() throws BrokenBarrierException, InterruptedException { int numberOfThreads = randomIntBetween(2, 32); final Cache cache = CacheBuilder.builder().build(); - CountDownLatch startGate = new CountDownLatch(1); - CountDownLatch endGate = new CountDownLatch(numberOfThreads); - AtomicBoolean interrupted = new AtomicBoolean(); + + CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads); + for (int i = 0; i < numberOfThreads; i++) { Thread thread = new Thread(() -> { try { - try { - startGate.await(); - } catch (InterruptedException e) { - interrupted.set(true); - return; - } + barrier.await(); Random random = new Random(random().nextLong()); for (int j = 0; j < numberOfEntries; j++) { Integer key = random.nextInt(numberOfEntries); @@ -686,21 +692,23 @@ public class CacheTests extends ESTestCase { cache.get(key); } } - } finally { - endGate.countDown(); + barrier.await(); + } catch (BrokenBarrierException | InterruptedException e) { + throw new AssertionError(e); } }); thread.start(); } - startGate.countDown(); - endGate.await(); - assertFalse(interrupted.get()); + // wait for all threads to be ready + barrier.await(); + // wait for all threads to finish + barrier.await(); } // test that the cache is not corrupted under lots of concurrent modifications, even hitting the same key // here be dragons: this test did catch one subtle bug during development; do not remove lightly - public void testTorture() throws InterruptedException { + public void testTorture() throws BrokenBarrierException, InterruptedException { int numberOfThreads = randomIntBetween(2, 32); final Cache cache = CacheBuilder.builder() @@ -708,32 +716,28 @@ public class CacheTests extends ESTestCase { .weigher((k, v) -> 2) .build(); - CountDownLatch startGate = new CountDownLatch(1); - CountDownLatch endGate = new CountDownLatch(numberOfThreads); - AtomicBoolean interrupted = new AtomicBoolean(); + CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads); for (int i = 0; i < numberOfThreads; i++) 
{ Thread thread = new Thread(() -> { try { - try { - startGate.await(); - } catch (InterruptedException e) { - interrupted.set(true); - return; - } + barrier.await(); Random random = new Random(random().nextLong()); for (int j = 0; j < numberOfEntries; j++) { Integer key = random.nextInt(numberOfEntries); cache.put(key, Integer.toString(j)); } - } finally { - endGate.countDown(); + barrier.await(); + } catch (BrokenBarrierException | InterruptedException e) { + throw new AssertionError(e); } }); thread.start(); } - startGate.countDown(); - endGate.await(); - assertFalse(interrupted.get()); + + // wait for all threads to be ready + barrier.await(); + // wait for all threads to finish + barrier.await(); cache.refresh(); assertEquals(500, cache.count()); diff --git a/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java b/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java index 0a15693dfd5..484b88f096f 100644 --- a/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java +++ b/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java @@ -38,7 +38,6 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; import org.apache.lucene.store.MMapDirectory; import org.apache.lucene.store.MockDirectoryWrapper; -import org.apache.lucene.util.Version; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -54,14 +53,6 @@ import java.util.concurrent.atomic.AtomicBoolean; * */ public class LuceneTests extends ESTestCase { - /** - * simple test that ensures that we bump the version on Upgrade - */ - public void testVersion() { - // note this is just a silly sanity check, we test it in lucene, and we point to it this way - assertEquals(Lucene.VERSION, Version.LATEST); - } - public void testWaitForIndex() throws Exception { final MockDirectoryWrapper dir = newMockDirectory(); diff --git a/core/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java b/core/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java index fb3c021fd5d..d6abcfe7735 100644 --- a/core/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java +++ b/core/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java @@ -56,7 +56,6 @@ import java.util.List; import java.util.Map; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -292,7 +291,6 @@ public class VersionsTests extends ESTestCase { } iw.close(); - assertThat(IndexWriter.isLocked(iw.getDirectory()), is(false)); ir.close(); dir.close(); } diff --git a/core/src/test/java/org/elasticsearch/index/indexing/IndexingSlowLogTests.java b/core/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java similarity index 94% rename from core/src/test/java/org/elasticsearch/index/indexing/IndexingSlowLogTests.java rename to core/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java index 10dd82a7ae8..c2ca2ff1509 100644 --- a/core/src/test/java/org/elasticsearch/index/indexing/IndexingSlowLogTests.java +++ b/core/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java @@ -17,14 +17,14 @@ * under the License. 
*/ -package org.elasticsearch.index.indexing; +package org.elasticsearch.index; import org.apache.lucene.document.Field.Store; import org.apache.lucene.document.IntField; import org.apache.lucene.document.StringField; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.index.indexing.IndexingSlowLog.SlowLogParsedDocumentPrinter; +import org.elasticsearch.index.IndexingSlowLog.SlowLogParsedDocumentPrinter; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.test.ESTestCase; diff --git a/core/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTests.java b/core/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTests.java index d2bf6bebc5c..69831d7471a 100644 --- a/core/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTests.java +++ b/core/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTests.java @@ -31,8 +31,11 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.LogByteSizeMergePolicy; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.join.BitSetProducer; +import org.apache.lucene.store.BaseDirectoryWrapper; +import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.BitSet; @@ -89,7 +92,8 @@ public class BitSetFilterCacheTests extends ESTestCase { writer.addDocument(document); writer.commit(); - IndexReader reader = DirectoryReader.open(writer, false); + DirectoryReader reader = DirectoryReader.open(writer, false); + reader = ElasticsearchDirectoryReader.wrap(reader, new ShardId(new Index("test"), 0)); IndexSearcher searcher = new IndexSearcher(reader); BitsetFilterCache cache = new BitsetFilterCache(INDEX_SETTINGS, warmer, new BitsetFilterCache.Listener() { @@ -114,6 +118,7 @@ public class BitSetFilterCacheTests extends ESTestCase { writer.forceMerge(1); reader.close(); reader = DirectoryReader.open(writer, false); + reader = ElasticsearchDirectoryReader.wrap(reader, new ShardId(new Index("test"), 0)); searcher = new IndexSearcher(reader); assertThat(matchCount(filter, reader), equalTo(3)); @@ -139,7 +144,7 @@ public class BitSetFilterCacheTests extends ESTestCase { writer.addDocument(document); writer.commit(); final DirectoryReader writerReader = DirectoryReader.open(writer, false); - final IndexReader reader = randomBoolean() ? 
writerReader : ElasticsearchDirectoryReader.wrap(writerReader, new ShardId("test", 0)); + final IndexReader reader = ElasticsearchDirectoryReader.wrap(writerReader, new ShardId("test", 0)); final AtomicLong stats = new AtomicLong(); final AtomicInteger onCacheCalls = new AtomicInteger(); @@ -192,4 +197,39 @@ public class BitSetFilterCacheTests extends ESTestCase { } } + public void testRejectOtherIndex() throws IOException { + BitsetFilterCache cache = new BitsetFilterCache(INDEX_SETTINGS, warmer, new BitsetFilterCache.Listener() { + @Override + public void onCache(ShardId shardId, Accountable accountable) { + + } + + @Override + public void onRemoval(ShardId shardId, Accountable accountable) { + + } + }); + + Directory dir = newDirectory(); + IndexWriter writer = new IndexWriter( + dir, + newIndexWriterConfig() + ); + writer.addDocument(new Document()); + DirectoryReader reader = DirectoryReader.open(writer, true); + writer.close(); + reader = ElasticsearchDirectoryReader.wrap(reader, new ShardId(new Index("test2"), 0)); + + BitSetProducer producer = cache.getBitSetProducer(new MatchAllDocsQuery()); + + try { + producer.getBitSet(reader.leaves().get(0)); + fail(); + } catch (IllegalStateException expected) { + assertEquals("Trying to load bit set for index [test2] with cache of index [test]", expected.getMessage()); + } finally { + IOUtils.close(reader, dir); + } + } + } diff --git a/core/src/test/java/org/elasticsearch/index/codec/CodecTests.java b/core/src/test/java/org/elasticsearch/index/codec/CodecTests.java index eae80418b0d..03a87dec232 100644 --- a/core/src/test/java/org/elasticsearch/index/codec/CodecTests.java +++ b/core/src/test/java/org/elasticsearch/index/codec/CodecTests.java @@ -111,7 +111,7 @@ public class CodecTests extends ESTestCase { SimilarityService similarityService = new SimilarityService(settings, Collections.emptyMap()); AnalysisService analysisService = new AnalysisRegistry(null, new Environment(nodeSettings)).build(settings); MapperRegistry mapperRegistry = new MapperRegistry(Collections.emptyMap(), Collections.emptyMap()); - MapperService service = new MapperService(settings, analysisService, similarityService, mapperRegistry); + MapperService service = new MapperService(settings, analysisService, similarityService, mapperRegistry, () -> null); return new CodecService(service, ESLoggerFactory.getLogger("test")); } diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index cc0a3d7f3df..e23b3ddef41 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -71,11 +71,9 @@ import org.elasticsearch.index.VersionType; import org.elasticsearch.index.analysis.AnalysisService; import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.engine.Engine.Searcher; -import org.elasticsearch.index.indexing.ShardIndexingService; import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperForType; -import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.Mapper.BuilderContext; import org.elasticsearch.index.mapper.MapperBuilders; import org.elasticsearch.index.mapper.MapperService; @@ -274,7 +272,7 @@ public class InternalEngineTests extends ESTestCase { IndexWriterConfig iwc = 
newIndexWriterConfig(); TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE); - EngineConfig config = new EngineConfig(shardId, threadPool, new ShardIndexingService(shardId, INDEX_SETTINGS), indexSettings + EngineConfig config = new EngineConfig(shardId, threadPool, indexSettings , null, store, createSnapshotDeletionPolicy(), mergePolicy, mergeSchedulerConfig, iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), new Engine.EventListener() { @Override @@ -1972,7 +1970,7 @@ public class InternalEngineTests extends ESTestCase { AnalysisService analysisService = new AnalysisService(indexSettings, Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap()); SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap()); MapperRegistry mapperRegistry = new IndicesModule().getMapperRegistry(); - MapperService mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry); + MapperService mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry, () -> null); DocumentMapper.Builder b = new DocumentMapper.Builder(rootBuilder, mapperService); this.docMapper = b.build(mapperService); } @@ -2020,7 +2018,7 @@ public class InternalEngineTests extends ESTestCase { /* create a TranslogConfig that has been created with a different UUID */ TranslogConfig translogConfig = new TranslogConfig(shardId, translog.location(), config.getIndexSettings(), BigArrays.NON_RECYCLING_INSTANCE); - EngineConfig brokenConfig = new EngineConfig(shardId, threadPool, config.getIndexingService(), config.getIndexSettings() + EngineConfig brokenConfig = new EngineConfig(shardId, threadPool, config.getIndexSettings() , null, store, createSnapshotDeletionPolicy(), newMergePolicy(), config.getMergeSchedulerConfig(), config.getAnalyzer(), config.getSimilarity(), new CodecService(null, logger), config.getEventListener() , config.getTranslogRecoveryPerformer(), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5)); diff --git a/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java index 79e18a96969..bd170667bea 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java @@ -48,7 +48,6 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.codec.CodecService; -import org.elasticsearch.index.indexing.ShardIndexingService; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParsedDocument; @@ -225,7 +224,7 @@ public class ShadowEngineTests extends ESTestCase { public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergeSchedulerConfig mergeSchedulerConfig, MergePolicy mergePolicy) { IndexWriterConfig iwc = newIndexWriterConfig(); TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE); - EngineConfig config = new EngineConfig(shardId, threadPool, new ShardIndexingService(shardId, indexSettings), indexSettings + EngineConfig config = new 
EngineConfig(shardId, threadPool, indexSettings , null, store, createSnapshotDeletionPolicy(), mergePolicy, mergeSchedulerConfig, iwc.getAnalyzer(), iwc.getSimilarity() , new CodecService(null, logger), new Engine.EventListener() { @Override diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java index 024a90ce7ba..82b2cca79aa 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java @@ -46,12 +46,15 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.UnicodeUtil; +import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource; import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource; import org.elasticsearch.index.fielddata.ordinals.GlobalOrdinalsIndexFieldData; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.MultiValueMode; import java.io.IOException; @@ -385,7 +388,9 @@ public abstract class AbstractStringFieldDataTestCase extends AbstractFieldDataI writer.commit(); } } - IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, true)); + DirectoryReader directoryReader = DirectoryReader.open(writer, true); + directoryReader = ElasticsearchDirectoryReader.wrap(directoryReader, new ShardId(new Index("test"), 0)); + IndexSearcher searcher = new IndexSearcher(directoryReader); IndexFieldData fieldData = getForField("text"); final Object missingValue; switch (randomInt(4)) { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/SimpleExternalMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/SimpleExternalMappingTests.java index bf3196fdcf7..ba05ea81054 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/SimpleExternalMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/SimpleExternalMappingTests.java @@ -55,7 +55,7 @@ public class SimpleExternalMappingTests extends ESSingleNodeTestCase { Collections.singletonMap(ExternalMetadataMapper.CONTENT_TYPE, new ExternalMetadataMapper.TypeParser())); DocumentMapperParser parser = new DocumentMapperParser(indexService.getIndexSettings(), indexService.mapperService(), - indexService.analysisService(), indexService.similarityService(), mapperRegistry); + indexService.analysisService(), indexService.similarityService(), mapperRegistry, indexService::getQueryShardContext); DocumentMapper documentMapper = parser.parse("type", new CompressedXContent( XContentFactory.jsonBuilder().startObject().startObject("type") .startObject(ExternalMetadataMapper.CONTENT_TYPE) @@ -101,7 +101,7 @@ public class SimpleExternalMappingTests extends ESSingleNodeTestCase { MapperRegistry mapperRegistry = new MapperRegistry(mapperParsers, Collections.emptyMap()); DocumentMapperParser parser = new DocumentMapperParser(indexService.getIndexSettings(), indexService.mapperService(), - 
indexService.analysisService(), indexService.similarityService(), mapperRegistry); + indexService.analysisService(), indexService.similarityService(), mapperRegistry, indexService::getQueryShardContext); DocumentMapper documentMapper = parser.parse("type", new CompressedXContent( XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties") @@ -160,7 +160,7 @@ public class SimpleExternalMappingTests extends ESSingleNodeTestCase { MapperRegistry mapperRegistry = new MapperRegistry(mapperParsers, Collections.emptyMap()); DocumentMapperParser parser = new DocumentMapperParser(indexService.getIndexSettings(), indexService.mapperService(), - indexService.analysisService(), indexService.similarityService(), mapperRegistry); + indexService.analysisService(), indexService.similarityService(), mapperRegistry, indexService::getQueryShardContext); DocumentMapper documentMapper = parser.parse("type", new CompressedXContent( XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties") diff --git a/core/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java index 0852f61617b..d4ee73fa543 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java @@ -236,9 +236,9 @@ public class FieldNamesFieldMapperTests extends ESSingleNodeTestCase { IndicesModule indicesModule = new IndicesModule(); indicesModule.registerMetadataMapper("_dummy", new DummyMetadataFieldMapper.TypeParser()); final MapperRegistry mapperRegistry = indicesModule.getMapperRegistry(); - MapperService mapperService = new MapperService(indexService.getIndexSettings(), indexService.analysisService(), indexService.similarityService(), mapperRegistry); + MapperService mapperService = new MapperService(indexService.getIndexSettings(), indexService.analysisService(), indexService.similarityService(), mapperRegistry, indexService::getQueryShardContext); DocumentMapperParser parser = new DocumentMapperParser(indexService.getIndexSettings(), mapperService, - indexService.analysisService(), indexService.similarityService(), mapperRegistry); + indexService.analysisService(), indexService.similarityService(), mapperRegistry, indexService::getQueryShardContext); String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string(); DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); ParsedDocument parsedDocument = mapper.parse("index", "type", "id", new BytesArray("{}")); diff --git a/core/src/test/java/org/elasticsearch/index/percolator/ExtractQueryTermsServiceTests.java b/core/src/test/java/org/elasticsearch/index/percolator/ExtractQueryTermsServiceTests.java new file mode 100644 index 00000000000..f23ec6d9595 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/percolator/ExtractQueryTermsServiceTests.java @@ -0,0 +1,287 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.index.percolator; + +import org.apache.lucene.analysis.core.WhitespaceAnalyzer; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.Term; +import org.apache.lucene.index.memory.MemoryIndex; +import org.apache.lucene.queries.TermsQuery; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.BoostQuery; +import org.apache.lucene.search.ConstantScoreQuery; +import org.apache.lucene.search.PhraseQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.TermRangeQuery; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.index.mapper.ParseContext; +import org.elasticsearch.test.ESTestCase; + + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; + +public class ExtractQueryTermsServiceTests extends ESTestCase { + + public final static String QUERY_TERMS_FIELD = "extracted_terms"; + public final static String UNKNOWN_QUERY_FIELD = "unknown_query"; + public static FieldType QUERY_TERMS_FIELD_TYPE = new FieldType(); + + static { + QUERY_TERMS_FIELD_TYPE.setTokenized(false); + QUERY_TERMS_FIELD_TYPE.setIndexOptions(IndexOptions.DOCS); + QUERY_TERMS_FIELD_TYPE.freeze(); + } + + public void testExtractQueryMetadata() { + BooleanQuery.Builder bq = new BooleanQuery.Builder(); + TermQuery termQuery1 = new TermQuery(new Term("field1", "term1")); + bq.add(termQuery1, BooleanClause.Occur.SHOULD); + TermQuery termQuery2 = new TermQuery(new Term("field2", "term2")); + bq.add(termQuery2, BooleanClause.Occur.SHOULD); + + ParseContext.Document document = new ParseContext.Document(); + ExtractQueryTermsService.extractQueryTerms(bq.build(), document, QUERY_TERMS_FIELD, UNKNOWN_QUERY_FIELD, QUERY_TERMS_FIELD_TYPE); + Collections.sort(document.getFields(), (field1, field2) -> field1.binaryValue().compareTo(field2.binaryValue())); + assertThat(document.getFields().size(), equalTo(2)); + assertThat(document.getFields().get(0).name(), equalTo(QUERY_TERMS_FIELD)); + assertThat(document.getFields().get(0).binaryValue().utf8ToString(), equalTo("field1\u0000term1")); + assertThat(document.getFields().get(1).name(), equalTo(QUERY_TERMS_FIELD)); + assertThat(document.getFields().get(1).binaryValue().utf8ToString(), equalTo("field2\u0000term2")); + } + + public void testExtractQueryMetadata_unsupported() { + BooleanQuery.Builder bq = new BooleanQuery.Builder(); + TermQuery termQuery1 = new TermQuery(new Term("field1", "term1")); + bq.add(termQuery1, BooleanClause.Occur.SHOULD); + TermQuery termQuery2 = new TermQuery(new Term("field2", "term2")); + bq.add(termQuery2, 
BooleanClause.Occur.SHOULD); + + TermRangeQuery query = new TermRangeQuery("field1", new BytesRef("a"), new BytesRef("z"), true, true); + ParseContext.Document document = new ParseContext.Document(); + ExtractQueryTermsService.extractQueryTerms(query, document, QUERY_TERMS_FIELD, UNKNOWN_QUERY_FIELD, QUERY_TERMS_FIELD_TYPE); + assertThat(document.getFields().size(), equalTo(1)); + assertThat(document.getFields().get(0).name(), equalTo(UNKNOWN_QUERY_FIELD)); + assertThat(document.getFields().get(0).binaryValue().utf8ToString(), equalTo("")); + } + + public void testExtractQueryMetadata_termQuery() { + TermQuery termQuery = new TermQuery(new Term("_field", "_term")); + List<Term> terms = new ArrayList<>(ExtractQueryTermsService.extractQueryTerms(termQuery)); + assertThat(terms.size(), equalTo(1)); + assertThat(terms.get(0).field(), equalTo(termQuery.getTerm().field())); + assertThat(terms.get(0).bytes(), equalTo(termQuery.getTerm().bytes())); + } + + public void testExtractQueryMetadata_phraseQuery() { + PhraseQuery phraseQuery = new PhraseQuery("_field", "_term1", "term2"); + List<Term> terms = new ArrayList<>(ExtractQueryTermsService.extractQueryTerms(phraseQuery)); + assertThat(terms.size(), equalTo(1)); + assertThat(terms.get(0).field(), equalTo(phraseQuery.getTerms()[0].field())); + assertThat(terms.get(0).bytes(), equalTo(phraseQuery.getTerms()[0].bytes())); + } + + public void testExtractQueryMetadata_booleanQuery() { + BooleanQuery.Builder builder = new BooleanQuery.Builder(); + TermQuery termQuery1 = new TermQuery(new Term("_field", "_term")); + builder.add(termQuery1, BooleanClause.Occur.SHOULD); + PhraseQuery phraseQuery = new PhraseQuery("_field", "_term1", "term2"); + builder.add(phraseQuery, BooleanClause.Occur.SHOULD); + + BooleanQuery.Builder subBuilder = new BooleanQuery.Builder(); + TermQuery termQuery2 = new TermQuery(new Term("_field1", "_term")); + subBuilder.add(termQuery2, BooleanClause.Occur.MUST); + TermQuery termQuery3 = new TermQuery(new Term("_field3", "_long_term")); + subBuilder.add(termQuery3, BooleanClause.Occur.MUST); + builder.add(subBuilder.build(), BooleanClause.Occur.SHOULD); + + BooleanQuery booleanQuery = builder.build(); + List<Term> terms = new ArrayList<>(ExtractQueryTermsService.extractQueryTerms(booleanQuery)); + Collections.sort(terms); + assertThat(terms.size(), equalTo(3)); + assertThat(terms.get(0).field(), equalTo(termQuery1.getTerm().field())); + assertThat(terms.get(0).bytes(), equalTo(termQuery1.getTerm().bytes())); + assertThat(terms.get(1).field(), equalTo(phraseQuery.getTerms()[0].field())); + assertThat(terms.get(1).bytes(), equalTo(phraseQuery.getTerms()[0].bytes())); + assertThat(terms.get(2).field(), equalTo(termQuery3.getTerm().field())); + assertThat(terms.get(2).bytes(), equalTo(termQuery3.getTerm().bytes())); + } + + public void testExtractQueryMetadata_booleanQuery_onlyShould() { + BooleanQuery.Builder builder = new BooleanQuery.Builder(); + TermQuery termQuery1 = new TermQuery(new Term("_field", "_term1")); + builder.add(termQuery1, BooleanClause.Occur.SHOULD); + TermQuery termQuery2 = new TermQuery(new Term("_field", "_term2")); + builder.add(termQuery2, BooleanClause.Occur.SHOULD); + + BooleanQuery.Builder subBuilder = new BooleanQuery.Builder(); + TermQuery termQuery3 = new TermQuery(new Term("_field1", "_term")); + subBuilder.add(termQuery3, BooleanClause.Occur.SHOULD); + TermQuery termQuery4 = new TermQuery(new Term("_field3", "_long_term")); + subBuilder.add(termQuery4, BooleanClause.Occur.SHOULD); + builder.add(subBuilder.build(), 
BooleanClause.Occur.SHOULD); + + BooleanQuery booleanQuery = builder.build(); + List<Term> terms = new ArrayList<>(ExtractQueryTermsService.extractQueryTerms(booleanQuery)); + Collections.sort(terms); + assertThat(terms.size(), equalTo(4)); + assertThat(terms.get(0).field(), equalTo(termQuery1.getTerm().field())); + assertThat(terms.get(0).bytes(), equalTo(termQuery1.getTerm().bytes())); + assertThat(terms.get(1).field(), equalTo(termQuery2.getTerm().field())); + assertThat(terms.get(1).bytes(), equalTo(termQuery2.getTerm().bytes())); + assertThat(terms.get(2).field(), equalTo(termQuery3.getTerm().field())); + assertThat(terms.get(2).bytes(), equalTo(termQuery3.getTerm().bytes())); + assertThat(terms.get(3).field(), equalTo(termQuery4.getTerm().field())); + assertThat(terms.get(3).bytes(), equalTo(termQuery4.getTerm().bytes())); + } + + public void testExtractQueryMetadata_booleanQueryWithMustNot() { + BooleanQuery.Builder builder = new BooleanQuery.Builder(); + TermQuery termQuery1 = new TermQuery(new Term("_field", "_term")); + builder.add(termQuery1, BooleanClause.Occur.MUST_NOT); + PhraseQuery phraseQuery = new PhraseQuery("_field", "_term1", "term2"); + builder.add(phraseQuery, BooleanClause.Occur.SHOULD); + + BooleanQuery booleanQuery = builder.build(); + List<Term> terms = new ArrayList<>(ExtractQueryTermsService.extractQueryTerms(booleanQuery)); + assertThat(terms.size(), equalTo(1)); + assertThat(terms.get(0).field(), equalTo(phraseQuery.getTerms()[0].field())); + assertThat(terms.get(0).bytes(), equalTo(phraseQuery.getTerms()[0].bytes())); + } + + public void testExtractQueryMetadata_constantScoreQuery() { + TermQuery termQuery1 = new TermQuery(new Term("_field", "_term")); + ConstantScoreQuery constantScoreQuery = new ConstantScoreQuery(termQuery1); + List<Term> terms = new ArrayList<>(ExtractQueryTermsService.extractQueryTerms(constantScoreQuery)); + assertThat(terms.size(), equalTo(1)); + assertThat(terms.get(0).field(), equalTo(termQuery1.getTerm().field())); + assertThat(terms.get(0).bytes(), equalTo(termQuery1.getTerm().bytes())); + } + + public void testExtractQueryMetadata_boostQuery() { + TermQuery termQuery1 = new TermQuery(new Term("_field", "_term")); + BoostQuery boostQuery = new BoostQuery(termQuery1, 1f); + List<Term> terms = new ArrayList<>(ExtractQueryTermsService.extractQueryTerms(boostQuery)); + assertThat(terms.size(), equalTo(1)); + assertThat(terms.get(0).field(), equalTo(termQuery1.getTerm().field())); + assertThat(terms.get(0).bytes(), equalTo(termQuery1.getTerm().bytes())); + } + + public void testExtractQueryMetadata_unsupportedQuery() { + TermRangeQuery termRangeQuery = new TermRangeQuery("_field", null, null, true, false); + + try { + ExtractQueryTermsService.extractQueryTerms(termRangeQuery); + fail("UnsupportedQueryException expected"); + } catch (ExtractQueryTermsService.UnsupportedQueryException e) { + assertThat(e.getUnsupportedQuery(), sameInstance(termRangeQuery)); + } + + TermQuery termQuery1 = new TermQuery(new Term("_field", "_term")); + BooleanQuery.Builder builder = new BooleanQuery.Builder(); + builder.add(termQuery1, BooleanClause.Occur.SHOULD); + builder.add(termRangeQuery, BooleanClause.Occur.SHOULD); + BooleanQuery bq = builder.build(); + + try { + ExtractQueryTermsService.extractQueryTerms(bq); + fail("UnsupportedQueryException expected"); + } catch (ExtractQueryTermsService.UnsupportedQueryException e) { + assertThat(e.getUnsupportedQuery(), sameInstance(termRangeQuery)); + } + } + 
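+ // the query returned by createQueryTermsQuery should match every term the document contains, + // plus the marker term for percolator queries whose terms could not be extracted 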
+ public void testCreateQueryMetadataQuery() throws Exception { + MemoryIndex memoryIndex = new MemoryIndex(false); + memoryIndex.addField("field1", "the quick brown fox jumps over the lazy dog", new WhitespaceAnalyzer()); + memoryIndex.addField("field2", "some more text", new WhitespaceAnalyzer()); + memoryIndex.addField("_field3", "unhide me", new WhitespaceAnalyzer()); + memoryIndex.addField("field4", "123", new WhitespaceAnalyzer()); + + IndexReader indexReader = memoryIndex.createSearcher().getIndexReader(); + Query query = ExtractQueryTermsService.createQueryTermsQuery(indexReader, QUERY_TERMS_FIELD, UNKNOWN_QUERY_FIELD); + assertThat(query, instanceOf(TermsQuery.class)); + + // no easy way to get to the terms in a TermsQuery, + // but if there are fewer than 16 terms it gets rewritten to a BooleanQuery and then we can easily check the terms + BooleanQuery booleanQuery = (BooleanQuery) ((ConstantScoreQuery) query.rewrite(indexReader)).getQuery(); + assertThat(booleanQuery.clauses().size(), equalTo(15)); + assertClause(booleanQuery, 0, QUERY_TERMS_FIELD, "_field3\u0000me"); + assertClause(booleanQuery, 1, QUERY_TERMS_FIELD, "_field3\u0000unhide"); + assertClause(booleanQuery, 2, QUERY_TERMS_FIELD, "field1\u0000brown"); + assertClause(booleanQuery, 3, QUERY_TERMS_FIELD, "field1\u0000dog"); + assertClause(booleanQuery, 4, QUERY_TERMS_FIELD, "field1\u0000fox"); + assertClause(booleanQuery, 5, QUERY_TERMS_FIELD, "field1\u0000jumps"); + assertClause(booleanQuery, 6, QUERY_TERMS_FIELD, "field1\u0000lazy"); + assertClause(booleanQuery, 7, QUERY_TERMS_FIELD, "field1\u0000over"); + assertClause(booleanQuery, 8, QUERY_TERMS_FIELD, "field1\u0000quick"); + assertClause(booleanQuery, 9, QUERY_TERMS_FIELD, "field1\u0000the"); + assertClause(booleanQuery, 10, QUERY_TERMS_FIELD, "field2\u0000more"); + assertClause(booleanQuery, 11, QUERY_TERMS_FIELD, "field2\u0000some"); + assertClause(booleanQuery, 12, QUERY_TERMS_FIELD, "field2\u0000text"); + assertClause(booleanQuery, 13, QUERY_TERMS_FIELD, "field4\u0000123"); + assertClause(booleanQuery, 14, UNKNOWN_QUERY_FIELD, ""); + } + + public void testSelectTermListWithTheLongestShortestTerm() { + Set<Term> terms1 = new HashSet<>(); + int shortestTerms1Length = Integer.MAX_VALUE; + int sumTermLength = randomIntBetween(1, 128); + while (sumTermLength > 0) { + int length = randomInt(sumTermLength); + shortestTerms1Length = Math.min(shortestTerms1Length, length); + terms1.add(new Term("field", randomAsciiOfLength(length))); + sumTermLength -= length; + } + + Set<Term> terms2 = new HashSet<>(); + int shortestTerms2Length = Integer.MAX_VALUE; + sumTermLength = randomIntBetween(1, 128); + while (sumTermLength > 0) { + int length = randomInt(sumTermLength); + shortestTerms2Length = Math.min(shortestTerms2Length, length); + terms2.add(new Term("field", randomAsciiOfLength(length))); + sumTermLength -= length; + } + + Set<Term> result = ExtractQueryTermsService.selectTermListWithTheLongestShortestTerm(terms1, terms2); + Set<Term> expected = shortestTerms1Length >= shortestTerms2Length ? 
terms1 : terms2; + assertThat(result, sameInstance(expected)); + } + + private void assertClause(BooleanQuery booleanQuery, int i, String expectedField, String expectedValue) { + assertThat(booleanQuery.clauses().get(i).getOccur(), equalTo(BooleanClause.Occur.SHOULD)); + assertThat(((TermQuery) booleanQuery.clauses().get(i).getQuery()).getTerm().field(), equalTo(expectedField)); + assertThat(((TermQuery) booleanQuery.clauses().get(i).getQuery()).getTerm().bytes().utf8ToString(), equalTo(expectedValue)); + } + +} diff --git a/core/src/test/java/org/elasticsearch/index/percolator/PercolatorFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/percolator/PercolatorFieldMapperTests.java new file mode 100644 index 00000000000..5ce841540d1 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/percolator/PercolatorFieldMapperTests.java @@ -0,0 +1,95 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.percolator; + +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.mapper.MapperParsingException; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.percolator.PercolatorService; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.junit.Before; + +import static org.elasticsearch.index.query.QueryBuilders.termQuery; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public class PercolatorFieldMapperTests extends ESSingleNodeTestCase { + + private MapperService mapperService; + + @Before + public void init() throws Exception { + IndexService indexService = createIndex("test", Settings.EMPTY); + mapperService = indexService.mapperService(); + + String mapper = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "string").endObject().endObject() + .endObject().endObject().string(); + mapperService.merge("type", new CompressedXContent(mapper), true, true); + + String percolatorMapper = XContentFactory.jsonBuilder().startObject().startObject(PercolatorService.TYPE_NAME) + .startObject("properties").startObject("query").field("type", "percolator").endObject().endObject() + .endObject().endObject().string(); + mapperService.merge(PercolatorService.TYPE_NAME, new CompressedXContent(percolatorMapper), true, true); + } + + public void testPercolatorFieldMapper() throws Exception { + ParsedDocument doc = mapperService.documentMapper(PercolatorService.TYPE_NAME).parse("test", 
PercolatorService.TYPE_NAME, "1", XContentFactory.jsonBuilder().startObject() + .field("query", termQuery("field", "value")) + .endObject().bytes()); + + assertThat(doc.rootDoc().getFields(PercolatorFieldMapper.EXTRACTED_TERMS_FULL_FIELD_NAME).length, equalTo(1)); + assertThat(doc.rootDoc().getFields(PercolatorFieldMapper.EXTRACTED_TERMS_FULL_FIELD_NAME)[0].binaryValue().utf8ToString(), equalTo("field\0value")); + } + + public void testPercolatorFieldMapper_noQuery() throws Exception { + ParsedDocument doc = mapperService.documentMapper(PercolatorService.TYPE_NAME).parse("test", PercolatorService.TYPE_NAME, "1", XContentFactory.jsonBuilder().startObject() + .endObject().bytes()); + assertThat(doc.rootDoc().getFields(PercolatorFieldMapper.EXTRACTED_TERMS_FULL_FIELD_NAME).length, equalTo(0)); + + try { + mapperService.documentMapper(PercolatorService.TYPE_NAME).parse("test", PercolatorService.TYPE_NAME, "1", XContentFactory.jsonBuilder().startObject() + .nullField("query") + .endObject().bytes()); + } catch (MapperParsingException e) { + assertThat(e.getDetailedMessage(), containsString("query malformed, must start with start_object")); + } + } + + public void testAllowNoAdditionalSettings() throws Exception { + IndexService indexService = createIndex("test1", Settings.EMPTY); + MapperService mapperService = indexService.mapperService(); + + String percolatorMapper = XContentFactory.jsonBuilder().startObject().startObject(PercolatorService.TYPE_NAME) + .startObject("properties").startObject("query").field("type", "percolator").field("index", "no").endObject().endObject() + .endObject().endObject().string(); + try { + mapperService.merge(PercolatorService.TYPE_NAME, new CompressedXContent(percolatorMapper), true, true); + fail("MapperParsingException expected"); + } catch (MapperParsingException e) { + assertThat(e.getMessage(), equalTo("Mapping definition for [query] has unsupported parameters: [index : no]")); + } + } + +} diff --git a/core/src/test/java/org/elasticsearch/index/query/AbstractQueryTestCase.java b/core/src/test/java/org/elasticsearch/index/query/AbstractQueryTestCase.java index 8faa2dac524..b88d0be23b6 100644 --- a/core/src/test/java/org/elasticsearch/index/query/AbstractQueryTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/query/AbstractQueryTestCase.java @@ -255,7 +255,7 @@ public abstract class AbstractQueryTestCase> ScriptService scriptService = injector.getInstance(ScriptService.class); SimilarityService similarityService = new SimilarityService(idxSettings, Collections.emptyMap()); MapperRegistry mapperRegistry = injector.getInstance(MapperRegistry.class); - MapperService mapperService = new MapperService(idxSettings, analysisService, similarityService, mapperRegistry); + MapperService mapperService = new MapperService(idxSettings, analysisService, similarityService, mapperRegistry, () -> queryShardContext); indexFieldDataService = new IndexFieldDataService(idxSettings, injector.getInstance(IndicesFieldDataCache.class), injector.getInstance(CircuitBreakerService.class), mapperService); BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(idxSettings, new IndicesWarmer(idxSettings.getNodeSettings(), null), new BitsetFilterCache.Listener() { @Override diff --git a/core/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTestCase.java b/core/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTestCase.java index fa27323c13a..65dfd8a5af4 100644 --- 
a/core/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTestCase.java @@ -36,11 +36,14 @@ import org.apache.lucene.search.TopFieldDocs; import org.apache.lucene.search.join.QueryBitSetProducer; import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.search.join.ToParentBlockJoinQuery; +import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.lucene.search.Queries; +import org.elasticsearch.index.Index; import org.elasticsearch.index.fielddata.AbstractFieldDataTestCase; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource; import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.MultiValueMode; import java.io.IOException; @@ -216,7 +219,9 @@ public abstract class AbstractNumberNestedSortingTestCase extends AbstractFieldD writer.addDocument(document); MultiValueMode sortMode = MultiValueMode.SUM; - IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, false)); + DirectoryReader directoryReader = DirectoryReader.open(writer, false); + directoryReader = ElasticsearchDirectoryReader.wrap(directoryReader, new ShardId(new Index("test"), 0)); + IndexSearcher searcher = new IndexSearcher(directoryReader); Query parentFilter = new TermQuery(new Term("__type", "parent")); Query childFilter = Queries.not(parentFilter); XFieldComparatorSource nestedComparatorSource = createFieldComparator("field2", sortMode, null, createNested(searcher, parentFilter, childFilter)); diff --git a/core/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java b/core/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java index 49af5f0b82e..a58fea831d5 100644 --- a/core/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java +++ b/core/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java @@ -40,8 +40,10 @@ import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.search.join.ToParentBlockJoinQuery; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.TestUtil; +import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; import org.elasticsearch.index.fielddata.AbstractFieldDataTestCase; import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.fielddata.IndexFieldData; @@ -49,6 +51,7 @@ import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource; import org.elasticsearch.index.fielddata.NoOrdinalsStringFieldDataTests; import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource; import org.elasticsearch.index.fielddata.plain.PagedBytesIndexFieldData; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.MultiValueMode; import java.io.IOException; @@ -91,7 +94,9 @@ public class NestedSortingTests extends AbstractFieldDataTestCase { writer.commit(); MultiValueMode sortMode = randomFrom(Arrays.asList(MultiValueMode.MIN, MultiValueMode.MAX)); - IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, false)); + DirectoryReader reader = 
DirectoryReader.open(writer, false); + reader = ElasticsearchDirectoryReader.wrap(reader, new ShardId(new Index("test"), 0)); + IndexSearcher searcher = new IndexSearcher(reader); PagedBytesIndexFieldData indexFieldData1 = getForField("f"); IndexFieldData<?> indexFieldData2 = NoOrdinalsStringFieldDataTests.hideOrdinals(indexFieldData1); final String missingValue = randomBoolean() ? null : TestUtil.randomSimpleString(getRandom(), 2); @@ -274,7 +279,9 @@ public class NestedSortingTests extends AbstractFieldDataTestCase { writer.addDocument(document); MultiValueMode sortMode = MultiValueMode.MIN; - IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, false)); + DirectoryReader reader = DirectoryReader.open(writer, false); + reader = ElasticsearchDirectoryReader.wrap(reader, new ShardId(new Index("test"), 0)); + IndexSearcher searcher = new IndexSearcher(reader); PagedBytesIndexFieldData indexFieldData = getForField("field2"); Query parentFilter = new TermQuery(new Term("__type", "parent")); Query childFilter = Queries.not(parentFilter); diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 2ec38eeb1ab..a3bc14b7d93 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -79,8 +79,6 @@ import org.elasticsearch.index.engine.EngineException; import org.elasticsearch.index.fielddata.FieldDataStats; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.flush.FlushStats; -import org.elasticsearch.index.indexing.IndexingOperationListener; -import org.elasticsearch.index.indexing.ShardIndexingService; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.ParseContext; @@ -103,19 +101,23 @@ import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardCopyOption; +import java.util.ArrayList; import java.util.Arrays; import java.util.HashSet; +import java.util.List; import java.util.Set; import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_CREATED; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.hamcrest.Matchers.containsString; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -647,77 +649,77 @@ public class IndexShardTests extends ESSingleNodeTestCase { return new ParsedDocument(uidField, versionField, seqNoField, id, type, routing, timestamp, ttl, Arrays.asList(document), source, mappingUpdate); } - public void testPreIndex() throws IOException { - createIndex("testpreindex"); + public void testIndexingOperationsListeners() throws IOException { +
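// replaces the removed testPreIndex/testPostIndex/testPostIndexWithException tests: one counter per + // listener hook lets a single test walk the whole index and delete lifecycle, including failure paths. +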
createIndex("test_iol"); ensureGreen(); + client().prepareIndex("test_iol", "test", "0").setSource("{\"foo\" : \"bar\"}").setRefresh(true).get(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("testpreindex"); + IndexService test = indicesService.indexService("test_iol"); IndexShard shard = test.getShardOrNull(0); - ShardIndexingService shardIndexingService = shard.indexingService(); - final AtomicBoolean preIndexCalled = new AtomicBoolean(false); - - shardIndexingService.addListener(new IndexingOperationListener() { + AtomicInteger preIndex = new AtomicInteger(); + AtomicInteger postIndex = new AtomicInteger(); + AtomicInteger postIndexException = new AtomicInteger(); + AtomicInteger preDelete = new AtomicInteger(); + AtomicInteger postDelete = new AtomicInteger(); + AtomicInteger postDeleteException = new AtomicInteger(); + shard = reinitWithWrapper(test, shard, null, new IndexingOperationListener() { @Override public Engine.Index preIndex(Engine.Index operation) { - preIndexCalled.set(true); - return super.preIndex(operation); + preIndex.incrementAndGet(); + return operation; } - }); - ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, new ParseContext.Document(), new BytesArray(new byte[]{1}), null); - Engine.Index index = new Engine.Index(new Term("_uid", "1"), doc); - shard.index(index); - assertTrue(preIndexCalled.get()); - } - - public void testPostIndex() throws IOException { - createIndex("testpostindex"); - ensureGreen(); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("testpostindex"); - IndexShard shard = test.getShardOrNull(0); - ShardIndexingService shardIndexingService = shard.indexingService(); - final AtomicBoolean postIndexCalled = new AtomicBoolean(false); - - shardIndexingService.addListener(new IndexingOperationListener() { @Override public void postIndex(Engine.Index index) { - postIndexCalled.set(true); - super.postIndex(index); + postIndex.incrementAndGet(); + } + + @Override + public void postIndex(Engine.Index index, Throwable ex) { + postIndexException.incrementAndGet(); + } + + @Override + public Engine.Delete preDelete(Engine.Delete delete) { + preDelete.incrementAndGet(); + return delete; + } + + @Override + public void postDelete(Engine.Delete delete) { + postDelete.incrementAndGet(); + } + + @Override + public void postDelete(Engine.Delete delete, Throwable ex) { + postDeleteException.incrementAndGet(); + } }); ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, new ParseContext.Document(), new BytesArray(new byte[]{1}), null); Engine.Index index = new Engine.Index(new Term("_uid", "1"), doc); shard.index(index); - assertTrue(postIndexCalled.get()); - } + assertEquals(1, preIndex.get()); + assertEquals(1, postIndex.get()); + assertEquals(0, postIndexException.get()); + assertEquals(0, preDelete.get()); + assertEquals(0, postDelete.get()); + assertEquals(0, postDeleteException.get()); - public void testPostIndexWithException() throws IOException { - createIndex("testpostindexwithexception"); - ensureGreen(); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("testpostindexwithexception"); - IndexShard shard = test.getShardOrNull(0); - ShardIndexingService shardIndexingService = shard.indexingService(); + Engine.Delete delete = new Engine.Delete("test", "1", new Term("_uid", "1")); + 
shard.delete(delete); + + assertEquals(1, preIndex.get()); + assertEquals(1, postIndex.get()); + assertEquals(0, postIndexException.get()); + assertEquals(1, preDelete.get()); + assertEquals(1, postDelete.get()); + assertEquals(0, postDeleteException.get()); shard.close("Unexpected close", true); shard.state = IndexShardState.STARTED; // It will generate exception - final AtomicBoolean postIndexWithExceptionCalled = new AtomicBoolean(false); - - shardIndexingService.addListener(new IndexingOperationListener() { - @Override - public void postIndex(Engine.Index index, Throwable ex) { - assertNotNull(ex); - postIndexWithExceptionCalled.set(true); - super.postIndex(index, ex); - } - }); - - ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, new ParseContext.Document(), new BytesArray(new byte[]{1}), null); - Engine.Index index = new Engine.Index(new Term("_uid", "1"), doc); - try { shard.index(index); fail(); @@ -725,7 +727,26 @@ public class IndexShardTests extends ESSingleNodeTestCase { } - assertTrue(postIndexWithExceptionCalled.get()); + assertEquals(2, preIndex.get()); + assertEquals(1, postIndex.get()); + assertEquals(1, postIndexException.get()); + assertEquals(1, preDelete.get()); + assertEquals(1, postDelete.get()); + assertEquals(0, postDeleteException.get()); + try { + shard.delete(delete); + fail(); + } catch (IllegalIndexShardStateException e) { + + } + + assertEquals(2, preIndex.get()); + assertEquals(1, postIndex.get()); + assertEquals(1, postIndexException.get()); + assertEquals(2, preDelete.get()); + assertEquals(1, postDelete.get()); + assertEquals(1, postDeleteException.get()); + } public void testMaybeFlush() throws Exception { @@ -1081,11 +1102,11 @@ public class IndexShardTests extends ESSingleNodeTestCase { // test will fail due to unclosed searchers if the searcher is not released } - private final IndexShard reinitWithWrapper(IndexService indexService, IndexShard shard, IndexSearcherWrapper wrapper) throws IOException { + private final IndexShard reinitWithWrapper(IndexService indexService, IndexShard shard, IndexSearcherWrapper wrapper, IndexingOperationListener...
listeners) throws IOException { ShardRouting routing = new ShardRouting(shard.routingEntry()); shard.close("simon says", true); NodeServicesProvider indexServices = indexService.getIndexServices(); - IndexShard newShard = new IndexShard(shard.shardId(), indexService.getIndexSettings(), shard.shardPath(), shard.store(), indexService.cache(), indexService.mapperService(), indexService.similarityService(), indexService.fieldData(), shard.getEngineFactory(), indexService.getIndexEventListener(), wrapper, indexServices); + IndexShard newShard = new IndexShard(shard.shardId(), indexService.getIndexSettings(), shard.shardPath(), shard.store(), indexService.cache(), indexService.mapperService(), indexService.similarityService(), indexService.fieldData(), shard.getEngineFactory(), indexService.getIndexEventListener(), wrapper, indexServices, listeners); ShardRoutingHelper.reinit(routing); newShard.updateRoutingEntry(routing, false); DiscoveryNode localNode = new DiscoveryNode("foo", DummyTransportAddress.INSTANCE, Version.CURRENT); @@ -1097,4 +1118,29 @@ public class IndexShardTests extends ESSingleNodeTestCase { return newShard; } + public void testTranslogRecoverySyncsTranslog() throws IOException { + createIndex("testindexfortranslogsync"); + client().admin().indices().preparePutMapping("testindexfortranslogsync").setType("testtype").setSource(jsonBuilder().startObject() + .startObject("testtype") + .startObject("properties") + .startObject("foo") + .field("type", "string") + .endObject() + .endObject().endObject().endObject()).get(); + ensureGreen(); + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexService test = indicesService.indexService("testindexfortranslogsync"); + IndexShard shard = test.getShardOrNull(0); + ShardRouting routing = new ShardRouting(shard.routingEntry()); + test.removeShard(0, "b/c britta says so"); + IndexShard newShard = test.createShard(routing); + DiscoveryNode localNode = new DiscoveryNode("foo", DummyTransportAddress.INSTANCE, Version.CURRENT); + newShard.markAsRecovering("for testing", new RecoveryState(newShard.shardId(), routing.primary(), RecoveryState.Type.REPLICA, localNode, localNode)); + List<Translog.Operation> operations = new ArrayList<>(); + operations.add(new Translog.Index("testtype", "1", jsonBuilder().startObject().field("foo", "bar").endObject().bytes().toBytes())); + newShard.prepareForIndexRecovery(); + newShard.performTranslogRecovery(true); + newShard.performBatchRecovery(operations); + assertFalse(newShard.getTranslog().syncNeeded()); + } } diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java new file mode 100644 index 00000000000..92bbf06a7b6 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java @@ -0,0 +1,162 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.index.shard; + +import org.apache.lucene.index.Term; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.test.ESTestCase; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; + +public class IndexingOperationListenerTests extends ESTestCase { + + // this test also checks that the invocation counts stay correct when one or more listeners throw exceptions + public void testListenersAreExecuted() { + AtomicInteger preIndex = new AtomicInteger(); + AtomicInteger postIndex = new AtomicInteger(); + AtomicInteger postIndexException = new AtomicInteger(); + AtomicInteger preDelete = new AtomicInteger(); + AtomicInteger postDelete = new AtomicInteger(); + AtomicInteger postDeleteException = new AtomicInteger(); + IndexingOperationListener listener = new IndexingOperationListener() { + @Override + public Engine.Index preIndex(Engine.Index operation) { + preIndex.incrementAndGet(); + return operation; + } + + @Override + public void postIndex(Engine.Index index) { + postIndex.incrementAndGet(); + } + + @Override + public void postIndex(Engine.Index index, Throwable ex) { + postIndexException.incrementAndGet(); + } + + @Override + public Engine.Delete preDelete(Engine.Delete delete) { + preDelete.incrementAndGet(); + return delete; + } + + @Override + public void postDelete(Engine.Delete delete) { + postDelete.incrementAndGet(); + } + + @Override + public void postDelete(Engine.Delete delete, Throwable ex) { + postDeleteException.incrementAndGet(); + } + }; + + IndexingOperationListener throwingListener = new IndexingOperationListener() { + @Override + public Engine.Index preIndex(Engine.Index operation) { + throw new RuntimeException(); + } + + @Override + public void postIndex(Engine.Index index) { + throw new RuntimeException(); + } + + @Override + public void postIndex(Engine.Index index, Throwable ex) { + throw new RuntimeException(); + } + + @Override + public Engine.Delete preDelete(Engine.Delete delete) { + throw new RuntimeException(); + } + + @Override + public void postDelete(Engine.Delete delete) { + throw new RuntimeException(); + } + + @Override + public void postDelete(Engine.Delete delete, Throwable ex) { + throw new RuntimeException(); + } + }; + final List<IndexingOperationListener> indexingOperationListeners = new ArrayList<>(Arrays.asList(listener, listener)); + if (randomBoolean()) { + indexingOperationListeners.add(throwingListener); + if (randomBoolean()) { + indexingOperationListeners.add(throwingListener); + } + } + Collections.shuffle(indexingOperationListeners, random()); + IndexingOperationListener.CompositeListener compositeListener = new IndexingOperationListener.CompositeListener(indexingOperationListeners, logger); + Engine.Delete delete = new Engine.Delete("test", "1", new Term("_uid", "1")); + Engine.Index index = new Engine.Index(new Term("_uid", "1"), null); + compositeListener.postDelete(delete); + assertEquals(0, preIndex.get()); + assertEquals(0, postIndex.get()); + assertEquals(0, postIndexException.get()); + assertEquals(0,
preDelete.get()); + assertEquals(2, postDelete.get()); + assertEquals(0, postDeleteException.get()); + + compositeListener.postDelete(delete, new RuntimeException()); + assertEquals(0, preIndex.get()); + assertEquals(0, postIndex.get()); + assertEquals(0, postIndexException.get()); + assertEquals(0, preDelete.get()); + assertEquals(2, postDelete.get()); + assertEquals(2, postDeleteException.get()); + + compositeListener.preDelete(delete); + assertEquals(0, preIndex.get()); + assertEquals(0, postIndex.get()); + assertEquals(0, postIndexException.get()); + assertEquals(2, preDelete.get()); + assertEquals(2, postDelete.get()); + assertEquals(2, postDeleteException.get()); + + compositeListener.postIndex(index); + assertEquals(0, preIndex.get()); + assertEquals(2, postIndex.get()); + assertEquals(0, postIndexException.get()); + assertEquals(2, preDelete.get()); + assertEquals(2, postDelete.get()); + assertEquals(2, postDeleteException.get()); + + compositeListener.postIndex(index, new RuntimeException()); + assertEquals(0, preIndex.get()); + assertEquals(2, postIndex.get()); + assertEquals(2, postIndexException.get()); + assertEquals(2, preDelete.get()); + assertEquals(2, postDelete.get()); + assertEquals(2, postDeleteException.get()); + + compositeListener.preIndex(index); + assertEquals(2, preIndex.get()); + assertEquals(2, postIndex.get()); + assertEquals(2, postIndexException.get()); + assertEquals(2, preDelete.get()); + assertEquals(2, postDelete.get()); + assertEquals(2, postDeleteException.get()); + } +} diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index e5e40dfa255..400cc4b8609 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -32,6 +32,7 @@ import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; @@ -55,6 +56,7 @@ import java.io.IOException; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.nio.charset.Charset; +import java.nio.file.FileAlreadyExistsException; import java.nio.file.Files; import java.nio.file.InvalidPathException; import java.nio.file.Path; @@ -136,8 +138,8 @@ public class TranslogTests extends ESTestCase { private TranslogConfig getTranslogConfig(Path path) { Settings build = Settings.settingsBuilder() - .put(IndexMetaData.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT) - .build(); + .put(IndexMetaData.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT) + .build(); ByteSizeValue bufferSize = randomBoolean() ? 
TranslogConfig.DEFAULT_BUFFER_SIZE : new ByteSizeValue(10 + randomInt(128 * 1024), ByteSizeUnit.BYTES); return new TranslogConfig(shardId, path, IndexSettingsModule.newIndexSettings(shardId.index(), build), BigArrays.NON_RECYCLING_INSTANCE, bufferSize); } @@ -634,7 +636,9 @@ public class TranslogTests extends ESTestCase { assertFileIsPresent(translog, 1); } - /** Tests that concurrent readers and writes maintain view and snapshot semantics */ + /** + * Tests that concurrent readers and writes maintain view and snapshot semantics + */ public void testConcurrentWriteViewsAndSnapshot() throws Throwable { final Thread[] writers = new Thread[randomIntBetween(1, 10)]; final Thread[] readers = new Thread[randomIntBetween(1, 10)]; @@ -1094,7 +1098,7 @@ public class TranslogTests extends ESTestCase { } } - public void testRecoveryUncommittedCorryptedCheckpoint() throws IOException { + public void testRecoveryUncommittedCorruptedCheckpoint() throws IOException { List<Translog.Location> locations = new ArrayList<>(); int translogOperations = 100; final int prepareOp = 44; @@ -1307,19 +1311,20 @@ public class TranslogTests extends ESTestCase { return translog.add(op); } - protected void afterAdd() throws IOException {} + protected void afterAdd() throws IOException { + } } public void testFailFlush() throws IOException { Path tempDir = createTempDir(); - final AtomicBoolean fail = new AtomicBoolean(); + final FailSwitch fail = new FailSwitch(); TranslogConfig config = getTranslogConfig(tempDir); Translog translog = getFailableTranslog(fail, config); List<Translog.Location> locations = new ArrayList<>(); int opsSynced = 0; boolean failed = false; - while(failed == false) { + while (failed == false) { try { locations.add(translog.add(new Translog.Index("test", "" + opsSynced, Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8"))))); translog.sync(); @@ -1331,10 +1336,14 @@ public class TranslogTests extends ESTestCase { failed = true; assertFalse(translog.isOpen()); assertEquals("__FAKE__ no space left on device", ex.getMessage()); - } - fail.set(randomBoolean()); + } + if (randomBoolean()) { + fail.failAlways(); + } else { + fail.failNever(); + } } - fail.set(false); + fail.failNever(); if (randomBoolean()) { try { locations.add(translog.add(new Translog.Index("test", "" + opsSynced, Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8"))))); @@ -1370,7 +1379,7 @@ public class TranslogTests extends ESTestCase { assertFalse(translog.isOpen()); translog.close(); // we are closed config.setTranslogGeneration(translogGeneration); - try (Translog tlog = new Translog(config)){ + try (Translog tlog = new Translog(config)) { assertEquals("lastCommitted must be 1 less than current", translogGeneration.translogFileGeneration + 1, tlog.currentFileGeneration()); assertFalse(tlog.syncNeeded()); @@ -1393,7 +1402,7 @@ public class TranslogTests extends ESTestCase { for (int opsAdded = 0; opsAdded < numOps; opsAdded++) { locations.add(translog.add(new Translog.Index("test", "" + opsAdded, lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8"))))); try (Translog.Snapshot snapshot = translog.newSnapshot()) { - assertEquals(opsAdded+1, snapshot.estimatedTotalOperations()); + assertEquals(opsAdded + 1, snapshot.estimatedTotalOperations()); for (int i = 0; i < opsAdded; i++) { assertEquals("expected operation" + i + " to be in the current translog but wasn't", translog.currentFileGeneration(), locations.get(i).generation); Translog.Operation next = snapshot.next(); @@ -1405,13 +1414,13 @@ public class TranslogTests extends
ESTestCase { public void testTragicEventCanBeAnyException() throws IOException { Path tempDir = createTempDir(); - final AtomicBoolean fail = new AtomicBoolean(); + final FailSwitch fail = new FailSwitch(); TranslogConfig config = getTranslogConfig(tempDir); - assumeFalse("this won't work if we sync on any op",config.isSyncOnEachOperation()); + assumeFalse("this won't work if we sync on any op", config.isSyncOnEachOperation()); Translog translog = getFailableTranslog(fail, config, false, true); LineFileDocs lineFileDocs = new LineFileDocs(random()); // writes pretty big docs so we cross buffer borders regularly translog.add(new Translog.Index("test", "1", lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8")))); - fail.set(true); + fail.failAlways(); try { Translog.Location location = translog.add(new Translog.Index("test", "2", lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8")))); if (randomBoolean()) { @@ -1427,12 +1436,12 @@ public class TranslogTests extends ESTestCase { assertTrue(ex.getCause() instanceof UnknownException); } assertFalse(translog.isOpen()); - assertTrue(translog.getTragicException() instanceof UnknownException); + assertTrue(translog.getTragicException() instanceof UnknownException); } public void testFatalIOExceptionsWhileWritingConcurrently() throws IOException, InterruptedException { Path tempDir = createTempDir(); - final AtomicBoolean fail = new AtomicBoolean(false); + final FailSwitch fail = new FailSwitch(); TranslogConfig config = getTranslogConfig(tempDir); Translog translog = getFailableTranslog(fail, config); @@ -1469,7 +1478,7 @@ public class TranslogTests extends ESTestCase { // this holds a reference to the current tlog channel such that it's not closed // if we hit a tragic event. this is important to ensure that asserts inside the Translog#add don't trip // otherwise our assertions here are off by one sometimes.
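+ // FailSwitch (added below) expresses failure as a rate: failAlways() sets it to 100 so every + // subsequent channel operation throws, failNever() resets it to 0, which is what lets it replace + // the plain AtomicBoolean used before.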
- fail.set(true); + fail.failAlways(); for (int i = 0; i < threadCount; i++) { threads[i].join(); } @@ -1520,11 +1529,41 @@ public class TranslogTests extends ESTestCase { } } } - private Translog getFailableTranslog(final AtomicBoolean fail, final TranslogConfig config) throws IOException { + + private Translog getFailableTranslog(FailSwitch fail, final TranslogConfig config) throws IOException { return getFailableTranslog(fail, config, randomBoolean(), false); } - private Translog getFailableTranslog(final AtomicBoolean fail, final TranslogConfig config, final boolean paritalWrites, final boolean throwUnknownException) throws IOException { + private static class FailSwitch { + private volatile int failRate; + private volatile boolean onceFailedFailAlways = false; + public boolean fail() { + boolean fail = randomIntBetween(1, 100) <= failRate; + if (fail && onceFailedFailAlways) { + failAlways(); + } + return fail; + } + + public void failNever() { + failRate = 0; + } + + public void failAlways() { + failRate = 100; + } + + public void failRandomly() { + failRate = randomIntBetween(1, 100); + } + + public void onceFailedFailAlways() { + onceFailedFailAlways = true; + } + } + + + private Translog getFailableTranslog(final FailSwitch fail, final TranslogConfig config, final boolean partialWrites, final boolean throwUnknownException) throws IOException { return new Translog(config) { @Override TranslogWriter.ChannelFactory getChannelFactory() { @@ -1534,23 +1573,56 @@ public class TranslogTests extends ESTestCase { @Override public FileChannel open(Path file) throws IOException { FileChannel channel = factory.open(file); - return new ThrowingFileChannel(fail, paritalWrites, throwUnknownException, channel); + boolean success = false; + try { + ThrowingFileChannel throwingFileChannel = new ThrowingFileChannel(fail, partialWrites, throwUnknownException, channel); + success = true; + return throwingFileChannel; + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(channel); + } + } } }; } + + @Override + protected boolean assertBytesAtLocation(Location location, BytesReference expectedBytes) throws IOException { + return true; // we don't want to fail in the assert + } }; } public static class ThrowingFileChannel extends FilterFileChannel { - private final AtomicBoolean fail; + private final FailSwitch fail; private final boolean partialWrite; private final boolean throwUnknownException; - public ThrowingFileChannel(AtomicBoolean fail, boolean partialWrite, boolean throwUnknownException, FileChannel delegate) { + public ThrowingFileChannel(FailSwitch fail, boolean partialWrite, boolean throwUnknownException, FileChannel delegate) throws MockDirectoryWrapper.FakeIOException { super(delegate); this.fail = fail; this.partialWrite = partialWrite; this.throwUnknownException = throwUnknownException; + if (fail.fail()) { + throw new MockDirectoryWrapper.FakeIOException(); + } + } + + @Override + public int read(ByteBuffer dst) throws IOException { + if (fail.fail()) { + throw new MockDirectoryWrapper.FakeIOException(); + } + return super.read(dst); + } + + @Override + public long read(ByteBuffer[] dsts, int offset, int length) throws IOException { + if (fail.fail()) { + throw new MockDirectoryWrapper.FakeIOException(); + } + return super.read(dsts, offset, length); } @Override @@ -1565,7 +1637,7 @@ public class TranslogTests extends ESTestCase { public int write(ByteBuffer src) throws IOException { - if (fail.get()) { + if (fail.fail()) { if (partialWrite) { if
(src.hasRemaining()) { final int pos = src.position(); @@ -1585,6 +1657,22 @@ public class TranslogTests extends ESTestCase { } return super.write(src); } + + @Override + public void force(boolean metaData) throws IOException { + if (fail.fail()) { + throw new MockDirectoryWrapper.FakeIOException(); + } + super.force(metaData); + } + + @Override + public long position() throws IOException { + if (fail.fail()) { + throw new MockDirectoryWrapper.FakeIOException(); + } + return super.position(); + } } private static final class UnknownException extends RuntimeException { @@ -1613,4 +1701,177 @@ public class TranslogTests extends ESTestCase { // all is well } } + + public void testRecoverWithUnbackedNextGen() throws IOException { + translog.add(new Translog.Index("test", "" + 0, Integer.toString(0).getBytes(Charset.forName("UTF-8")))); + Translog.TranslogGeneration translogGeneration = translog.getGeneration(); + translog.close(); + TranslogConfig config = translog.getConfig(); + + Path ckp = config.getTranslogPath().resolve(Translog.CHECKPOINT_FILE_NAME); + Checkpoint read = Checkpoint.read(ckp); + Files.copy(ckp, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation))); + Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 1) + ".tlog")); + config.setTranslogGeneration(translogGeneration); + try (Translog tlog = new Translog(config)) { + assertNotNull(translogGeneration); + assertFalse(tlog.syncNeeded()); + try (Translog.Snapshot snapshot = tlog.newSnapshot()) { + for (int i = 0; i < 1; i++) { + Translog.Operation next = snapshot.next(); + assertNotNull("operation " + i + " must be non-null", next); + assertEquals("payload mismatch", i, Integer.parseInt(next.getSource().source.toUtf8())); + } + } + tlog.add(new Translog.Index("test", "" + 1, Integer.toString(1).getBytes(Charset.forName("UTF-8")))); + } + try (Translog tlog = new Translog(config)) { + assertNotNull(translogGeneration); + assertFalse(tlog.syncNeeded()); + try (Translog.Snapshot snapshot = tlog.newSnapshot()) { + for (int i = 0; i < 2; i++) { + Translog.Operation next = snapshot.next(); + assertNotNull("operation " + i + " must be non-null", next); + assertEquals("payload mismatch", i, Integer.parseInt(next.getSource().source.toUtf8())); + } + } + } + } + + public void testRecoverWithUnbackedNextGenInIllegalState() throws IOException { + translog.add(new Translog.Index("test", "" + 0, Integer.toString(0).getBytes(Charset.forName("UTF-8")))); + Translog.TranslogGeneration translogGeneration = translog.getGeneration(); + translog.close(); + TranslogConfig config = translog.getConfig(); + Path ckp = config.getTranslogPath().resolve(Translog.CHECKPOINT_FILE_NAME); + Checkpoint read = Checkpoint.read(ckp); + // don't copy the new file + Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 1) + ".tlog")); + config.setTranslogGeneration(translogGeneration); + + try { + Translog tlog = new Translog(config); + fail("file already exists?"); + } catch (TranslogException ex) { + // all is well + assertEquals(ex.getMessage(), "failed to create new translog file"); + assertEquals(ex.getCause().getClass(), FileAlreadyExistsException.class); + } + } + + public void testRecoverWithUnbackedNextGenAndFutureFile() throws IOException { + translog.add(new Translog.Index("test", "" + 0, Integer.toString(0).getBytes(Charset.forName("UTF-8")))); + Translog.TranslogGeneration translogGeneration = translog.getGeneration(); + translog.close(); + TranslogConfig
config = translog.getConfig(); + + Path ckp = config.getTranslogPath().resolve(Translog.CHECKPOINT_FILE_NAME); + Checkpoint read = Checkpoint.read(ckp); + Files.copy(ckp, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation))); + Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 1) + ".tlog")); + // we add N+1 and N+2 to ensure we only delete the N+1 file and never jump ahead and wipe without the right condition + Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 2) + ".tlog")); + config.setTranslogGeneration(translogGeneration); + try (Translog tlog = new Translog(config)) { + assertNotNull(translogGeneration); + assertFalse(tlog.syncNeeded()); + try (Translog.Snapshot snapshot = tlog.newSnapshot()) { + for (int i = 0; i < 1; i++) { + Translog.Operation next = snapshot.next(); + assertNotNull("operation " + i + " must be non-null", next); + assertEquals("payload mismatch", i, Integer.parseInt(next.getSource().source.toUtf8())); + } + } + tlog.add(new Translog.Index("test", "" + 1, Integer.toString(1).getBytes(Charset.forName("UTF-8")))); + } + + try { + Translog tlog = new Translog(config); + fail("file already exists?"); + } catch (TranslogException ex) { + // all is well + assertEquals(ex.getMessage(), "failed to create new translog file"); + assertEquals(ex.getCause().getClass(), FileAlreadyExistsException.class); + } + } + + /** + * This test adds operations to the translog which might randomly throw an IOException. The only thing this test verifies is + * that we can, after we hit an exception, open and recover the translog successfully and retrieve all successfully synced operations + * from the transaction log. + */ + public void testWithRandomException() throws IOException { + final int runs = randomIntBetween(5, 10); + for (int run = 0; run < runs; run++) { + Path tempDir = createTempDir(); + final FailSwitch fail = new FailSwitch(); + fail.failRandomly(); + TranslogConfig config = getTranslogConfig(tempDir); + final int numOps = randomIntBetween(100, 200); + List<String> syncedDocs = new ArrayList<>(); + List<String> unsynced = new ArrayList<>(); + if (randomBoolean()) { + fail.onceFailedFailAlways(); + } + try { + final Translog failableTLog = getFailableTranslog(fail, config, randomBoolean(), false); + try { + LineFileDocs lineFileDocs = new LineFileDocs(random()); //writes pretty big docs so we cross buffer borders regularly + for (int opsAdded = 0; opsAdded < numOps; opsAdded++) { + String doc = lineFileDocs.nextDoc().toString(); + failableTLog.add(new Translog.Index("test", "" + opsAdded, doc.getBytes(Charset.forName("UTF-8")))); + unsynced.add(doc); + if (randomBoolean()) { + failableTLog.sync(); + syncedDocs.addAll(unsynced); + unsynced.clear(); + } + if (randomFloat() < 0.1) { + failableTLog.sync(); // we have to sync here first otherwise we don't know if the sync succeeded if the commit fails + syncedDocs.addAll(unsynced); + unsynced.clear(); + if (randomBoolean()) { + failableTLog.prepareCommit(); + } + failableTLog.commit(); + syncedDocs.clear(); + } + } + // we survived all the randomness!!! + // let's close the translog and if it succeeds we are all synced again. If we don't do this we will close + // it in the finally block but forget to copy over unsynced docs to syncedDocs and fail the assertion down the road...
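+ // invariant maintained by this bookkeeping: syncedDocs is exactly the set of operations durably + // synced since the last successful commit, which is what the recovery check at the end asserts.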
+ failableTLog.close(); + syncedDocs.addAll(unsynced); + unsynced.clear(); + } catch (TranslogException | MockDirectoryWrapper.FakeIOException ex) { + // fair enough + } catch (IOException ex) { + assertEquals(ex.getMessage(), "__FAKE__ no space left on device"); + } finally { + config.setTranslogGeneration(failableTLog.getGeneration()); + IOUtils.closeWhileHandlingException(failableTLog); + } + } catch (TranslogException | MockDirectoryWrapper.FakeIOException ex) { + // failed - that's ok, we didn't even create it + } + // now randomly open this failing tlog again just to make sure we can also recover from failing during recovery + if (randomBoolean()) { + try { + IOUtils.close(getFailableTranslog(fail, config, randomBoolean(), false)); + } catch (TranslogException | MockDirectoryWrapper.FakeIOException ex) { + // failed - that's ok, we didn't even create it + } + } + + try (Translog translog = new Translog(config)) { + try (Translog.Snapshot snapshot = translog.newSnapshot()) { + assertEquals(syncedDocs.size(), snapshot.estimatedTotalOperations()); + for (int i = 0; i < syncedDocs.size(); i++) { + Translog.Operation next = snapshot.next(); + assertNotNull("operation " + i + " must be non-null", next); + assertEquals(syncedDocs.get(i), next.getSource().source.toUtf8()); + } + } + } + } + } } diff --git a/core/src/test/java/org/elasticsearch/indices/memory/IndexingMemoryControllerIT.java b/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java similarity index 99% rename from core/src/test/java/org/elasticsearch/indices/memory/IndexingMemoryControllerIT.java rename to core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java index f8de8825b6d..a9e4b35f614 100644 --- a/core/src/test/java/org/elasticsearch/indices/memory/IndexingMemoryControllerIT.java +++ b/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.indices.memory; +package org.elasticsearch.indices; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.metadata.IndexMetaData; diff --git a/core/src/test/java/org/elasticsearch/indices/memory/IndexingMemoryControllerTests.java b/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java similarity index 97% rename from core/src/test/java/org/elasticsearch/indices/memory/IndexingMemoryControllerTests.java rename to core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java index d980c3c598d..19f91befbd7 100644 --- a/core/src/test/java/org/elasticsearch/indices/memory/IndexingMemoryControllerTests.java +++ b/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License.
*/ -package org.elasticsearch.indices.memory; +package org.elasticsearch.indices; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; @@ -24,8 +24,8 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.indices.IndicesService; import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.threadpool.ThreadPool; import java.util.ArrayList; import java.util.HashMap; @@ -33,6 +33,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.ScheduledFuture; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; @@ -120,6 +121,11 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase { activeShards.add(shard); forceCheck(); } + + @Override + protected ScheduledFuture<?> scheduleTask(ThreadPool threadPool) { + return null; + } } public void testShardAdditionAndRemoval() { diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java b/core/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java index 2723f49a77a..aeb4ac55410 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java @@ -36,7 +36,6 @@ import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequestBui import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder; -import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequestBuilder; import org.elasticsearch.action.percolate.MultiPercolateRequestBuilder; import org.elasticsearch.action.percolate.PercolateRequestBuilder; import org.elasticsearch.action.percolate.PercolateSourceBuilder; @@ -49,10 +48,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexNotFoundException; -import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.suggest.SuggestBuilders; -import org.elasticsearch.search.warmer.IndexWarmersMetaData; import org.elasticsearch.test.ESIntegTestCase; import static org.elasticsearch.action.percolate.PercolateSourceBuilder.docBuilder; @@ -86,7 +82,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase { verify(getAliases("test1", "test2"), true); verify(getFieldMapping("test1", "test2"), true); verify(getMapping("test1", "test2"), true); - verify(getWarmer("test1", "test2"), true); verify(getSettings("test1", "test2"), true); IndicesOptions options = IndicesOptions.strictExpandOpen(); @@ -107,7 +102,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase { verify(getAliases("test1", "test2").setIndicesOptions(options), true); verify(getFieldMapping("test1", "test2").setIndicesOptions(options), true); verify(getMapping("test1", "test2").setIndicesOptions(options), true); - verify(getWarmer("test1",
"test2").setIndicesOptions(options), true); verify(getSettings("test1", "test2").setIndicesOptions(options), true); options = IndicesOptions.lenientExpandOpen(); @@ -128,7 +122,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase { verify(getAliases("test1", "test2").setIndicesOptions(options), false); verify(getFieldMapping("test1", "test2").setIndicesOptions(options), false); verify(getMapping("test1", "test2").setIndicesOptions(options), false); - verify(getWarmer("test1", "test2").setIndicesOptions(options), false); verify(getSettings("test1", "test2").setIndicesOptions(options), false); options = IndicesOptions.strictExpandOpen(); @@ -151,7 +144,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase { verify(getAliases("test1", "test2").setIndicesOptions(options), false); verify(getFieldMapping("test1", "test2").setIndicesOptions(options), false); verify(getMapping("test1", "test2").setIndicesOptions(options), false); - verify(getWarmer("test1", "test2").setIndicesOptions(options), false); verify(getSettings("test1", "test2").setIndicesOptions(options), false); } @@ -182,7 +174,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase { verify(getAliases("test1").setIndicesOptions(options), true); verify(getFieldMapping("test1").setIndicesOptions(options), true); verify(getMapping("test1").setIndicesOptions(options), true); - verify(getWarmer("test1").setIndicesOptions(options), true); verify(getSettings("test1").setIndicesOptions(options), true); options = IndicesOptions.fromOptions(true, options.allowNoIndices(), options.expandWildcardsOpen(), options.expandWildcardsClosed(), options); @@ -203,7 +194,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase { verify(getAliases("test1").setIndicesOptions(options), false); verify(getFieldMapping("test1").setIndicesOptions(options), false); verify(getMapping("test1").setIndicesOptions(options), false); - verify(getWarmer("test1").setIndicesOptions(options), false); verify(getSettings("test1").setIndicesOptions(options), false); assertAcked(client().admin().indices().prepareOpen("test1")); @@ -227,7 +217,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase { verify(getAliases("test1").setIndicesOptions(options), false); verify(getFieldMapping("test1").setIndicesOptions(options), false); verify(getMapping("test1").setIndicesOptions(options), false); - verify(getWarmer("test1").setIndicesOptions(options), false); verify(getSettings("test1").setIndicesOptions(options), false); } @@ -249,7 +238,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase { verify(getAliases("test1").setIndicesOptions(options), true); verify(getFieldMapping("test1").setIndicesOptions(options), true); verify(getMapping("test1").setIndicesOptions(options), true); - verify(getWarmer("test1").setIndicesOptions(options), true); verify(getSettings("test1").setIndicesOptions(options), true); options = IndicesOptions.fromOptions(true, options.allowNoIndices(), options.expandWildcardsOpen(), options.expandWildcardsClosed(), options); @@ -269,7 +257,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase { verify(getAliases("test1").setIndicesOptions(options), false); verify(getFieldMapping("test1").setIndicesOptions(options), false); verify(getMapping("test1").setIndicesOptions(options), false); - verify(getWarmer("test1").setIndicesOptions(options), false); verify(getSettings("test1").setIndicesOptions(options), false); assertAcked(prepareCreate("test1")); @@ -292,7 
+279,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase { verify(getAliases("test1").setIndicesOptions(options), false); verify(getFieldMapping("test1").setIndicesOptions(options), false); verify(getMapping("test1").setIndicesOptions(options), false); - verify(getWarmer("test1").setIndicesOptions(options), false); verify(getSettings("test1").setIndicesOptions(options), false); } @@ -346,7 +332,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase { verify(getAliases(indices), false); verify(getFieldMapping(indices), false); verify(getMapping(indices), false); - verify(getWarmer(indices), false); verify(getSettings(indices), false); // Now force allow_no_indices=true @@ -368,7 +353,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase { verify(getAliases(indices).setIndicesOptions(options), false); verify(getFieldMapping(indices).setIndicesOptions(options), false); verify(getMapping(indices).setIndicesOptions(options), false); - verify(getWarmer(indices).setIndicesOptions(options), false); verify(getSettings(indices).setIndicesOptions(options), false); assertAcked(prepareCreate("foobar")); @@ -393,7 +377,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase { verify(getAliases(indices), false); verify(getFieldMapping(indices), false); verify(getMapping(indices), false); - verify(getWarmer(indices), false); verify(getSettings(indices).setIndicesOptions(options), false); // Verify defaults for wildcards, with two wildcard expression and one existing index @@ -415,7 +398,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase { verify(getAliases(indices), false); verify(getFieldMapping(indices), false); verify(getMapping(indices), false); - verify(getWarmer(indices), false); verify(getSettings(indices).setIndicesOptions(options), false); // Now force allow_no_indices=true @@ -437,7 +419,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase { verify(getAliases(indices).setIndicesOptions(options), false); verify(getFieldMapping(indices).setIndicesOptions(options), false); verify(getMapping(indices).setIndicesOptions(options), false); - verify(getWarmer(indices).setIndicesOptions(options), false); verify(getSettings(indices).setIndicesOptions(options), false); } @@ -581,34 +562,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase { assertThat(client().admin().indices().prepareExists("barbaz").get().isExists(), equalTo(false)); } - public void testPutWarmer() throws Exception { - createIndex("foobar"); - ensureYellow(); - verify(client().admin().indices().preparePutWarmer("warmer1").setSearchRequest(client().prepareSearch().setIndices("foobar").setQuery(QueryBuilders.matchAllQuery())), false); - assertThat(client().admin().indices().prepareGetWarmers("foobar").setWarmers("warmer1").get().getWarmers().size(), equalTo(1)); - - } - - public void testPutWarmerWildcard() throws Exception { - createIndex("foo", "foobar", "bar", "barbaz"); - ensureYellow(); - - verify(client().admin().indices().preparePutWarmer("warmer1").setSearchRequest(client().prepareSearch().setIndices("foo*").setQuery(QueryBuilders.matchAllQuery())), false); - - assertThat(client().admin().indices().prepareGetWarmers("foo").setWarmers("warmer1").get().getWarmers().size(), equalTo(1)); - assertThat(client().admin().indices().prepareGetWarmers("foobar").setWarmers("warmer1").get().getWarmers().size(), equalTo(1)); - 
assertThat(client().admin().indices().prepareGetWarmers("bar").setWarmers("warmer1").get().getWarmers().size(), equalTo(0)); - assertThat(client().admin().indices().prepareGetWarmers("barbaz").setWarmers("warmer1").get().getWarmers().size(), equalTo(0)); - - verify(client().admin().indices().preparePutWarmer("warmer2").setSearchRequest(client().prepareSearch().setIndices().setQuery(QueryBuilders.matchAllQuery())), false); - - assertThat(client().admin().indices().prepareGetWarmers("foo").setWarmers("warmer2").get().getWarmers().size(), equalTo(1)); - assertThat(client().admin().indices().prepareGetWarmers("foobar").setWarmers("warmer2").get().getWarmers().size(), equalTo(1)); - assertThat(client().admin().indices().prepareGetWarmers("bar").setWarmers("warmer2").get().getWarmers().size(), equalTo(1)); - assertThat(client().admin().indices().prepareGetWarmers("barbaz").setWarmers("warmer2").get().getWarmers().size(), equalTo(1)); - - } - public void testPutAlias() throws Exception { createIndex("foobar"); ensureYellow(); @@ -635,46 +588,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase { } - public void testDeleteWarmer() throws Exception { - SearchSourceBuilder source = new SearchSourceBuilder(); - source.query(QueryBuilders.matchAllQuery()); - IndexWarmersMetaData.Entry entry = new IndexWarmersMetaData.Entry("test1", new String[] { "typ1" }, false, new IndexWarmersMetaData.SearchSource(source)); - assertAcked(prepareCreate("foobar").addCustom(new IndexWarmersMetaData(entry))); - ensureYellow(); - - verify(client().admin().indices().prepareDeleteWarmer().setIndices("foo").setNames("test1"), true); - assertThat(client().admin().indices().prepareGetWarmers("foobar").setWarmers("test1").get().getWarmers().size(), equalTo(1)); - verify(client().admin().indices().prepareDeleteWarmer().setIndices("foobar").setNames("test1"), false); - assertThat(client().admin().indices().prepareGetWarmers("foobar").setWarmers("test1").get().getWarmers().size(), equalTo(0)); - } - - public void testDeleteWarmerWildcard() throws Exception { - verify(client().admin().indices().prepareDeleteWarmer().setIndices("_all").setNames("test1"), true); - - SearchSourceBuilder source = new SearchSourceBuilder(); - source.query(QueryBuilders.matchAllQuery()); - IndexWarmersMetaData.Entry entry = new IndexWarmersMetaData.Entry("test1", new String[] { "type1" }, false, new IndexWarmersMetaData.SearchSource(source)); - assertAcked(prepareCreate("foo").addCustom(new IndexWarmersMetaData(entry))); - assertAcked(prepareCreate("foobar").addCustom(new IndexWarmersMetaData(entry))); - assertAcked(prepareCreate("bar").addCustom(new IndexWarmersMetaData(entry))); - assertAcked(prepareCreate("barbaz").addCustom(new IndexWarmersMetaData(entry))); - ensureYellow(); - - verify(client().admin().indices().prepareDeleteWarmer().setIndices("foo*").setNames("test1"), false); - assertThat(client().admin().indices().prepareGetWarmers("foo").setWarmers("test1").get().getWarmers().size(), equalTo(0)); - assertThat(client().admin().indices().prepareGetWarmers("foobar").setWarmers("test1").get().getWarmers().size(), equalTo(0)); - assertThat(client().admin().indices().prepareGetWarmers("bar").setWarmers("test1").get().getWarmers().size(), equalTo(1)); - assertThat(client().admin().indices().prepareGetWarmers("barbaz").setWarmers("test1").get().getWarmers().size(), equalTo(1)); - - assertAcked(client().admin().indices().prepareDelete("foo*")); - - 
verify(client().admin().indices().prepareDeleteWarmer().setIndices("foo*").setNames("test1"), true); - - verify(client().admin().indices().prepareDeleteWarmer().setIndices("_all").setNames("test1"), false); - assertThat(client().admin().indices().prepareGetWarmers("bar").setWarmers("test1").get().getWarmers().size(), equalTo(0)); - assertThat(client().admin().indices().prepareGetWarmers("barbaz").setWarmers("test1").get().getWarmers().size(), equalTo(0)); - } - public void testPutMapping() throws Exception { verify(client().admin().indices().preparePutMapping("foo").setType("type1").setSource("field", "type=string"), true); verify(client().admin().indices().preparePutMapping("_all").setType("type1").setSource("field", "type=string"), true); @@ -816,10 +729,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase { return client().admin().indices().prepareGetMappings(indices); } - private static GetWarmersRequestBuilder getWarmer(String... indices) { - return client().admin().indices().prepareGetWarmers(indices); - } - private static GetSettingsRequestBuilder getSettings(String... indices) { return client().admin().indices().prepareGetSettings(indices); } diff --git a/core/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java b/core/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java index 3422959771c..15d334c61a9 100644 --- a/core/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java +++ b/core/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java @@ -151,18 +151,10 @@ public class RandomExceptionCircuitBreakerIT extends ESIntegTestCase { for (int i = 0; i < numSearches; i++) { SearchRequestBuilder searchRequestBuilder = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()); - switch (randomIntBetween(0, 5)) { - case 5: - case 4: - case 3: - searchRequestBuilder.addSort("test-str", SortOrder.ASC); - // fall through - sometimes get both fields - case 2: - case 1: - default: - searchRequestBuilder.addSort("test-num", SortOrder.ASC); - + if (random().nextBoolean()) { + searchRequestBuilder.addSort("test-str", SortOrder.ASC); } + searchRequestBuilder.addSort("test-num", SortOrder.ASC); boolean success = false; try { // Sort by the string and numeric fields, to load them into field data @@ -249,6 +241,7 @@ public class RandomExceptionCircuitBreakerIT extends ESIntegTestCase { if (random.nextDouble() < topLevelRatio) { throw new IOException("Forced top level Exception on [" + flag.name() + "]"); } + break; case Intersect: break; case Norms: diff --git a/core/src/test/java/org/elasticsearch/indices/warmer/GatewayIndicesWarmerIT.java b/core/src/test/java/org/elasticsearch/indices/warmer/GatewayIndicesWarmerIT.java deleted file mode 100644 index 7c5a154ebcb..00000000000 --- a/core/src/test/java/org/elasticsearch/indices/warmer/GatewayIndicesWarmerIT.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
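Both RandomExceptionCircuitBreakerIT changes above revolve around switch fall-through: the sort-selection switch collapses into a single random().nextBoolean() check, and the added break ensures a Top pass that does not throw no longer falls through into the Intersect case. A minimal sketch of the hazard, assuming nothing beyond plain Java — the enum and method names here are illustrative, not taken from the test:

public class FallThroughDemo {
    enum Flag { Top, Intersect, Norms }

    // Each case ends in break, mirroring the fixed switch: a Top pass that
    // does not throw no longer runs the Intersect case as well.
    static String handle(Flag flag, boolean forceTopFailure) {
        StringBuilder visited = new StringBuilder();
        switch (flag) {
            case Top:
                visited.append("Top ");
                if (forceTopFailure) {
                    return visited.append("-> threw").toString();
                }
                break; // omit this break and "Intersect " is appended too
            case Intersect:
                visited.append("Intersect ");
                break;
            case Norms:
                visited.append("Norms ");
                break;
        }
        return visited.toString().trim();
    }

    public static void main(String[] args) {
        System.out.println(handle(Flag.Top, false));       // Top
        System.out.println(handle(Flag.Intersect, false)); // Intersect
    }
}

Run as-is it prints Top and Intersect on separate lines; deleting the marked break makes the first call print "Top Intersect", which is exactly the unintended extra work the test change guards against.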
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.indices.warmer; - -import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.search.warmer.IndexWarmersMetaData; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.ESIntegTestCase.ClusterScope; -import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.test.InternalTestCluster.RestartCallback; -import org.hamcrest.Matchers; - -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.equalTo; - -/** - */ -@ClusterScope(numDataNodes =0, scope= Scope.TEST) -public class GatewayIndicesWarmerIT extends ESIntegTestCase { - private final ESLogger logger = Loggers.getLogger(GatewayIndicesWarmerIT.class); - - public void testStatePersistence() throws Exception { - logger.info("--> starting 1 nodes"); - internalCluster().startNode(); - - logger.info("--> putting two templates"); - createIndex("test"); - - ensureYellow(); - - assertAcked(client().admin().indices().preparePutWarmer("warmer_1") - .setSearchRequest(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "value1")))); - assertAcked(client().admin().indices().preparePutWarmer("warmer_2") - .setSearchRequest(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "value2")))); - - logger.info("--> put template with warmer"); - client().admin().indices().preparePutTemplate("template_1") - .setSource("{\n" + - " \"template\" : \"xxx\",\n" + - " \"warmers\" : {\n" + - " \"warmer_1\" : {\n" + - " \"types\" : [],\n" + - " \"source\" : {\n" + - " \"query\" : {\n" + - " \"match_all\" : {}\n" + - " }\n" + - " }\n" + - " }\n" + - " }\n" + - "}") - .execute().actionGet(); - - - logger.info("--> verify warmers are registered in cluster state"); - ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState(); - IndexWarmersMetaData warmersMetaData = clusterState.metaData().index("test").custom(IndexWarmersMetaData.TYPE); - assertThat(warmersMetaData, Matchers.notNullValue()); - assertThat(warmersMetaData.entries().size(), equalTo(2)); - - IndexWarmersMetaData templateWarmers = clusterState.metaData().templates().get("template_1").custom(IndexWarmersMetaData.TYPE); - assertThat(templateWarmers, Matchers.notNullValue()); - assertThat(templateWarmers.entries().size(), equalTo(1)); - - logger.info("--> restarting the node"); - internalCluster().fullRestart(new RestartCallback() { - @Override - public Settings onNodeStopped(String nodeName) throws Exception { - return Settings.EMPTY; - } - }); - - ensureYellow(); - - logger.info("--> verify warmers are recovered"); - clusterState = client().admin().cluster().prepareState().execute().actionGet().getState(); - IndexWarmersMetaData recoveredWarmersMetaData = 
clusterState.metaData().index("test").custom(IndexWarmersMetaData.TYPE); - assertThat(recoveredWarmersMetaData.entries().size(), equalTo(warmersMetaData.entries().size())); - for (int i = 0; i < warmersMetaData.entries().size(); i++) { - assertThat(recoveredWarmersMetaData.entries().get(i).name(), equalTo(warmersMetaData.entries().get(i).name())); - assertThat(recoveredWarmersMetaData.entries().get(i).source(), equalTo(warmersMetaData.entries().get(i).source())); - } - - logger.info("--> verify warmers in template are recovered"); - IndexWarmersMetaData recoveredTemplateWarmers = clusterState.metaData().templates().get("template_1").custom(IndexWarmersMetaData.TYPE); - assertThat(recoveredTemplateWarmers.entries().size(), equalTo(templateWarmers.entries().size())); - for (int i = 0; i < templateWarmers.entries().size(); i++) { - assertThat(recoveredTemplateWarmers.entries().get(i).name(), equalTo(templateWarmers.entries().get(i).name())); - assertThat(recoveredTemplateWarmers.entries().get(i).source(), equalTo(templateWarmers.entries().get(i).source())); - } - - - logger.info("--> delete warmer warmer_1"); - DeleteWarmerResponse deleteWarmerResponse = client().admin().indices().prepareDeleteWarmer().setIndices("test").setNames("warmer_1").execute().actionGet(); - assertThat(deleteWarmerResponse.isAcknowledged(), equalTo(true)); - - logger.info("--> verify warmers (delete) are registered in cluster state"); - clusterState = client().admin().cluster().prepareState().execute().actionGet().getState(); - warmersMetaData = clusterState.metaData().index("test").custom(IndexWarmersMetaData.TYPE); - assertThat(warmersMetaData, Matchers.notNullValue()); - assertThat(warmersMetaData.entries().size(), equalTo(1)); - - logger.info("--> restarting the node"); - internalCluster().fullRestart(new RestartCallback() { - @Override - public Settings onNodeStopped(String nodeName) throws Exception { - return Settings.EMPTY; - } - }); - - ensureYellow(); - - logger.info("--> verify warmers are recovered"); - clusterState = client().admin().cluster().prepareState().execute().actionGet().getState(); - recoveredWarmersMetaData = clusterState.metaData().index("test").custom(IndexWarmersMetaData.TYPE); - assertThat(recoveredWarmersMetaData.entries().size(), equalTo(warmersMetaData.entries().size())); - for (int i = 0; i < warmersMetaData.entries().size(); i++) { - assertThat(recoveredWarmersMetaData.entries().get(i).name(), equalTo(warmersMetaData.entries().get(i).name())); - assertThat(recoveredWarmersMetaData.entries().get(i).source(), equalTo(warmersMetaData.entries().get(i).source())); - } - } -} diff --git a/core/src/test/java/org/elasticsearch/indices/warmer/IndicesWarmerBlocksIT.java b/core/src/test/java/org/elasticsearch/indices/warmer/IndicesWarmerBlocksIT.java deleted file mode 100644 index 5ca4a99ac1a..00000000000 --- a/core/src/test/java/org/elasticsearch/indices/warmer/IndicesWarmerBlocksIT.java +++ /dev/null @@ -1,159 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.indices.warmer; - - -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse; -import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.search.warmer.IndexWarmersMetaData; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.ESIntegTestCase.ClusterScope; - -import java.util.Arrays; -import java.util.List; - -import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_METADATA_BLOCK; -import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_READ_BLOCK; -import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_READ_ONLY_BLOCK; -import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_METADATA; -import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_READ; -import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_WRITE; -import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY; -import static org.elasticsearch.cluster.metadata.MetaData.CLUSTER_READ_ONLY_BLOCK; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; -import static org.hamcrest.Matchers.equalTo; - -@ClusterScope(scope = ESIntegTestCase.Scope.TEST) -public class IndicesWarmerBlocksIT extends ESIntegTestCase { - public void testPutWarmerWithBlocks() { - createIndex("test-blocks"); - ensureGreen("test-blocks"); - - // Index reads are blocked, the warmer can't be registered - try { - enableIndexBlock("test-blocks", SETTING_BLOCKS_READ); - assertBlocked(client().admin().indices().preparePutWarmer("warmer_blocked") - .setSearchRequest(client().prepareSearch("test-*").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())), INDEX_READ_BLOCK); - } finally { - disableIndexBlock("test-blocks", SETTING_BLOCKS_READ); - } - - // Index writes are blocked, the warmer can be registered - try { - enableIndexBlock("test-blocks", SETTING_BLOCKS_WRITE); - assertAcked(client().admin().indices().preparePutWarmer("warmer_acked") - .setSearchRequest(client().prepareSearch("test-blocks").setTypes("a1").setQuery(QueryBuilders.matchAllQuery()))); - } finally { - disableIndexBlock("test-blocks", SETTING_BLOCKS_WRITE); - } - - // Index metadata changes are blocked, the warmer can't be registered - try { - enableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA); - assertBlocked(client().admin().indices().preparePutWarmer("warmer_blocked") - .setSearchRequest(client().prepareSearch("test-*").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())), INDEX_METADATA_BLOCK); - } finally { - disableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA); - } - - // Index metadata changes are blocked, the warmer can't be registered - try { - enableIndexBlock("test-blocks", SETTING_READ_ONLY); - assertBlocked(client().admin().indices().preparePutWarmer("warmer_blocked") - 
.setSearchRequest(client().prepareSearch("test-*").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())), INDEX_READ_ONLY_BLOCK); - } finally { - disableIndexBlock("test-blocks", SETTING_READ_ONLY); - } - - // Adding a new warmer is not possible when the cluster is read-only - try { - setClusterReadOnly(true); - assertBlocked(client().admin().indices().preparePutWarmer("warmer_blocked") - .setSearchRequest(client().prepareSearch("test-blocks").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())), CLUSTER_READ_ONLY_BLOCK); - } finally { - setClusterReadOnly(false); - } - } - - public void testGetWarmerWithBlocks() { - createIndex("test-blocks"); - ensureGreen("test-blocks"); - - assertAcked(client().admin().indices().preparePutWarmer("warmer_block") - .setSearchRequest(client().prepareSearch("test-blocks").setTypes("a1").setQuery(QueryBuilders.matchAllQuery()))); - - // Request is not blocked - for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) { - try { - enableIndexBlock("test-blocks", blockSetting); - GetWarmersResponse response = client().admin().indices().prepareGetWarmers("test-blocks").get(); - assertThat(response.warmers().size(), equalTo(1)); - - ObjectObjectCursor<String, List<IndexWarmersMetaData.Entry>> entry = response.warmers().iterator().next(); - assertThat(entry.key, equalTo("test-blocks")); - assertThat(entry.value.size(), equalTo(1)); - assertThat(entry.value.iterator().next().name(), equalTo("warmer_block")); - } finally { - disableIndexBlock("test-blocks", blockSetting); - } - } - - // Request is blocked - try { - enableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA); - assertBlocked(client().admin().indices().prepareGetWarmers("test-blocks"), INDEX_METADATA_BLOCK); - } finally { - disableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA); - } - } - - public void testDeleteWarmerWithBlocks() { - createIndex("test-blocks"); - ensureGreen("test-blocks"); - - // Request is not blocked - for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) { - try { - assertAcked(client().admin().indices().preparePutWarmer("warmer_block") - .setSearchRequest(client().prepareSearch("test-blocks").setTypes("a1").setQuery(QueryBuilders.matchAllQuery()))); - - enableIndexBlock("test-blocks", blockSetting); - assertAcked(client().admin().indices().prepareDeleteWarmer().setIndices("test-blocks").setNames("warmer_block")); - } finally { - disableIndexBlock("test-blocks", blockSetting); - } - } - - // Request is blocked - for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) { - try { - assertAcked(client().admin().indices().preparePutWarmer("warmer_block") - .setSearchRequest(client().prepareSearch("test-blocks").setTypes("a1").setQuery(QueryBuilders.matchAllQuery()))); - - enableIndexBlock("test-blocks", blockSetting); - assertBlocked(client().admin().indices().prepareDeleteWarmer().setIndices("test-blocks").setNames("warmer_block")); - } finally { - disableIndexBlock("test-blocks", blockSetting); - } - } - } -} diff --git a/core/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerIT.java b/core/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerIT.java deleted file mode 100644 index dbbf3bf7247..00000000000 --- a/core/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerIT.java +++ /dev/null @@ -1,287 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements.
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.indices.warmer; - -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; -import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse; -import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse; -import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerResponse; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.indices.cache.request.IndicesRequestCache; -import org.elasticsearch.search.warmer.IndexWarmerMissingException; -import org.elasticsearch.search.warmer.IndexWarmersMetaData; -import org.elasticsearch.test.ESIntegTestCase; -import org.hamcrest.Matchers; - -import java.util.List; - -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.is; - -public class SimpleIndicesWarmerIT extends ESIntegTestCase { - public void testSimpleWarmers() { - createIndex("test"); - ensureGreen(); - - PutWarmerResponse putWarmerResponse = client().admin().indices().preparePutWarmer("warmer_1") - .setSearchRequest(client().prepareSearch("test").setTypes("a1").setQuery(QueryBuilders.termQuery("field", "value1"))) - .execute().actionGet(); - assertThat(putWarmerResponse.isAcknowledged(), equalTo(true)); - putWarmerResponse = client().admin().indices().preparePutWarmer("warmer_2") - .setSearchRequest(client().prepareSearch("test").setTypes("a2").setQuery(QueryBuilders.termQuery("field", "value2"))) - .execute().actionGet(); - assertThat(putWarmerResponse.isAcknowledged(), equalTo(true)); - - client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet(); - client().prepareIndex("test", "type1", "2").setSource("field", "value2").setRefresh(true).execute().actionGet(); - - GetWarmersResponse getWarmersResponse = client().admin().indices().prepareGetWarmers("tes*") - .execute().actionGet(); - assertThat(getWarmersResponse.getWarmers().size(), equalTo(1)); - assertThat(getWarmersResponse.getWarmers().get("test").size(), equalTo(2)); - assertThat(getWarmersResponse.getWarmers().get("test").get(0).name(), equalTo("warmer_1")); - assertThat(getWarmersResponse.getWarmers().get("test").get(1).name(), equalTo("warmer_2")); - - getWarmersResponse = client().admin().indices().prepareGetWarmers("test").addWarmers("warmer_*") - .execute().actionGet(); - assertThat(getWarmersResponse.getWarmers().size(), equalTo(1)); - 
assertThat(getWarmersResponse.getWarmers().get("test").size(), equalTo(2)); - assertThat(getWarmersResponse.getWarmers().get("test").get(0).name(), equalTo("warmer_1")); - assertThat(getWarmersResponse.getWarmers().get("test").get(1).name(), equalTo("warmer_2")); - - getWarmersResponse = client().admin().indices().prepareGetWarmers("test").addWarmers("warmer_1") - .execute().actionGet(); - assertThat(getWarmersResponse.getWarmers().size(), equalTo(1)); - assertThat(getWarmersResponse.getWarmers().get("test").size(), equalTo(1)); - assertThat(getWarmersResponse.getWarmers().get("test").get(0).name(), equalTo("warmer_1")); - - getWarmersResponse = client().admin().indices().prepareGetWarmers("test").addWarmers("warmer_2") - .execute().actionGet(); - assertThat(getWarmersResponse.getWarmers().size(), equalTo(1)); - assertThat(getWarmersResponse.getWarmers().get("test").size(), equalTo(1)); - assertThat(getWarmersResponse.getWarmers().get("test").get(0).name(), equalTo("warmer_2")); - - getWarmersResponse = client().admin().indices().prepareGetWarmers("test").addTypes("a*").addWarmers("warmer_2") - .execute().actionGet(); - assertThat(getWarmersResponse.getWarmers().size(), equalTo(1)); - assertThat(getWarmersResponse.getWarmers().get("test").size(), equalTo(1)); - assertThat(getWarmersResponse.getWarmers().get("test").get(0).name(), equalTo("warmer_2")); - - getWarmersResponse = client().admin().indices().prepareGetWarmers("test").addTypes("a1").addWarmers("warmer_2") - .execute().actionGet(); - assertThat(getWarmersResponse.getWarmers().size(), equalTo(0)); - } - - public void testTtemplateWarmer() { - client().admin().indices().preparePutTemplate("template_1") - .setSource("{\n" + - " \"template\" : \"*\",\n" + - " \"warmers\" : {\n" + - " \"warmer_1\" : {\n" + - " \"types\" : [],\n" + - " \"source\" : {\n" + - " \"query\" : {\n" + - " \"match_all\" : {}\n" + - " }\n" + - " }\n" + - " }\n" + - " }\n" + - "}") - .execute().actionGet(); - - createIndex("test"); - ensureGreen(); - - ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState(); - IndexWarmersMetaData warmersMetaData = clusterState.metaData().index("test").custom(IndexWarmersMetaData.TYPE); - assertThat(warmersMetaData, Matchers.notNullValue()); - assertThat(warmersMetaData.entries().size(), equalTo(1)); - - client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet(); - client().prepareIndex("test", "type1", "2").setSource("field", "value2").setRefresh(true).execute().actionGet(); - } - - public void testCreateIndexWarmer() { - assertAcked(prepareCreate("test") - .setSource("{\n" + - " \"warmers\" : {\n" + - " \"warmer_1\" : {\n" + - " \"types\" : [],\n" + - " \"source\" : {\n" + - " \"query\" : {\n" + - " \"match_all\" : {}\n" + - " }\n" + - " }\n" + - " }\n" + - " }\n" + - "}")); - - ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState(); - IndexWarmersMetaData warmersMetaData = clusterState.metaData().index("test").custom(IndexWarmersMetaData.TYPE); - assertThat(warmersMetaData, Matchers.notNullValue()); - assertThat(warmersMetaData.entries().size(), equalTo(1)); - - client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet(); - client().prepareIndex("test", "type1", "2").setSource("field", "value2").setRefresh(true).execute().actionGet(); - } - - public void testDeleteNonExistentIndexWarmer() { - createIndex("test"); - try { - 
client().admin().indices().prepareDeleteWarmer().setIndices("test").setNames("foo").execute().actionGet(); - fail("warmer foo should not exist"); - } catch (IndexWarmerMissingException ex) { - assertThat(ex.names()[0], equalTo("foo")); - } - } - - // issue 8991 - public void testDeleteAllIndexWarmerDoesNotThrowWhenNoWarmers() { - createIndex("test"); - DeleteWarmerResponse deleteWarmerResponse = client().admin().indices().prepareDeleteWarmer() - .setIndices("test").setNames("_all").execute().actionGet(); - assertThat(deleteWarmerResponse.isAcknowledged(), equalTo(true)); - - deleteWarmerResponse = client().admin().indices().prepareDeleteWarmer() - .setIndices("test").setNames("foo", "_all", "bar").execute().actionGet(); - assertThat(deleteWarmerResponse.isAcknowledged(), equalTo(true)); - } - - public void testDeleteIndexWarmerTest() { - createIndex("test"); - ensureGreen(); - - PutWarmerResponse putWarmerResponse = client().admin().indices().preparePutWarmer("custom_warmer") - .setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery())) - .get(); - assertThat(putWarmerResponse.isAcknowledged(), equalTo(true)); - - GetWarmersResponse getWarmersResponse = client().admin().indices().prepareGetWarmers("test").get(); - assertThat(getWarmersResponse.warmers().size(), equalTo(1)); - ObjectObjectCursor<String, List<IndexWarmersMetaData.Entry>> entry = getWarmersResponse.warmers().iterator().next(); - assertThat(entry.key, equalTo("test")); - assertThat(entry.value.size(), equalTo(1)); - assertThat(entry.value.iterator().next().name(), equalTo("custom_warmer")); - - DeleteWarmerResponse deleteWarmerResponse = client().admin().indices().prepareDeleteWarmer().setIndices("test").setNames("custom_warmer").get(); - assertThat(deleteWarmerResponse.isAcknowledged(), equalTo(true)); - - getWarmersResponse = client().admin().indices().prepareGetWarmers("test").get(); - assertThat(getWarmersResponse.warmers().size(), equalTo(0)); - } - - // issue 3246 - public void testEnsureThatIndexWarmersCanBeChangedOnRuntime() throws Exception { - createIndex("test"); - ensureGreen(); - - PutWarmerResponse putWarmerResponse = client().admin().indices().preparePutWarmer("custom_warmer") - .setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery())) - .execute().actionGet(); - assertThat(putWarmerResponse.isAcknowledged(), equalTo(true)); - - client().prepareIndex("test", "test", "1").setSource("foo", "bar").setRefresh(true).execute().actionGet(); - - logger.info("--> Disabling warmers execution"); - client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.warmer.enabled", false)).execute().actionGet(); - - long warmerRunsAfterDisabling = getWarmerRuns(); - assertThat(warmerRunsAfterDisabling, greaterThanOrEqualTo(1L)); - - client().prepareIndex("test", "test", "2").setSource("foo2", "bar2").setRefresh(true).execute().actionGet(); - - assertThat(getWarmerRuns(), equalTo(warmerRunsAfterDisabling)); - } - - public void testGettingAllWarmersUsingAllAndWildcardsShouldWork() throws Exception { - createIndex("test"); - ensureGreen(); - - PutWarmerResponse putWarmerResponse = client().admin().indices().preparePutWarmer("custom_warmer") - .setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery())) - .execute().actionGet(); - assertThat(putWarmerResponse.isAcknowledged(), equalTo(true)); - - PutWarmerResponse anotherPutWarmerResponse = 
client().admin().indices().preparePutWarmer("second_custom_warmer") - .setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery())) - .execute().actionGet(); - assertThat(anotherPutWarmerResponse.isAcknowledged(), equalTo(true)); - - GetWarmersResponse getWarmersResponse = client().admin().indices().prepareGetWarmers("*").addWarmers("*").get(); - assertThat(getWarmersResponse.warmers().size(), is(1)); - - getWarmersResponse = client().admin().indices().prepareGetWarmers("_all").addWarmers("_all").get(); - assertThat(getWarmersResponse.warmers().size(), is(1)); - - getWarmersResponse = client().admin().indices().prepareGetWarmers("t*").addWarmers("c*").get(); - assertThat(getWarmersResponse.warmers().size(), is(1)); - - getWarmersResponse = client().admin().indices().prepareGetWarmers("test").addWarmers("custom_warmer", "second_custom_warmer").get(); - assertThat(getWarmersResponse.warmers().size(), is(1)); - } - - private long getWarmerRuns() { - IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats("test").clear().setWarmer(true).execute().actionGet(); - return indicesStatsResponse.getIndex("test").getPrimaries().warmer.total(); - } - - public void testQueryCacheOnWarmer() { - createIndex("test"); - ensureGreen(); - - assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED, false))); - logger.info("register warmer with no query cache, validate no cache is used"); - assertAcked(client().admin().indices().preparePutWarmer("warmer_1") - .setSearchRequest(client().prepareSearch("test").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())) - .get()); - - client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet(); - assertThat(client().admin().indices().prepareStats("test").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), equalTo(0l)); - - logger.info("register warmer with query cache, validate caching happened"); - assertAcked(client().admin().indices().preparePutWarmer("warmer_1") - .setSearchRequest(client().prepareSearch("test").setTypes("a1").setQuery(QueryBuilders.matchAllQuery()).setRequestCache(true)) - .get()); - - // index again, to make sure it gets refreshed - client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet(); - assertThat(client().admin().indices().prepareStats("test").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), greaterThan(0l)); - - client().admin().indices().prepareClearCache().setRequestCache(true).get(); // clean the cache - assertThat(client().admin().indices().prepareStats("test").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), equalTo(0l)); - - logger.info("enable default query caching on the index level, and test that no flag on warmer still caches"); - assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED, true))); - - assertAcked(client().admin().indices().preparePutWarmer("warmer_1") - .setSearchRequest(client().prepareSearch("test").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())) - .get()); - - // index again, to make sure it gets refreshed - client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet(); - 
assertThat(client().admin().indices().prepareStats("test").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), greaterThan(0l)); - } -} diff --git a/core/src/test/java/org/elasticsearch/percolator/ConcurrentPercolatorIT.java b/core/src/test/java/org/elasticsearch/percolator/ConcurrentPercolatorIT.java index b11f24377ad..178f070927c 100644 --- a/core/src/test/java/org/elasticsearch/percolator/ConcurrentPercolatorIT.java +++ b/core/src/test/java/org/elasticsearch/percolator/ConcurrentPercolatorIT.java @@ -21,6 +21,7 @@ package org.elasticsearch.percolator; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.percolate.PercolateResponse; +import org.elasticsearch.action.percolate.PercolateSourceBuilder; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -187,18 +188,21 @@ public class ConcurrentPercolatorIT extends ESIntegTestCase { case 0: response = client().prepareIndex("index", PercolatorService.TYPE_NAME, id) .setSource(onlyField1) + .setRefresh(true) .execute().actionGet(); type1.incrementAndGet(); break; case 1: response = client().prepareIndex("index", PercolatorService.TYPE_NAME, id) .setSource(onlyField2) + .setRefresh(true) .execute().actionGet(); type2.incrementAndGet(); break; case 2: response = client().prepareIndex("index", PercolatorService.TYPE_NAME, id) .setSource(field1And2) + .setRefresh(true) .execute().actionGet(); type3.incrementAndGet(); break; @@ -247,7 +251,7 @@ public class ConcurrentPercolatorIT extends ESIntegTestCase { .setSource(onlyField1Doc).execute().actionGet(); assertNoFailures(response); assertThat(response.getSuccessfulShards(), equalTo(response.getTotalShards())); - assertThat(response.getMatches().length, greaterThanOrEqualTo(atLeastExpected)); + assertThat(response.getCount(), greaterThanOrEqualTo((long) atLeastExpected)); break; case 1: atLeastExpected = type2.get(); @@ -255,7 +259,7 @@ public class ConcurrentPercolatorIT extends ESIntegTestCase { .setSource(onlyField2Doc).execute().actionGet(); assertNoFailures(response); assertThat(response.getSuccessfulShards(), equalTo(response.getTotalShards())); - assertThat(response.getMatches().length, greaterThanOrEqualTo(atLeastExpected)); + assertThat(response.getCount(), greaterThanOrEqualTo((long) atLeastExpected)); break; case 2: atLeastExpected = type3.get(); @@ -263,7 +267,7 @@ public class ConcurrentPercolatorIT extends ESIntegTestCase { .setSource(field1AndField2Doc).execute().actionGet(); assertNoFailures(response); assertThat(response.getSuccessfulShards(), equalTo(response.getTotalShards())); - assertThat(response.getMatches().length, greaterThanOrEqualTo(atLeastExpected)); + assertThat(response.getCount(), greaterThanOrEqualTo((long) atLeastExpected)); break; } } @@ -327,6 +331,7 @@ public class ConcurrentPercolatorIT extends ESIntegTestCase { } while (!liveIds.remove(id)); DeleteResponse response = client().prepareDelete("index", PercolatorService.TYPE_NAME, id) + .setRefresh(true) .execute().actionGet(); assertThat(response.getId(), equalTo(id)); assertThat("doc[" + id + "] should have been deleted, but isn't", response.isFound(), equalTo(true)); @@ -334,6 +339,7 @@ public class ConcurrentPercolatorIT extends ESIntegTestCase { String id = Integer.toString(idGen.getAndIncrement()); IndexResponse response = client().prepareIndex("index", 
PercolatorService.TYPE_NAME, id) .setSource(doc) + .setRefresh(true) .execute().actionGet(); liveIds.add(id); assertThat(response.isCreated(), equalTo(true)); // We only add new docs @@ -357,9 +363,9 @@ public class ConcurrentPercolatorIT extends ESIntegTestCase { indexThreads[i].start(); } - XContentBuilder percolateDoc = XContentFactory.jsonBuilder().startObject().startObject("doc") + String percolateDoc = XContentFactory.jsonBuilder().startObject() .field("field1", "value") - .endObject().endObject(); + .endObject().string(); for (int counter = 0; counter < numberPercolateOperation; counter++) { Thread.sleep(5); semaphore.acquire(numIndexThreads); @@ -369,7 +375,9 @@ public class ConcurrentPercolatorIT extends ESIntegTestCase { } int atLeastExpected = liveIds.size(); PercolateResponse response = client().preparePercolate().setIndices("index").setDocumentType("type") - .setSource(percolateDoc).execute().actionGet(); + .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc(percolateDoc)) + .setSize(atLeastExpected) + .get(); assertThat(response.getShardFailures(), emptyArray()); assertThat(response.getSuccessfulShards(), equalTo(response.getTotalShards())); assertThat(response.getMatches().length, equalTo(atLeastExpected)); diff --git a/core/src/test/java/org/elasticsearch/percolator/MultiPercolatorIT.java b/core/src/test/java/org/elasticsearch/percolator/MultiPercolatorIT.java index 7674ef83b5c..811f010d099 100644 --- a/core/src/test/java/org/elasticsearch/percolator/MultiPercolatorIT.java +++ b/core/src/test/java/org/elasticsearch/percolator/MultiPercolatorIT.java @@ -73,6 +73,7 @@ public class MultiPercolatorIT extends ESIntegTestCase { client().prepareIndex("test", PercolatorService.TYPE_NAME, "4") .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) .execute().actionGet(); + refresh(); MultiPercolateResponse response = client().prepareMultiPercolate() .add(client().preparePercolate() @@ -146,6 +147,7 @@ public class MultiPercolatorIT extends ESIntegTestCase { .setRouting("a") .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) .execute().actionGet(); + refresh(); MultiPercolateResponse response = client().prepareMultiPercolate() .add(client().preparePercolate() @@ -214,6 +216,7 @@ public class MultiPercolatorIT extends ESIntegTestCase { client().prepareIndex("test", "type", "1") .setSource(jsonBuilder().startObject().field("field", "a")) .execute().actionGet(); + refresh(); MultiPercolateRequestBuilder builder = client().prepareMultiPercolate(); int numPercolateRequest = randomIntBetween(50, 100); @@ -221,7 +224,9 @@ public class MultiPercolatorIT extends ESIntegTestCase { builder.add( client().preparePercolate() .setGetRequest(Requests.getRequest("test").type("type").id("1")) - .setIndices("test").setDocumentType("type")); + .setIndices("test").setDocumentType("type") + .setSize(numQueries) + ); } MultiPercolateResponse response = builder.execute().actionGet(); @@ -238,7 +243,8 @@ public class MultiPercolatorIT extends ESIntegTestCase { builder.add( client().preparePercolate() .setGetRequest(Requests.getRequest("test").type("type").id("2")) - .setIndices("test").setDocumentType("type")); + .setIndices("test").setDocumentType("type").setSize(numQueries) + ); } response = builder.execute().actionGet(); @@ -255,12 +261,14 @@ public class MultiPercolatorIT extends ESIntegTestCase { builder.add( client().preparePercolate() .setGetRequest(Requests.getRequest("test").type("type").id("2")) - 
.setIndices("test").setDocumentType("type")); + .setIndices("test").setDocumentType("type").setSize(numQueries) + ); } builder.add( client().preparePercolate() .setGetRequest(Requests.getRequest("test").type("type").id("1")) - .setIndices("test").setDocumentType("type")); + .setIndices("test").setDocumentType("type").setSize(numQueries) + ); response = builder.execute().actionGet(); assertThat(response.items().length, equalTo(numPercolateRequest + 1)); @@ -282,6 +290,7 @@ public class MultiPercolatorIT extends ESIntegTestCase { .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) .execute().actionGet(); } + refresh(); MultiPercolateRequestBuilder builder = client().prepareMultiPercolate(); int numPercolateRequest = randomIntBetween(50, 100); @@ -289,6 +298,7 @@ public class MultiPercolatorIT extends ESIntegTestCase { builder.add( client().preparePercolate() .setIndices("test").setDocumentType("type") + .setSize(numQueries) .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field", "a").endObject()))); } @@ -331,6 +341,7 @@ public class MultiPercolatorIT extends ESIntegTestCase { } builder.add( client().preparePercolate() + .setSize(numQueries) .setIndices("test").setDocumentType("type") .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field", "a").endObject()))); diff --git a/core/src/test/java/org/elasticsearch/percolator/PercolateDocumentParserTests.java b/core/src/test/java/org/elasticsearch/percolator/PercolateDocumentParserTests.java new file mode 100644 index 00000000000..a8897824738 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/percolator/PercolateDocumentParserTests.java @@ -0,0 +1,196 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.percolator; + +import org.apache.lucene.index.Term; +import org.apache.lucene.search.TermQuery; +import org.elasticsearch.Version; +import org.elasticsearch.action.percolate.PercolateShardRequest; +import org.elasticsearch.cluster.action.index.MappingUpdatedAction; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AnalysisService; +import org.elasticsearch.index.analysis.AnalyzerProvider; +import org.elasticsearch.index.analysis.CharFilterFactory; +import org.elasticsearch.index.analysis.TokenFilterFactory; +import org.elasticsearch.index.analysis.TokenizerFactory; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.query.QueryParser; +import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.index.query.TermQueryParser; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.similarity.SimilarityService; +import org.elasticsearch.indices.IndicesModule; +import org.elasticsearch.indices.query.IndicesQueriesRegistry; +import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.aggregations.AggregationBinaryParseElement; +import org.elasticsearch.search.aggregations.AggregationParseElement; +import org.elasticsearch.search.aggregations.AggregationPhase; +import org.elasticsearch.search.aggregations.AggregatorParsers; +import org.elasticsearch.search.highlight.HighlightPhase; +import org.elasticsearch.search.highlight.Highlighters; +import org.elasticsearch.search.sort.SortParseElement; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; +import org.mockito.Mockito; + +import java.util.Collections; +import java.util.Set; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + +public class PercolateDocumentParserTests extends ESTestCase { + + private Index index; + private MapperService mapperService; + private PercolateDocumentParser parser; + private QueryShardContext queryShardContext; + + @Before + public void init() { + index = new Index("_index"); + IndexSettings indexSettings = new IndexSettings(new IndexMetaData.Builder("_index").settings( + Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) + .build(), + Settings.EMPTY, Collections.emptyList() + ); + AnalysisService analysisService = new AnalysisService(indexSettings, Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap()); + IndicesModule indicesModule = new IndicesModule(); + mapperService = new MapperService(indexSettings, analysisService, new SimilarityService(indexSettings, Collections.emptyMap()), indicesModule.getMapperRegistry(), () -> null); + + Set<QueryParser> parsers = Collections.singleton(new TermQueryParser()); + IndicesQueriesRegistry indicesQueriesRegistry = new IndicesQueriesRegistry(indexSettings.getSettings(), parsers, new NamedWriteableRegistry()); + + queryShardContext = new 
QueryShardContext(indexSettings, null, null, null, mapperService, null, null, indicesQueriesRegistry); + + HighlightPhase highlightPhase = new HighlightPhase(Settings.EMPTY, new Highlighters()); + AggregatorParsers aggregatorParsers = new AggregatorParsers(Collections.emptySet(), Collections.emptySet()); + AggregationPhase aggregationPhase = new AggregationPhase(new AggregationParseElement(aggregatorParsers), new AggregationBinaryParseElement(aggregatorParsers)); + MappingUpdatedAction mappingUpdatedAction = Mockito.mock(MappingUpdatedAction.class); + parser = new PercolateDocumentParser( + highlightPhase, new SortParseElement(), aggregationPhase, mappingUpdatedAction + ); + } + + public void testParseDoc() throws Exception { + XContentBuilder source = jsonBuilder().startObject() + .startObject("doc") + .field("field1", "value1") + .endObject() + .endObject(); + PercolateShardRequest request = new PercolateShardRequest(new ShardId(index, 0), null); + request.documentType("type"); + request.source(source.bytes()); + + PercolateContext context = new PercolateContext(request, new SearchShardTarget("_node", "_index", 0), mapperService); + ParsedDocument parsedDocument = parser.parse(request, context, mapperService, queryShardContext); + assertThat(parsedDocument.rootDoc().get("field1"), equalTo("value1")); + } + + public void testParseDocAndOtherOptions() throws Exception { + XContentBuilder source = jsonBuilder().startObject() + .startObject("doc") + .field("field1", "value1") + .endObject() + .startObject("query") + .startObject("term").field("field1", "value1").endObject() + .endObject() + .field("track_scores", true) + .field("size", 123) + .startObject("sort").startObject("_score").endObject().endObject() + .endObject(); + PercolateShardRequest request = new PercolateShardRequest(new ShardId(index, 0), null); + request.documentType("type"); + request.source(source.bytes()); + + PercolateContext context = new PercolateContext(request, new SearchShardTarget("_node", "_index", 0), mapperService); + ParsedDocument parsedDocument = parser.parse(request, context, mapperService, queryShardContext); + assertThat(parsedDocument.rootDoc().get("field1"), equalTo("value1")); + assertThat(context.percolateQuery(), equalTo(new TermQuery(new Term("field1", "value1")))); + assertThat(context.trackScores(), is(true)); + assertThat(context.size(), is(123)); + assertThat(context.sort(), nullValue()); + } + + public void testParseDocSource() throws Exception { + XContentBuilder source = jsonBuilder().startObject() + .startObject("query") + .startObject("term").field("field1", "value1").endObject() + .endObject() + .field("track_scores", true) + .field("size", 123) + .startObject("sort").startObject("_score").endObject().endObject() + .endObject(); + XContentBuilder docSource = jsonBuilder().startObject() + .field("field1", "value1") + .endObject(); + PercolateShardRequest request = new PercolateShardRequest(new ShardId(index, 0), null); + request.documentType("type"); + request.source(source.bytes()); + request.docSource(docSource.bytes()); + + PercolateContext context = new PercolateContext(request, new SearchShardTarget("_node", "_index", 0), mapperService); + ParsedDocument parsedDocument = parser.parse(request, context, mapperService, queryShardContext); + assertThat(parsedDocument.rootDoc().get("field1"), equalTo("value1")); + assertThat(context.percolateQuery(), equalTo(new TermQuery(new Term("field1", "value1")))); + assertThat(context.trackScores(), is(true)); + assertThat(context.size(), is(123)); + 
assertThat(context.sort(), nullValue()); + } + + public void testParseDocSourceAndSource() throws Exception { + XContentBuilder source = jsonBuilder().startObject() + .startObject("doc") + .field("field1", "value1") + .endObject() + .startObject("query") + .startObject("term").field("field1", "value1").endObject() + .endObject() + .field("track_scores", true) + .field("size", 123) + .startObject("sort").startObject("_score").endObject().endObject() + .endObject(); + XContentBuilder docSource = jsonBuilder().startObject() + .field("field1", "value1") + .endObject(); + PercolateShardRequest request = new PercolateShardRequest(new ShardId(index, 0), null); + request.documentType("type"); + request.source(source.bytes()); + request.docSource(docSource.bytes()); + + PercolateContext context = new PercolateContext(request, new SearchShardTarget("_node", "_index", 0), mapperService); + try { + parser.parse(request, context, mapperService, queryShardContext); + fail("expected IllegalArgumentException"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), equalTo("Can't specify the document to percolate in the source of the request and as document id")); + } + } + +} diff --git a/core/src/test/java/org/elasticsearch/percolator/PercolatorFacetsAndAggregationsIT.java b/core/src/test/java/org/elasticsearch/percolator/PercolatorAggregationsIT.java similarity index 93% rename from core/src/test/java/org/elasticsearch/percolator/PercolatorFacetsAndAggregationsIT.java rename to core/src/test/java/org/elasticsearch/percolator/PercolatorAggregationsIT.java index 85783e3d456..c32632bbf1d 100644 --- a/core/src/test/java/org/elasticsearch/percolator/PercolatorFacetsAndAggregationsIT.java +++ b/core/src/test/java/org/elasticsearch/percolator/PercolatorAggregationsIT.java @@ -49,9 +49,10 @@ import static org.hamcrest.Matchers.notNullValue; /** * */ -public class PercolatorFacetsAndAggregationsIT extends ESIntegTestCase { +public class PercolatorAggregationsIT extends ESIntegTestCase { + // Just test the integration with facets and aggregations, not the facet and aggregation functionality! 
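testParseDocSourceAndSource above pins down the rule that the document being percolated may arrive either inline under doc in the request source or as a separate docSource, but never both. A standalone illustration of that mutual-exclusion guard — a hypothetical helper, not the parser's actual implementation; only the error message is borrowed from the test:

public class DocSourceGuard {
    // Hypothetical guard: accept the document from exactly one of two inputs.
    static byte[] chooseDocument(byte[] inlineDoc, byte[] docSource) {
        if (inlineDoc != null && docSource != null) {
            throw new IllegalArgumentException(
                    "Can't specify the document to percolate in the source of the request and as document id");
        }
        return inlineDoc != null ? inlineDoc : docSource;
    }

    public static void main(String[] args) {
        System.out.println(chooseDocument(new byte[]{1}, null).length); // 1
        try {
            chooseDocument(new byte[]{1}, new byte[]{2});
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // the ambiguous request is rejected
        }
    }
}

Accepting both inputs silently would leave the request ambiguous, which is why the parser rejects it outright.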
- public void testFacetsAndAggregations() throws Exception { + public void testAggregations() throws Exception { assertAcked(prepareCreate("test").addMapping("type", "field1", "type=string", "field2", "type=string")); ensureGreen(); @@ -72,12 +73,15 @@ public class PercolatorFacetsAndAggregationsIT extends ESIntegTestCase { .setSource(jsonBuilder().startObject().field("query", queryBuilder).field("field2", "b").endObject()).execute() .actionGet(); } - client().admin().indices().prepareRefresh("test").execute().actionGet(); + refresh(); for (int i = 0; i < numQueries; i++) { String value = values[i % numUniqueQueries]; - PercolateRequestBuilder percolateRequestBuilder = client().preparePercolate().setIndices("test").setDocumentType("type") - .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", value).endObject())); + PercolateRequestBuilder percolateRequestBuilder = client().preparePercolate() + .setIndices("test") + .setDocumentType("type") + .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", value).endObject())) + .setSize(expectedCount[i % numUniqueQueries]); SubAggCollectionMode aggCollectionMode = randomFrom(SubAggCollectionMode.values()); percolateRequestBuilder.addAggregation(AggregationBuilders.terms("a").field("field2").collectMode(aggCollectionMode)); @@ -134,12 +138,15 @@ public class PercolatorFacetsAndAggregationsIT extends ESIntegTestCase { .setSource(jsonBuilder().startObject().field("query", queryBuilder).field("field2", "b").endObject()).execute() .actionGet(); } - client().admin().indices().prepareRefresh("test").execute().actionGet(); + refresh(); for (int i = 0; i < numQueries; i++) { String value = values[i % numUniqueQueries]; - PercolateRequestBuilder percolateRequestBuilder = client().preparePercolate().setIndices("test").setDocumentType("type") - .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", value).endObject())); + PercolateRequestBuilder percolateRequestBuilder = client().preparePercolate() + .setIndices("test") + .setDocumentType("type") + .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", value).endObject())) + .setSize(expectedCount[i % numUniqueQueries]); SubAggCollectionMode aggCollectionMode = randomFrom(SubAggCollectionMode.values()); percolateRequestBuilder.addAggregation(AggregationBuilders.terms("a").field("field2").collectMode(aggCollectionMode)); @@ -210,12 +217,15 @@ public class PercolatorFacetsAndAggregationsIT extends ESIntegTestCase { .execute() .actionGet(); } - client().admin().indices().prepareRefresh("test").execute().actionGet(); + refresh(); for (int i = 0; i < numQueries; i++) { String value = "value0"; - PercolateRequestBuilder percolateRequestBuilder = client().preparePercolate().setIndices("test").setDocumentType("type") - .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", value).endObject())); + PercolateRequestBuilder percolateRequestBuilder = client().preparePercolate() + .setIndices("test") + .setDocumentType("type") + .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", value).endObject())) + .setSize(numQueries); SubAggCollectionMode aggCollectionMode = randomFrom(SubAggCollectionMode.values()); percolateRequestBuilder.addAggregation(AggregationBuilders.terms("terms").field("field2").collectMode(aggCollectionMode) diff --git a/core/src/test/java/org/elasticsearch/percolator/PercolatorBackwardsCompatibilityIT.java 
b/core/src/test/java/org/elasticsearch/percolator/PercolatorBackwardsCompatibilityIT.java index 8254932c304..cb8ffb8e91f 100644 --- a/core/src/test/java/org/elasticsearch/percolator/PercolatorBackwardsCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/percolator/PercolatorBackwardsCompatibilityIT.java @@ -18,54 +18,104 @@ */ package org.elasticsearch.percolator; +import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util.TestUtil; import org.elasticsearch.Version; -import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.percolate.PercolateResponse; import org.elasticsearch.action.percolate.PercolateSourceBuilder; -import org.elasticsearch.index.percolator.PercolatorException; -import org.elasticsearch.index.query.QueryShardException; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESIntegTestCase; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; + import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.query.QueryBuilders.matchQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertMatchCount; -import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; -/** - */ +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0) +@LuceneTestCase.SuppressFileSystems("ExtrasFS") public class PercolatorBackwardsCompatibilityIT extends ESIntegTestCase { - public void testPercolatorUpgrading() throws Exception { - // Simulates an index created on an node before 1.4.0 where the field resolution isn't strict. 
- assertAcked(prepareCreate("test") - .setSettings(settings(Version.V_1_3_0).put(indexSettings()))); - ensureGreen(); - int numDocs = randomIntBetween(100, 150); - IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; - for (int i = 0; i < numDocs; i++) { - docs[i] = client().prepareIndex("test", PercolatorService.TYPE_NAME) - .setSource(jsonBuilder().startObject().field("query", termQuery("field1", "value")).endObject()); - } - indexRandom(true, docs); - PercolateResponse response = client().preparePercolate().setIndices("test").setDocumentType("type") - .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("field1", "value")) - .get(); - assertMatchCount(response, numDocs); - // After upgrade indices, indices created before the upgrade allow that queries refer to fields not available in mapping - client().prepareIndex("test", PercolatorService.TYPE_NAME) - .setSource(jsonBuilder().startObject().field("query", termQuery("field2", "value")).endObject()).get(); + private final static String INDEX_NAME = "percolator_index"; - // However on new indices, the field resolution is strict, no queries with unmapped fields are allowed - createIndex("test2"); - try { - client().prepareIndex("test2", PercolatorService.TYPE_NAME) - .setSource(jsonBuilder().startObject().field("query", termQuery("field1", "value")).endObject()).get(); - fail(); - } catch (PercolatorException e) { - e.printStackTrace(); - assertThat(e.getRootCause(), instanceOf(QueryShardException.class)); + public void testOldPercolatorIndex() throws Exception { + setupNode(); + + // verify cluster state: + ClusterState state = client().admin().cluster().prepareState().get().getState(); + assertThat(state.metaData().indices().size(), equalTo(1)); + assertThat(state.metaData().indices().get(INDEX_NAME), notNullValue()); + assertThat(state.metaData().indices().get(INDEX_NAME).getCreationVersion(), equalTo(Version.V_2_0_0)); + assertThat(state.metaData().indices().get(INDEX_NAME).getUpgradedVersion(), equalTo(Version.CURRENT)); + assertThat(state.metaData().indices().get(INDEX_NAME).getMappings().size(), equalTo(2)); + assertThat(state.metaData().indices().get(INDEX_NAME).getMappings().get(".percolator"), notNullValue()); + // important: verify that the query field in the .percolator mapping is of type object (from 3.0.0 this is of type percolator) + MappingMetaData mappingMetaData = state.metaData().indices().get(INDEX_NAME).getMappings().get(".percolator"); + assertThat(XContentMapValues.extractValue("properties.query.type", mappingMetaData.sourceAsMap()), equalTo("object")); + assertThat(state.metaData().indices().get(INDEX_NAME).getMappings().get("message"), notNullValue()); + + // verify existing percolator queries: + SearchResponse searchResponse = client().prepareSearch(INDEX_NAME) + .setTypes(".percolator") + .addSort("_id", SortOrder.ASC) + .get(); + assertThat(searchResponse.getHits().getTotalHits(), equalTo(3L)); + assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1")); + assertThat(searchResponse.getHits().getAt(1).id(), equalTo("2")); + assertThat(searchResponse.getHits().getAt(2).id(), equalTo("3")); + + // verify percolate response + PercolateResponse percolateResponse = client().preparePercolate() + .setIndices(INDEX_NAME) + .setDocumentType("message") + .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("message", "the quick brown fox jumps over the lazy dog")) + .get(); + + assertThat(percolateResponse.getCount(), equalTo(2L)); + assertThat(percolateResponse.getMatches().length, 
equalTo(2)); + assertThat(percolateResponse.getMatches()[0].getId().string(), equalTo("1")); + assertThat(percolateResponse.getMatches()[1].getId().string(), equalTo("2")); + + // add an extra query and verify the results + client().prepareIndex(INDEX_NAME, ".percolator", "4") + .setSource(jsonBuilder().startObject().field("query", matchQuery("message", "fox jumps")).endObject()) + .get(); + refresh(); + + percolateResponse = client().preparePercolate() + .setIndices(INDEX_NAME) + .setDocumentType("message") + .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("message", "the quick brown fox jumps over the lazy dog")) + .get(); + + assertThat(percolateResponse.getCount(), equalTo(3L)); + assertThat(percolateResponse.getMatches().length, equalTo(3)); + assertThat(percolateResponse.getMatches()[0].getId().string(), equalTo("1")); + assertThat(percolateResponse.getMatches()[1].getId().string(), equalTo("2")); + assertThat(percolateResponse.getMatches()[2].getId().string(), equalTo("4")); + } + + private void setupNode() throws Exception { + Path dataDir = createTempDir(); + Path clusterDir = Files.createDirectory(dataDir.resolve(cluster().getClusterName())); + try (InputStream stream = PercolatorBackwardsCompatibilityIT.class.getResourceAsStream("/indices/percolator/bwc_index_2.0.0.zip")) { + TestUtil.unzip(stream, clusterDir); } + + Settings.Builder nodeSettings = Settings.builder() + .put("path.data", dataDir); + internalCluster().startNode(nodeSettings.build()); + ensureGreen(INDEX_NAME); } } diff --git a/core/src/test/java/org/elasticsearch/percolator/PercolatorIT.java b/core/src/test/java/org/elasticsearch/percolator/PercolatorIT.java index be1acb1218d..0c16c981847 100644 --- a/core/src/test/java/org/elasticsearch/percolator/PercolatorIT.java +++ b/core/src/test/java/org/elasticsearch/percolator/PercolatorIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.percolator; import org.apache.lucene.search.join.ScoreMode; -import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.indices.alias.Alias; @@ -39,7 +38,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.engine.DocumentMissingException; import org.elasticsearch.index.engine.VersionConflictEngineException; -import org.elasticsearch.index.percolator.PercolatorException; +import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.query.Operator; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.QueryShardException; @@ -102,6 +101,7 @@ import static org.hamcrest.Matchers.nullValue; * */ public class PercolatorIT extends ESIntegTestCase { + public void testSimple1() throws Exception { client().admin().indices().prepareCreate("test").execute().actionGet(); ensureGreen(); @@ -125,7 +125,7 @@ public class PercolatorIT extends ESIntegTestCase { client().prepareIndex("test", PercolatorService.TYPE_NAME, "4") .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) .execute().actionGet(); - client().admin().indices().prepareRefresh("test").execute().actionGet(); + refresh(); logger.info("--> Percolate doc with field1=b"); PercolateResponse response = client().preparePercolate() @@ -200,6 +200,7 @@ public class PercolatorIT extends ESIntegTestCase { 
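// NOTE (editorial, not part of this patch): the refresh() calls added throughout PercolatorIT
// suggest that newly indexed percolator queries are only visible to percolation after a refresh.
// The recurring pattern, with placeholder id and field names drawn from this file:
//
//     client().prepareIndex("test", PercolatorService.TYPE_NAME, "q1")
//             .setSource(jsonBuilder().startObject().field("query", termQuery("field1", "value")).endObject())
//             .get();
//     refresh(); // make query "q1" visible to subsequent percolate requests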
client().prepareIndex("test", PercolatorService.TYPE_NAME, "test1") .setSource(XContentFactory.jsonBuilder().startObject().field("query", termQuery("field2", "value")).endObject()) .execute().actionGet(); + refresh(); response = client().preparePercolate() .setIndices("test").setDocumentType("type1") @@ -212,6 +213,7 @@ public class PercolatorIT extends ESIntegTestCase { client().prepareIndex("test", PercolatorService.TYPE_NAME, "test2") .setSource(XContentFactory.jsonBuilder().startObject().field("query", termQuery("field1", 1)).endObject()) .execute().actionGet(); + refresh(); response = client().preparePercolate() .setIndices("test").setDocumentType("type1") @@ -223,6 +225,7 @@ public class PercolatorIT extends ESIntegTestCase { client().prepareDelete("test", PercolatorService.TYPE_NAME, "test2").execute().actionGet(); + refresh(); response = client().preparePercolate() .setIndices("test").setDocumentType("type1") .setSource(doc).execute().actionGet(); @@ -244,11 +247,13 @@ public class PercolatorIT extends ESIntegTestCase { .setRouting(Integer.toString(i % 2)) .execute().actionGet(); } + refresh(); logger.info("--> Percolate doc with no routing"); PercolateResponse response = client().preparePercolate() .setIndices("test").setDocumentType("type") - .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject()) + .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject())) + .setSize(100) .execute().actionGet(); assertMatchCount(response, 100l); assertThat(response.getMatches(), arrayWithSize(100)); @@ -256,23 +261,25 @@ public class PercolatorIT extends ESIntegTestCase { logger.info("--> Percolate doc with routing=0"); response = client().preparePercolate() .setIndices("test").setDocumentType("type") - .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject()) - .setRouting("0") - .execute().actionGet(); + .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject())) + .setSize(100) + .setRouting("0") + .execute().actionGet(); assertMatchCount(response, 50l); assertThat(response.getMatches(), arrayWithSize(50)); logger.info("--> Percolate doc with routing=1"); response = client().preparePercolate() .setIndices("test").setDocumentType("type") - .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject()) + .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject())) + .setSize(100) .setRouting("1") .execute().actionGet(); assertMatchCount(response, 50l); assertThat(response.getMatches(), arrayWithSize(50)); } - public void testStorePeroclateQueriesOnRecreatedIndex() throws Exception { + public void storePercolateQueriesOnRecreatedIndex() throws Exception { createIndex("test"); ensureGreen(); @@ -326,6 +333,7 @@ public class PercolatorIT extends ESIntegTestCase { .endObject()) .setRefresh(true) .execute().actionGet(); + refresh(); PercolateResponse percolate = client().preparePercolate() .setIndices("test").setDocumentType("doc") @@ -352,7 +360,6 @@ public class PercolatorIT extends ESIntegTestCase { .field("query", termQuery("field1", "value1")) .endObject()) .execute().actionGet(); - refresh(); SearchResponse countResponse = client().prepareSearch().setSize(0) .setQuery(matchAllQuery()).setTypes(PercolatorService.TYPE_NAME) @@ 
-511,7 +518,7 @@ public class PercolatorIT extends ESIntegTestCase { client().prepareIndex("test", PercolatorService.TYPE_NAME, "1") .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) .execute().actionGet(); - client().admin().indices().prepareRefresh("test").execute().actionGet(); + refresh(); logger.info("--> First percolate request"); PercolateResponse response = client().preparePercolate() @@ -613,7 +620,7 @@ public class PercolatorIT extends ESIntegTestCase { client().prepareIndex("test", PercolatorService.TYPE_NAME, "4") .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) .execute().actionGet(); - client().admin().indices().prepareRefresh("test").execute().actionGet(); + refresh(); logger.info("--> Percolate existing doc with id 1"); PercolateResponse response = client().preparePercolate() @@ -686,7 +693,7 @@ public class PercolatorIT extends ESIntegTestCase { client().prepareIndex("test", PercolatorService.TYPE_NAME, "4") .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) .execute().actionGet(); - client().admin().indices().prepareRefresh("test").execute().actionGet(); + refresh(); logger.info("--> Percolate existing doc with id 1"); PercolateResponse response = client().preparePercolate() @@ -751,7 +758,7 @@ public class PercolatorIT extends ESIntegTestCase { client().prepareIndex("test", PercolatorService.TYPE_NAME, "4") .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) .execute().actionGet(); - client().admin().indices().prepareRefresh("test").execute().actionGet(); + refresh(); logger.info("--> Percolate existing doc with id 2 and version 1"); PercolateResponse response = client().preparePercolate() @@ -796,6 +803,7 @@ public class PercolatorIT extends ESIntegTestCase { .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) .execute().actionGet(); } + refresh(); logger.info("--> Percolate doc to index test1"); PercolateResponse response = client().preparePercolate() @@ -962,7 +970,7 @@ public class PercolatorIT extends ESIntegTestCase { client().prepareIndex("test", PercolatorService.TYPE_NAME, "4") .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) .execute().actionGet(); - client().admin().indices().prepareRefresh("test").execute().actionGet(); + refresh(); logger.info("--> Count percolate doc with field1=b"); PercolateResponse response = client().preparePercolate() @@ -1033,7 +1041,7 @@ public class PercolatorIT extends ESIntegTestCase { client().prepareIndex("test", PercolatorService.TYPE_NAME, "4") .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) .execute().actionGet(); - client().admin().indices().prepareRefresh("test").execute().actionGet(); + refresh(); logger.info("--> Count percolate existing doc with id 1"); PercolateResponse response = client().preparePercolate() @@ -1083,12 +1091,14 @@ public class PercolatorIT extends ESIntegTestCase { .execute().actionGet(); } } + refresh(); boolean onlyCount = randomBoolean(); PercolateResponse response = client().preparePercolate() .setIndices("my-index").setDocumentType("my-type") .setOnlyCount(onlyCount) .setPercolateDoc(docBuilder().setDoc("field", "value")) + .setSize((int) totalQueries) .execute().actionGet(); assertMatchCount(response, totalQueries); if (!onlyCount) { @@ -1108,7 +1118,7 @@ public class PercolatorIT extends ESIntegTestCase { } // The query / filter capabilities are NOT in realtime - 
client().admin().indices().prepareRefresh("my-index").execute().actionGet(); + refresh(); int runs = randomIntBetween(3, 16); for (int i = 0; i < runs; i++) { @@ -1118,6 +1128,7 @@ public class PercolatorIT extends ESIntegTestCase { .setOnlyCount(onlyCount) .setPercolateDoc(docBuilder().setDoc("field", "value")) .setPercolateQuery(termQuery("level", 1 + randomInt(numLevels - 1))) + .setSize((int) numQueriesPerLevel) .execute().actionGet(); assertMatchCount(response, numQueriesPerLevel); if (!onlyCount) { @@ -1132,6 +1143,7 @@ public class PercolatorIT extends ESIntegTestCase { .setOnlyCount(onlyCount) .setPercolateDoc(docBuilder().setDoc("field", "value")) .setPercolateQuery(termQuery("level", 1 + randomInt(numLevels - 1))) + .setSize((int) numQueriesPerLevel) .execute().actionGet(); assertMatchCount(response, numQueriesPerLevel); if (!onlyCount) { @@ -1268,18 +1280,6 @@ public class PercolatorIT extends ESIntegTestCase { assertThat(response.getMatches()[0].getScore(), equalTo(2f)); assertThat(response.getMatches()[1].getId().string(), equalTo("1")); assertThat(response.getMatches()[1].getScore(), equalTo(1f)); - - response = client().preparePercolate().setIndices("my-index").setDocumentType("my-type") - .setSortByScore(true) - .setPercolateDoc(docBuilder().setDoc("field", "value")) - .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("level"))) - .execute().actionGet(); - assertThat(response.getCount(), equalTo(0l)); - assertThat(response.getShardFailures().length, greaterThan(0)); - for (ShardOperationFailedException failure : response.getShardFailures()) { - assertThat(failure.status(), equalTo(RestStatus.BAD_REQUEST)); - assertThat(failure.reason(), containsString("Can't sort if size isn't specified")); - } } public void testPercolateSortingUnsupportedField() throws Exception { @@ -1322,25 +1322,6 @@ public class PercolatorIT extends ESIntegTestCase { assertMatchCount(response, 0l); } - public void testPercolateNotEmptyIndexButNoRefresh() throws Exception { - client().admin().indices().prepareCreate("my-index") - .setSettings(settingsBuilder().put("index.refresh_interval", -1)) - .execute().actionGet(); - ensureGreen(); - - client().prepareIndex("my-index", PercolatorService.TYPE_NAME, "1") - .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("level", 1).endObject()) - .execute().actionGet(); - - PercolateResponse response = client().preparePercolate().setIndices("my-index").setDocumentType("my-type") - .setSortByScore(true) - .setSize(2) - .setPercolateDoc(docBuilder().setDoc("field", "value")) - .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("level"))) - .execute().actionGet(); - assertMatchCount(response, 0l); - } - public void testPercolatorWithHighlighting() throws Exception { StringBuilder fieldMapping = new StringBuilder("type=string") .append(",store=").append(randomBoolean()); @@ -1367,6 +1348,7 @@ public class PercolatorIT extends ESIntegTestCase { client().prepareIndex("test", PercolatorService.TYPE_NAME, "5") .setSource(jsonBuilder().startObject().field("query", termQuery("field1", "fox")).endObject()) .execute().actionGet(); + refresh(); logger.info("--> Percolate doc with field1=The quick brown fox jumps over the lazy dog"); PercolateResponse response = client().preparePercolate() @@ -1393,9 +1375,6 @@ public class PercolatorIT extends ESIntegTestCase { assertThat(matches[3].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown 
fox jumps over the lazy dog")); assertThat(matches[4].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the lazy dog")); - // Anything with percolate query isn't realtime - client().admin().indices().prepareRefresh("test").execute().actionGet(); - logger.info("--> Query percolate doc with field1=The quick brown fox jumps over the lazy dog"); response = client().preparePercolate() .setIndices("test").setDocumentType("type") @@ -1522,6 +1501,7 @@ public class PercolatorIT extends ESIntegTestCase { client().prepareIndex("test", "type", "1") .setSource(jsonBuilder().startObject().field("field1", "The quick brown fox jumps over the lazy dog").endObject()) .get(); + refresh(); logger.info("--> Top percolate for doc with field1=The quick brown fox jumps over the lazy dog"); response = client().preparePercolate() @@ -1569,6 +1549,7 @@ public class PercolatorIT extends ESIntegTestCase { .endObject()) .setRefresh(true) .execute().actionGet(); + refresh(); PercolateResponse percolate = client().preparePercolate() .setIndices("test").setDocumentType("doc") @@ -1638,8 +1619,9 @@ public class PercolatorIT extends ESIntegTestCase { .setSource(jsonBuilder().startObject().field("query", QueryBuilders.queryStringQuery("color:red")).endObject()) .get(); fail(); - } catch (PercolatorException e) { + } catch (MapperParsingException e) { } + refresh(); PercolateResponse percolateResponse = client().preparePercolate().setDocumentType("type") .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc(jsonBuilder().startObject().startObject("custom").field("color", "blue").endObject().endObject())) @@ -1656,6 +1638,7 @@ public class PercolatorIT extends ESIntegTestCase { client().prepareIndex("idx", PercolatorService.TYPE_NAME, "2") .setSource(jsonBuilder().startObject().field("query", QueryBuilders.queryStringQuery("custom.color:blue")).field("type", "type").endObject()) .get(); + refresh(); // The second request will yield a match, since the query during the proper field during parsing. 
percolateResponse = client().preparePercolate().setDocumentType("type") @@ -1725,7 +1708,7 @@ public class PercolatorIT extends ESIntegTestCase { .setSource(jsonBuilder().startObject().field("query", termQuery("field1", "value")).endObject()) .get(); fail(); - } catch (PercolatorException e) { + } catch (MapperParsingException e) { assertThat(e.getRootCause(), instanceOf(QueryShardException.class)); } @@ -1734,7 +1717,7 @@ public class PercolatorIT extends ESIntegTestCase { .setSource(jsonBuilder().startObject().field("query", rangeQuery("field1").from(0).to(1)).endObject()) .get(); fail(); - } catch (PercolatorException e) { + } catch (MapperParsingException e) { assertThat(e.getRootCause(), instanceOf(QueryShardException.class)); } } @@ -1751,6 +1734,7 @@ public class PercolatorIT extends ESIntegTestCase { client().prepareIndex("test", PercolatorService.TYPE_NAME, "2") .setSource(jsonBuilder().startObject().field("query", constantScoreQuery(rangeQuery("timestamp").from("now-1d").to("now"))).endObject()) .get(); + refresh(); logger.info("--> Percolate doc with field1=b"); PercolateResponse response = client().preparePercolate() @@ -1797,7 +1781,6 @@ public class PercolatorIT extends ESIntegTestCase { return doc; } - // issue public void testNestedDocFilter() throws IOException { String mapping = "{\n" + " \"doc\": {\n" + @@ -1943,6 +1926,7 @@ public class PercolatorIT extends ESIntegTestCase { .setSettings(settings)); client().prepareIndex("test", PercolatorService.TYPE_NAME) .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "value")).endObject()).get(); + refresh(); logger.info("--> Percolate doc with field1=value"); PercolateResponse response1 = client().preparePercolate() .setIndices("test").setDocumentType("type") @@ -1994,6 +1978,7 @@ public class PercolatorIT extends ESIntegTestCase { client().prepareIndex("index", PercolatorService.TYPE_NAME, "1") .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) .execute().actionGet(); + refresh(); // Just percolating a document that has a _parent field in its mapping should just work: PercolateResponse response = client().preparePercolate() diff --git a/core/src/test/java/org/elasticsearch/percolator/PercolatorQueryTests.java b/core/src/test/java/org/elasticsearch/percolator/PercolatorQueryTests.java new file mode 100644 index 00000000000..170b0be30df --- /dev/null +++ b/core/src/test/java/org/elasticsearch/percolator/PercolatorQueryTests.java @@ -0,0 +1,258 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.percolator; + +import org.apache.lucene.analysis.core.WhitespaceAnalyzer; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.StoredField; +import org.apache.lucene.document.StringField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.index.Term; +import org.apache.lucene.index.memory.MemoryIndex; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.PhraseQuery; +import org.apache.lucene.search.PrefixQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.WildcardQuery; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.index.mapper.ParseContext; +import org.elasticsearch.index.mapper.Uid; +import org.elasticsearch.index.mapper.internal.UidFieldMapper; +import org.elasticsearch.index.percolator.ExtractQueryTermsService; +import org.elasticsearch.test.ESTestCase; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; + +public class PercolatorQueryTests extends ESTestCase { + + public final static String EXTRACTED_TERMS_FIELD_NAME = "extracted_terms"; + public final static String UNKNOWN_QUERY_FIELD_NAME = "unknown_query"; + public static FieldType EXTRACTED_TERMS_FIELD_TYPE = new FieldType(); + + static { + EXTRACTED_TERMS_FIELD_TYPE.setTokenized(false); + EXTRACTED_TERMS_FIELD_TYPE.setIndexOptions(IndexOptions.DOCS); + EXTRACTED_TERMS_FIELD_TYPE.freeze(); + } + + private Directory directory; + private IndexWriter indexWriter; + private Map<BytesRef, Query> queries; + private DirectoryReader directoryReader; + + @Before + public void init() throws Exception { + directory = newDirectory(); + queries = new HashMap<>(); + IndexWriterConfig config = new IndexWriterConfig(new WhitespaceAnalyzer()); + config.setMergePolicy(NoMergePolicy.INSTANCE); + indexWriter = new IndexWriter(directory, config); + } + + @After + public void destroy() throws Exception { + directoryReader.close(); + directory.close(); + } + + public void testVariousQueries() throws Exception { + addPercolatorQuery("1", new TermQuery(new Term("field", "brown"))); + addPercolatorQuery("2", new TermQuery(new Term("field", "monkey"))); + addPercolatorQuery("3", new TermQuery(new Term("field", "fox"))); + BooleanQuery.Builder bq1 = new BooleanQuery.Builder(); + bq1.add(new TermQuery(new Term("field", "fox")), BooleanClause.Occur.SHOULD); + bq1.add(new TermQuery(new Term("field", "monkey")), BooleanClause.Occur.SHOULD); + addPercolatorQuery("4", bq1.build()); + BooleanQuery.Builder bq2 = new BooleanQuery.Builder(); + bq2.add(new TermQuery(new Term("field", "fox")), BooleanClause.Occur.MUST); + bq2.add(new TermQuery(new Term("field", "monkey")), BooleanClause.Occur.MUST); + addPercolatorQuery("5",
bq2.build()); + BooleanQuery.Builder bq3 = new BooleanQuery.Builder(); + bq3.add(new TermQuery(new Term("field", "fox")), BooleanClause.Occur.MUST); + bq3.add(new TermQuery(new Term("field", "apes")), BooleanClause.Occur.MUST_NOT); + addPercolatorQuery("6", bq3.build()); + BooleanQuery.Builder bq4 = new BooleanQuery.Builder(); + bq4.add(new TermQuery(new Term("field", "fox")), BooleanClause.Occur.MUST_NOT); + bq4.add(new TermQuery(new Term("field", "apes")), BooleanClause.Occur.MUST); + addPercolatorQuery("7", bq4.build()); + PhraseQuery.Builder pq1 = new PhraseQuery.Builder(); + pq1.add(new Term("field", "lazy")); + pq1.add(new Term("field", "dog")); + addPercolatorQuery("8", pq1.build()); + + indexWriter.close(); + directoryReader = DirectoryReader.open(directory); + IndexSearcher shardSearcher = newSearcher(directoryReader); + + MemoryIndex memoryIndex = new MemoryIndex(); + memoryIndex.addField("field", "the quick brown fox jumps over the lazy dog", new WhitespaceAnalyzer()); + IndexSearcher percolateSearcher = memoryIndex.createSearcher(); + + PercolatorQuery.Builder builder = new PercolatorQuery.Builder( + percolateSearcher, + queries, + new MatchAllDocsQuery() + ); + builder.extractQueryTermsQuery(EXTRACTED_TERMS_FIELD_NAME, UNKNOWN_QUERY_FIELD_NAME); + TopDocs topDocs = shardSearcher.search(builder.build(), 10); + assertThat(topDocs.totalHits, equalTo(5)); + assertThat(topDocs.scoreDocs.length, equalTo(5)); + assertThat(topDocs.scoreDocs[0].doc, equalTo(0)); + assertThat(topDocs.scoreDocs[1].doc, equalTo(2)); + assertThat(topDocs.scoreDocs[2].doc, equalTo(3)); + assertThat(topDocs.scoreDocs[3].doc, equalTo(5)); + assertThat(topDocs.scoreDocs[4].doc, equalTo(7)); + } + + public void testWithScoring() throws Exception { + addPercolatorQuery("1", new TermQuery(new Term("field", "brown")), "field", "value1"); + + indexWriter.close(); + directoryReader = DirectoryReader.open(directory); + IndexSearcher shardSearcher = newSearcher(directoryReader); + + MemoryIndex memoryIndex = new MemoryIndex(); + memoryIndex.addField("field", "the quick brown fox jumps over the lazy dog", new WhitespaceAnalyzer()); + IndexSearcher percolateSearcher = memoryIndex.createSearcher(); + + PercolatorQuery.Builder builder = new PercolatorQuery.Builder( + percolateSearcher, + queries, + new MatchAllDocsQuery() + ); + builder.extractQueryTermsQuery(EXTRACTED_TERMS_FIELD_NAME, UNKNOWN_QUERY_FIELD_NAME); + builder.setPercolateQuery(new TermQuery(new Term("field", "value1"))); + + PercolatorQuery percolatorQuery = builder.build(); + TopDocs topDocs = shardSearcher.search(percolatorQuery, 1); + assertThat(topDocs.totalHits, equalTo(1)); + assertThat(topDocs.scoreDocs.length, equalTo(1)); + assertThat(topDocs.scoreDocs[0].doc, equalTo(0)); + assertThat(topDocs.scoreDocs[0].score, not(1f)); + + Explanation explanation = shardSearcher.explain(percolatorQuery, 0); + assertThat(explanation.isMatch(), is(true)); + assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[0].score)); + } + + public void testDuel() throws Exception { + int numQueries = scaledRandomIntBetween(32, 256); + for (int i = 0; i < numQueries; i++) { + String id = Integer.toString(i); + Query query; + if (randomBoolean()) { + query = new PrefixQuery(new Term("field", id)); + } else if (randomBoolean()) { + query = new WildcardQuery(new Term("field", id + "*")); + } else if (randomBoolean()) { + query = new CustomQuery(new Term("field", id + "*")); + } else { + query = new TermQuery(new Term("field", id)); + } + addPercolatorQuery(id, query); + } 
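// NOTE (editorial, not part of this patch): the duel loop that follows builds the same
// PercolatorQuery twice over the random queries registered above; builder1 enables
// extractQueryTermsQuery(...), which appears to use the terms extracted by
// ExtractQueryTermsService as a pre-filter so that only candidate queries are evaluated against
// the MemoryIndex, and the test asserts both paths return identical matches. The optimized
// construction, as used below:
//
//     PercolatorQuery.Builder builder = new PercolatorQuery.Builder(percolateSearcher, queries, new MatchAllDocsQuery());
//     builder.extractQueryTermsQuery(EXTRACTED_TERMS_FIELD_NAME, UNKNOWN_QUERY_FIELD_NAME); // term-based pre-filter
//     TopDocs topDocs = shardSearcher.search(builder.build(), 10);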
+ + indexWriter.close(); + directoryReader = DirectoryReader.open(directory); + IndexSearcher shardSearcher = newSearcher(directoryReader); + + for (int i = 0; i < numQueries; i++) { + MemoryIndex memoryIndex = new MemoryIndex(); + String id = Integer.toString(i); + memoryIndex.addField("field", id, new WhitespaceAnalyzer()); + IndexSearcher percolateSearcher = memoryIndex.createSearcher(); + + PercolatorQuery.Builder builder1 = new PercolatorQuery.Builder( + percolateSearcher, + queries, + new MatchAllDocsQuery() + ); + // enables the optimization that skips evaluation of queries that cannot match + builder1.extractQueryTermsQuery(EXTRACTED_TERMS_FIELD_NAME, UNKNOWN_QUERY_FIELD_NAME); + TopDocs topDocs1 = shardSearcher.search(builder1.build(), 10); + + PercolatorQuery.Builder builder2 = new PercolatorQuery.Builder( + percolateSearcher, + queries, + new MatchAllDocsQuery() + ); + TopDocs topDocs2 = shardSearcher.search(builder2.build(), 10); + + assertThat(topDocs1.totalHits, equalTo(topDocs2.totalHits)); + assertThat(topDocs1.scoreDocs.length, equalTo(topDocs2.scoreDocs.length)); + for (int j = 0; j < topDocs1.scoreDocs.length; j++) { + assertThat(topDocs1.scoreDocs[j].doc, equalTo(topDocs2.scoreDocs[j].doc)); + } + } + } + + void addPercolatorQuery(String id, Query query, String... extraFields) throws IOException { + queries.put(new BytesRef(id), query); + ParseContext.Document document = new ParseContext.Document(); + ExtractQueryTermsService.extractQueryTerms(query, document, EXTRACTED_TERMS_FIELD_NAME, UNKNOWN_QUERY_FIELD_NAME, EXTRACTED_TERMS_FIELD_TYPE); + document.add(new StoredField(UidFieldMapper.NAME, Uid.createUid(PercolatorService.TYPE_NAME, id))); + assert extraFields.length % 2 == 0; + for (int i = 0; i < extraFields.length; i++) { + document.add(new StringField(extraFields[i], extraFields[++i], Field.Store.NO)); + } + indexWriter.addDocument(document); + } + + private final static class CustomQuery extends Query { + + private final Term term; + + private CustomQuery(Term term) { + this.term = term; + } + + @Override + public Query rewrite(IndexReader reader) throws IOException { + return new TermQuery(term); + } + + @Override + public String toString(String field) { + return "custom{" + field + "}"; + } + } + +} diff --git a/core/src/test/java/org/elasticsearch/percolator/PercolatorServiceTests.java b/core/src/test/java/org/elasticsearch/percolator/PercolatorServiceTests.java new file mode 100644 index 00000000000..49635abc8de --- /dev/null +++ b/core/src/test/java/org/elasticsearch/percolator/PercolatorServiceTests.java @@ -0,0 +1,176 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.percolator; + +import org.apache.lucene.analysis.core.WhitespaceAnalyzer; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.StoredField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.index.Term; +import org.apache.lucene.index.memory.MemoryIndex; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.Version; +import org.elasticsearch.action.percolate.PercolateShardResponse; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AnalysisService; +import org.elasticsearch.index.analysis.AnalyzerProvider; +import org.elasticsearch.index.analysis.CharFilterFactory; +import org.elasticsearch.index.analysis.TokenFilterFactory; +import org.elasticsearch.index.analysis.TokenizerFactory; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.ParseContext; +import org.elasticsearch.index.mapper.Uid; +import org.elasticsearch.index.mapper.internal.UidFieldMapper; +import org.elasticsearch.index.percolator.PercolatorFieldMapper; +import org.elasticsearch.index.percolator.PercolatorQueriesRegistry; +import org.elasticsearch.index.percolator.ExtractQueryTermsService; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.similarity.SimilarityService; +import org.elasticsearch.indices.IndicesModule; +import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.internal.ContextIndexSearcher; +import org.elasticsearch.test.ESTestCase; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collections; + +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class PercolatorServiceTests extends ESTestCase { + + private Directory directory; + private IndexWriter indexWriter; + private DirectoryReader directoryReader; + + @Before + public void init() throws Exception { + directory = newDirectory(); + IndexWriterConfig config = new IndexWriterConfig(new WhitespaceAnalyzer()); + config.setMergePolicy(NoMergePolicy.INSTANCE); + indexWriter = new IndexWriter(directory, config); + } + + @After + public void destroy() throws Exception { + directoryReader.close(); + directory.close(); + } + + public void testCount() throws Exception { + PercolateContext context = mock(PercolateContext.class); + when(context.shardTarget()).thenReturn(new SearchShardTarget("_id", "_index", 0)); + when(context.percolatorTypeFilter()).thenReturn(new MatchAllDocsQuery()); + when(context.isOnlyCount()).thenReturn(true); + + PercolatorQueriesRegistry registry = createRegistry(); + addPercolatorQuery("1", new TermQuery(new Term("field", "brown")), indexWriter, registry); + addPercolatorQuery("2", new TermQuery(new Term("field", "fox")), indexWriter, 
registry); + addPercolatorQuery("3", new TermQuery(new Term("field", "monkey")), indexWriter, registry); + + indexWriter.close(); + directoryReader = DirectoryReader.open(directory); + IndexSearcher shardSearcher = newSearcher(directoryReader); + when(context.searcher()).thenReturn(new ContextIndexSearcher(new Engine.Searcher("test", shardSearcher), shardSearcher.getQueryCache(), shardSearcher.getQueryCachingPolicy())); + + MemoryIndex memoryIndex = new MemoryIndex(); + memoryIndex.addField("field", "the quick brown fox jumps over the lazy dog", new WhitespaceAnalyzer()); + IndexSearcher percolateSearcher = memoryIndex.createSearcher(); + when(context.docSearcher()).thenReturn(percolateSearcher); + + PercolateShardResponse response = PercolatorService.doPercolate(context, registry, null, null, null); + assertThat(response.topDocs().totalHits, equalTo(2)); + } + + public void testTopMatching() throws Exception { + PercolateContext context = mock(PercolateContext.class); + when(context.shardTarget()).thenReturn(new SearchShardTarget("_id", "_index", 0)); + when(context.percolatorTypeFilter()).thenReturn(new MatchAllDocsQuery()); + when(context.size()).thenReturn(10); + + PercolatorQueriesRegistry registry = createRegistry(); + addPercolatorQuery("1", new TermQuery(new Term("field", "brown")), indexWriter, registry); + addPercolatorQuery("2", new TermQuery(new Term("field", "monkey")), indexWriter, registry); + addPercolatorQuery("3", new TermQuery(new Term("field", "fox")), indexWriter, registry); + + indexWriter.close(); + directoryReader = DirectoryReader.open(directory); + IndexSearcher shardSearcher = newSearcher(directoryReader); + when(context.searcher()).thenReturn(new ContextIndexSearcher(new Engine.Searcher("test", shardSearcher), shardSearcher.getQueryCache(), shardSearcher.getQueryCachingPolicy())); + + MemoryIndex memoryIndex = new MemoryIndex(); + memoryIndex.addField("field", "the quick brown fox jumps over the lazy dog", new WhitespaceAnalyzer()); + IndexSearcher percolateSearcher = memoryIndex.createSearcher(); + when(context.docSearcher()).thenReturn(percolateSearcher); + + PercolateShardResponse response = PercolatorService.doPercolate(context, registry, null, null, null); + TopDocs topDocs = response.topDocs(); + assertThat(topDocs.totalHits, equalTo(2)); + assertThat(topDocs.scoreDocs.length, equalTo(2)); + assertThat(topDocs.scoreDocs[0].doc, equalTo(0)); + assertThat(topDocs.scoreDocs[1].doc, equalTo(2)); + } + + void addPercolatorQuery(String id, Query query, IndexWriter writer, PercolatorQueriesRegistry registry) throws IOException { + registry.getPercolateQueries().put(new BytesRef(id), query); + ParseContext.Document document = new ParseContext.Document(); + FieldType extractedQueryTermsFieldType = new FieldType(); + extractedQueryTermsFieldType.setTokenized(false); + extractedQueryTermsFieldType.setIndexOptions(IndexOptions.DOCS); + extractedQueryTermsFieldType.freeze(); + ExtractQueryTermsService.extractQueryTerms(query, document, PercolatorFieldMapper.EXTRACTED_TERMS_FULL_FIELD_NAME, PercolatorFieldMapper.UNKNOWN_QUERY_FULL_FIELD_NAME, extractedQueryTermsFieldType); + document.add(new StoredField(UidFieldMapper.NAME, Uid.createUid(PercolatorService.TYPE_NAME, id))); + writer.addDocument(document); + } + + PercolatorQueriesRegistry createRegistry() { + Index index = new Index("_index"); + IndexSettings indexSettings = new IndexSettings(new IndexMetaData.Builder("_index").settings( + Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + 
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) + .build(), + Settings.EMPTY, Collections.emptyList() + ); + return new PercolatorQueriesRegistry( + new ShardId(index, 0), + indexSettings, + null + ); + } + +} diff --git a/core/src/test/java/org/elasticsearch/percolator/RecoveryPercolatorIT.java b/core/src/test/java/org/elasticsearch/percolator/RecoveryPercolatorIT.java index 6a0485133a2..57eb2e37ddf 100644 --- a/core/src/test/java/org/elasticsearch/percolator/RecoveryPercolatorIT.java +++ b/core/src/test/java/org/elasticsearch/percolator/RecoveryPercolatorIT.java @@ -136,7 +136,7 @@ public class RecoveryPercolatorIT extends ESIntegTestCase { DeleteIndexResponse actionGet = client().admin().indices().prepareDelete("test").get(); assertThat(actionGet.isAcknowledged(), equalTo(true)); - client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", 1)).get(); + assertAcked(prepareCreate("test").addMapping("type1", "field1", "type=string").addMapping(PercolatorService.TYPE_NAME, "color", "type=string")); clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet(); logger.info("Done Cluster Health, status " + clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); @@ -193,6 +193,7 @@ public class RecoveryPercolatorIT extends ESIntegTestCase { .endObject()) .get(); } + refresh(); logger.info("--> Percolate doc with field1=95"); PercolateResponse response = client().preparePercolate() @@ -222,6 +223,7 @@ public class RecoveryPercolatorIT extends ESIntegTestCase { percolatorRecovery(false); } + @AwaitsFix(bugUrl = "sometimes reproduces with: gradle :core:integTest -Dtests.seed=21DDCAA92013B00C -Dtests.class=org.elasticsearch.percolator.RecoveryPercolatorIT -Dtests.method=\"testMultiPercolatorRecovery\"") public void testMultiPercolatorRecovery() throws Exception { percolatorRecovery(true); } @@ -249,6 +251,7 @@ public class RecoveryPercolatorIT extends ESIntegTestCase { .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) .get(); } + refresh(); final String document = "{\"field\" : \"a\"}"; client.prepareIndex("test", "type", "1") @@ -269,7 +272,7 @@ public class RecoveryPercolatorIT extends ESIntegTestCase { for (int i = 0; i < numPercolateRequest; i++) { PercolateRequestBuilder percolateBuilder = client.preparePercolate() - .setIndices("test").setDocumentType("type"); + .setIndices("test").setDocumentType("type").setSize(numQueries); if (randomBoolean()) { percolateBuilder.setGetRequest(Requests.getRequest("test").type("type").id("1")); } else { @@ -289,7 +292,7 @@ } } else { PercolateRequestBuilder percolateBuilder = client.preparePercolate() - .setIndices("test").setDocumentType("type"); + .setIndices("test").setDocumentType("type").setSize(numQueries); if (randomBoolean()) { percolateBuilder.setPercolateDoc(docBuilder().setDoc(document)); } else { diff --git a/core/src/test/java/org/elasticsearch/percolator/TTLPercolatorIT.java b/core/src/test/java/org/elasticsearch/percolator/TTLPercolatorIT.java index 4b4d4a84237..43ca89923f2 100644 --- a/core/src/test/java/org/elasticsearch/percolator/TTLPercolatorIT.java +++ b/core/src/test/java/org/elasticsearch/percolator/TTLPercolatorIT.java @@ -182,7 +182,7 @@ public class TTLPercolatorIT extends ESIntegTestCase { .endObject() .endObject() .endObject() -
).setTTL(randomIntBetween(1, 500)).execute().actionGet(); + ).setTTL(randomIntBetween(1, 500)).setRefresh(true).execute().actionGet(); } catch (MapperParsingException e) { logger.info("failed indexing {}", i, e); // if we are unlucky the TTL is so small that we see the expiry date is already in the past when diff --git a/core/src/test/java/org/elasticsearch/rest/HeadersAndContextCopyClientTests.java b/core/src/test/java/org/elasticsearch/rest/HeadersAndContextCopyClientTests.java index 2a8299226c0..238e16d4b44 100644 --- a/core/src/test/java/org/elasticsearch/rest/HeadersAndContextCopyClientTests.java +++ b/core/src/test/java/org/elasticsearch/rest/HeadersAndContextCopyClientTests.java @@ -327,7 +327,6 @@ public class HeadersAndContextCopyClientTests extends ESTestCase { client.admin().indices().prepareCreate("test"), client.admin().indices().prepareAliases(), client.admin().indices().prepareAnalyze("text"), - client.admin().indices().prepareDeleteWarmer(), client.admin().indices().prepareTypesExists("type"), client.admin().indices().prepareClose() }; diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java index b5ef5d9eb4f..17e8fd35073 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java @@ -34,10 +34,13 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.lucene.search.Queries; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.internal.TypeFieldMapper; import org.elasticsearch.index.mapper.internal.UidFieldMapper; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.BucketCollector; @@ -110,7 +113,8 @@ public class NestedAggregatorTests extends ESSingleNodeTestCase { indexWriter.commit(); indexWriter.close(); - DirectoryReader directoryReader = DirectoryReader.open(directory); + DirectoryReader directoryReader = DirectoryReader.open(directory); + directoryReader = ElasticsearchDirectoryReader.wrap(directoryReader, new ShardId(new Index("test"), 0)); IndexSearcher searcher = new IndexSearcher(directoryReader); IndexService indexService = createIndex("test"); diff --git a/core/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java b/core/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java index 1ae211bc242..88943f87ee7 100644 --- a/core/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java +++ b/core/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java @@ -299,7 +299,7 @@ public class GeoShapeQueryTests extends ESSingleNodeTestCase { logger.info("Created Random GeometryCollection containing " + gcb.numShapes() + " shapes"); - client().admin().indices().prepareCreate("test").addMapping("type", "location", "type=geo_shape") + client().admin().indices().prepareCreate("test").addMapping("type", "location", 
"type=geo_shape,tree=quadtree") .execute().actionGet(); XContentBuilder docSource = gcb.toXContent(jsonBuilder().startObject().field("location"), null).endObject(); @@ -317,10 +317,10 @@ public class GeoShapeQueryTests extends ESSingleNodeTestCase { public void testContainsShapeQuery() throws Exception { // Create a random geometry collection. - Rectangle mbr = xRandomRectangle(getRandom(), xRandomPoint(getRandom())); + Rectangle mbr = xRandomRectangle(getRandom(), xRandomPoint(getRandom()), true); GeometryCollectionBuilder gcb = createGeometryCollectionWithin(getRandom(), mbr); - client().admin().indices().prepareCreate("test").addMapping("type", "location", "type=geo_shape") + client().admin().indices().prepareCreate("test").addMapping("type", "location", "type=geo_shape,tree=quadtree" ) .execute().actionGet(); XContentBuilder docSource = gcb.toXContent(jsonBuilder().startObject().field("location"), null).endObject(); @@ -333,7 +333,7 @@ public class GeoShapeQueryTests extends ESSingleNodeTestCase { ShapeBuilder filterShape = (gcb.getShapeAt(randomIntBetween(0, gcb.numShapes() - 1))); GeoShapeQueryBuilder filter = QueryBuilders.geoShapeQuery("location", filterShape) - .relation(ShapeRelation.INTERSECTS); + .relation(ShapeRelation.CONTAINS); SearchResponse response = client().prepareSearch("test").setTypes("type").setQuery(QueryBuilders.matchAllQuery()) .setPostFilter(filter).get(); assertSearchResponse(response); @@ -343,7 +343,7 @@ public class GeoShapeQueryTests extends ESSingleNodeTestCase { public void testShapeFilterWithDefinedGeoCollection() throws Exception { createIndex("shapes"); - client().admin().indices().prepareCreate("test").addMapping("type", "location", "type=geo_shape") + client().admin().indices().prepareCreate("test").addMapping("type", "location", "type=geo_shape,tree=quadtree") .execute().actionGet(); XContentBuilder docSource = jsonBuilder().startObject().startObject("location") diff --git a/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java b/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java index 41fe4975e4b..80636388f54 100644 --- a/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.highlight; import com.carrotsearch.randomizedtesting.generators.RandomPicks; + import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; @@ -802,9 +803,8 @@ public class HighlighterSearchIT extends ESIntegTestCase { assertAcked(prepareCreate("test").addMapping("type1", type1TermVectorMapping())); ensureGreen(); - client().prepareIndex("test", "type1") - .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get(); - refresh(); + indexRandom(true, client().prepareIndex("test", "type1") + .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog")); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource() @@ -822,7 +822,6 @@ public class HighlighterSearchIT extends ESIntegTestCase { searchResponse = client().prepareSearch("test").setSource(source).get(); - // LUCENE 3.1 UPGRADE: Caused adding the space at the end... 
assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("this is a test")); logger.info("--> searching on _all, highlighting on field2"); @@ -832,7 +831,6 @@ public class HighlighterSearchIT extends ESIntegTestCase { searchResponse = client().prepareSearch("test").setSource(source).get(); - // LUCENE 3.1 UPGRADE: Caused adding the space at the end... assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); logger.info("--> searching on _all, highlighting on field2"); @@ -842,8 +840,26 @@ public class HighlighterSearchIT extends ESIntegTestCase { searchResponse = client().prepareSearch("test").setSource(source).get(); - // LUCENE 3.1 UPGRADE: Caused adding the space at the end... assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); + + logger.info("--> searching with boundary characters"); + source = searchSource() + .query(matchQuery("field2", "quick")) + .highlighter(highlight().field("field2", 30, 1).boundaryChars(new char[] {' '})); + + searchResponse = client().prepareSearch("test").setSource(source).get(); + + assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over")); + + logger.info("--> searching with boundary characters on the field"); + source = searchSource() + .query(matchQuery("field2", "quick")) + .highlighter(highlight().field(new Field("field2").fragmentSize(30).numOfFragments(1).boundaryChars(new char[] {' '}))); + + searchResponse = client().prepareSearch("test").setSource(source).get(); + + assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over")); + } /** diff --git a/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index 1ef3fddcf8e..c6e93204b66 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -176,7 +176,10 @@ public abstract class AbstractSnapshotIntegTestCase extends ESIntegTestCase { private long stopWaitingAt = -1; public BlockingClusterStateListener(ClusterService clusterService, String blockOn, String countOn, Priority passThroughPriority) { - this(clusterService, blockOn, countOn, passThroughPriority, TimeValue.timeValueMinutes(1)); + // Waiting for the 70 seconds here to make sure that the last check at 65 sec mark in assertBusyPendingTasks has a chance + // to finish before we timeout on the cluster state block. 
Otherwise the last check in assertBusyPendingTasks kicks in + after the cluster state block cleanup takes place and its assertion doesn't reflect the actual failure + this(clusterService, blockOn, countOn, passThroughPriority, TimeValue.timeValueSeconds(70)); } public BlockingClusterStateListener(ClusterService clusterService, final String blockOn, final String countOn, Priority passThroughPriority, TimeValue timeout) { diff --git a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 669527fd5f3..e8ff9674eed 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -1943,7 +1943,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas .put("compress", randomBoolean()) .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); - assertAcked(prepareCreate("test-idx", 0, settingsBuilder().put("number_of_shards", between(1, 20)) + assertAcked(prepareCreate("test-idx", 0, settingsBuilder().put("number_of_shards", between(1, 10)) .put("number_of_replicas", 0))); ensureGreen(); diff --git a/core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java b/core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java index 55744697b4d..cb0049dc84c 100644 --- a/core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java +++ b/core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java @@ -114,7 +114,7 @@ public class RandomShapeGenerator extends RandomGeoGenerator { throws InvalidShapeException { if (numGeometries <= 0) { // cap geometry collection at 4 shapes (to save test time) - numGeometries = RandomInts.randomIntBetween(r, 2, 5); + numGeometries = RandomInts.randomIntBetween(r, 2, 4); } if (nearPoint == null) { @@ -255,11 +255,31 @@ public class RandomShapeGenerator extends RandomGeoGenerator { return p; } - public static Rectangle xRandomRectangle(Random r, Point nearP) { - Rectangle bounds = ctx.getWorldBounds(); + private static Rectangle xRandomRectangle(Random r, Point nearP, Rectangle bounds, boolean small) { if (nearP == null) nearP = xRandomPointIn(r, bounds); + if (small == true) { + // between 3 and 6 degrees + final double latRange = 3 * r.nextDouble() + 3; + final double lonRange = 3 * r.nextDouble() + 3; + + double minX = nearP.getX(); + double maxX = minX + lonRange; + if (maxX > 180) { + maxX = minX; + minX -= lonRange; + } + double minY = nearP.getY(); + double maxY = nearP.getY() + latRange; + if (maxY > 90) { + maxY = minY; + minY -= latRange; + } + + return ctx.makeRectangle(minX, maxX, minY, maxY); + } + Range xRange = xRandomRange(r, rarely(r) ? 0 : nearP.getX(), Range.xRange(bounds, ctx)); Range yRange = xRandomRange(r, rarely(r) ?
0 : nearP.getY(), Range.yRange(bounds, ctx)); @@ -270,6 +290,14 @@ public class RandomShapeGenerator extends RandomGeoGenerator { xDivisible(yRange.getMax()*10e3)/10e3); } + public static Rectangle xRandomRectangle(Random r, Point nearP) { + return xRandomRectangle(r, nearP, ctx.getWorldBounds(), false); + } + + public static Rectangle xRandomRectangle(Random r, Point nearP, boolean small) { + return xRandomRectangle(r, nearP, ctx.getWorldBounds(), small); + } + private static boolean rarely(Random r) { return RandomInts.randomInt(r, 100) >= 90; } diff --git a/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java b/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java index e1b1c4451c9..09653c12e07 100644 --- a/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java +++ b/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java @@ -21,6 +21,7 @@ package org.elasticsearch.threadpool; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool.Names; @@ -89,6 +90,51 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { } } + public void testIndexingThreadPoolsMaxSize() throws InterruptedException { + String threadPoolName = randomThreadPoolName(); + for (String name : new String[] {ThreadPool.Names.BULK, ThreadPool.Names.INDEX}) { + ThreadPool threadPool = null; + try { + + int maxSize = EsExecutors.boundedNumberOfProcessors(Settings.EMPTY); + + // try to create a too-big (maxSize+1) thread pool + threadPool = new ThreadPool(settingsBuilder() + .put("name", "testIndexingThreadPoolsMaxSize") + .put("threadpool." + name + ".size", maxSize+1) + .build()); + + // confirm it clipped us at the maxSize: + assertEquals(maxSize, ((ThreadPoolExecutor) threadPool.executor(name)).getMaximumPoolSize()); + + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + threadPool.setClusterSettings(clusterSettings); + + // update it to a tiny size: + clusterSettings.applySettings( + settingsBuilder() + .put("threadpool." + name + ".size", 1) + .build() + ); + + // confirm it worked: + assertEquals(1, ((ThreadPoolExecutor) threadPool.executor(name)).getMaximumPoolSize()); + + // try to update to too-big size: + clusterSettings.applySettings( + settingsBuilder() + .put("threadpool." 
+ name + ".size", maxSize+1) + .build() + ); + + // confirm it clipped us at the maxSize: + assertEquals(maxSize, ((ThreadPoolExecutor) threadPool.executor(name)).getMaximumPoolSize()); + } finally { + terminateThreadPoolIfNeeded(threadPool); + } + } + } + public void testUpdateSettingsCanNotChangeThreadPoolType() throws InterruptedException { String threadPoolName = randomThreadPoolName(); ThreadPool.ThreadPoolType invalidThreadPoolType = randomIncorrectThreadPoolType(threadPoolName); @@ -165,6 +211,14 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { } } + private static int getExpectedThreadPoolSize(Settings settings, String name, int size) { + if (name.equals(ThreadPool.Names.BULK) || name.equals(ThreadPool.Names.INDEX)) { + return Math.min(size, EsExecutors.boundedNumberOfProcessors(settings)); + } else { + return size; + } + } + public void testFixedExecutorType() throws InterruptedException { String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.FIXED); ThreadPool threadPool = null; @@ -179,12 +233,14 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { Settings settings = clusterSettings.applySettings(settingsBuilder() .put("threadpool." + threadPoolName + ".size", "15") .build()); + + int expectedSize = getExpectedThreadPoolSize(nodeSettings, threadPoolName, 15); assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.FIXED); assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class)); - assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getCorePoolSize(), equalTo(15)); - assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getMaximumPoolSize(), equalTo(15)); - assertThat(info(threadPool, threadPoolName).getMin(), equalTo(15)); - assertThat(info(threadPool, threadPoolName).getMax(), equalTo(15)); + assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getCorePoolSize(), equalTo(expectedSize)); + assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getMaximumPoolSize(), equalTo(expectedSize)); + assertThat(info(threadPool, threadPoolName).getMin(), equalTo(expectedSize)); + assertThat(info(threadPool, threadPoolName).getMax(), equalTo(expectedSize)); // keep alive does not apply to fixed thread pools assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(0L)); @@ -194,20 +250,23 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { // Make sure keep alive value is not used assertThat(info(threadPool, threadPoolName).getKeepAlive(), nullValue()); // Make sure keep pool size value were reused - assertThat(info(threadPool, threadPoolName).getMin(), equalTo(15)); - assertThat(info(threadPool, threadPoolName).getMax(), equalTo(15)); + assertThat(info(threadPool, threadPoolName).getMin(), equalTo(expectedSize)); + assertThat(info(threadPool, threadPoolName).getMax(), equalTo(expectedSize)); assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class)); - assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getCorePoolSize(), equalTo(15)); - assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getMaximumPoolSize(), equalTo(15)); + assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getCorePoolSize(), equalTo(expectedSize)); + assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getMaximumPoolSize(), equalTo(expectedSize)); // Change 
size Executor oldExecutor = threadPool.executor(threadPoolName); settings = clusterSettings.applySettings(settingsBuilder().put(settings).put("threadpool." + threadPoolName + ".size", "10").build()); + + expectedSize = getExpectedThreadPoolSize(nodeSettings, threadPoolName, 10); + // Make sure size values changed - assertThat(info(threadPool, threadPoolName).getMax(), equalTo(10)); - assertThat(info(threadPool, threadPoolName).getMin(), equalTo(10)); - assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getMaximumPoolSize(), equalTo(10)); - assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getCorePoolSize(), equalTo(10)); + assertThat(info(threadPool, threadPoolName).getMax(), equalTo(expectedSize)); + assertThat(info(threadPool, threadPoolName).getMin(), equalTo(expectedSize)); + assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getMaximumPoolSize(), equalTo(expectedSize)); + assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getCorePoolSize(), equalTo(expectedSize)); // Make sure executor didn't change assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.FIXED); assertThat(threadPool.executor(threadPoolName), sameInstance(oldExecutor)); diff --git a/core/src/test/resources/indices/bwc/index-2.0.0.zip b/core/src/test/resources/indices/bwc/index-2.0.0.zip index 7110fb424a8..b16a37fffcd 100644 Binary files a/core/src/test/resources/indices/bwc/index-2.0.0.zip and b/core/src/test/resources/indices/bwc/index-2.0.0.zip differ diff --git a/core/src/test/resources/indices/bwc/index-2.0.1.zip b/core/src/test/resources/indices/bwc/index-2.0.1.zip index dccb7774fa6..3b1f32195e7 100644 Binary files a/core/src/test/resources/indices/bwc/index-2.0.1.zip and b/core/src/test/resources/indices/bwc/index-2.0.1.zip differ diff --git a/core/src/test/resources/indices/bwc/index-2.0.2.zip b/core/src/test/resources/indices/bwc/index-2.0.2.zip index 2f77405a831..447d37255d5 100644 Binary files a/core/src/test/resources/indices/bwc/index-2.0.2.zip and b/core/src/test/resources/indices/bwc/index-2.0.2.zip differ diff --git a/core/src/test/resources/indices/bwc/index-2.1.0.zip b/core/src/test/resources/indices/bwc/index-2.1.0.zip index 8c07e922260..23cc65b4ab5 100644 Binary files a/core/src/test/resources/indices/bwc/index-2.1.0.zip and b/core/src/test/resources/indices/bwc/index-2.1.0.zip differ diff --git a/core/src/test/resources/indices/bwc/index-2.1.1.zip b/core/src/test/resources/indices/bwc/index-2.1.1.zip index 74c967d2c61..fa255dfce1a 100644 Binary files a/core/src/test/resources/indices/bwc/index-2.1.1.zip and b/core/src/test/resources/indices/bwc/index-2.1.1.zip differ diff --git a/core/src/test/resources/indices/bwc/repo-2.0.0.zip b/core/src/test/resources/indices/bwc/repo-2.0.0.zip index 9605830a12c..60b01723f29 100644 Binary files a/core/src/test/resources/indices/bwc/repo-2.0.0.zip and b/core/src/test/resources/indices/bwc/repo-2.0.0.zip differ diff --git a/core/src/test/resources/indices/bwc/repo-2.0.1.zip b/core/src/test/resources/indices/bwc/repo-2.0.1.zip index 305820877bb..44701b8df59 100644 Binary files a/core/src/test/resources/indices/bwc/repo-2.0.1.zip and b/core/src/test/resources/indices/bwc/repo-2.0.1.zip differ diff --git a/core/src/test/resources/indices/bwc/repo-2.0.2.zip b/core/src/test/resources/indices/bwc/repo-2.0.2.zip index 696ffd939d5..eab763106f7 100644 Binary files a/core/src/test/resources/indices/bwc/repo-2.0.2.zip and 
b/core/src/test/resources/indices/bwc/repo-2.0.2.zip differ diff --git a/core/src/test/resources/indices/bwc/repo-2.1.0.zip b/core/src/test/resources/indices/bwc/repo-2.1.0.zip index 2f287ea3481..1165341de36 100644 Binary files a/core/src/test/resources/indices/bwc/repo-2.1.0.zip and b/core/src/test/resources/indices/bwc/repo-2.1.0.zip differ diff --git a/core/src/test/resources/indices/bwc/repo-2.1.1.zip b/core/src/test/resources/indices/bwc/repo-2.1.1.zip index 3253da62c3f..2b5bce15bc7 100644 Binary files a/core/src/test/resources/indices/bwc/repo-2.1.1.zip and b/core/src/test/resources/indices/bwc/repo-2.1.1.zip differ diff --git a/core/src/test/resources/indices/percolator/bwc_index_2.0.0.zip b/core/src/test/resources/indices/percolator/bwc_index_2.0.0.zip new file mode 100644 index 00000000000..f0e2d05e4af Binary files /dev/null and b/core/src/test/resources/indices/percolator/bwc_index_2.0.0.zip differ diff --git a/dev-tools/create_bwc_index.py b/dev-tools/create_bwc_index.py index 83a35941577..af5945a418e 100644 --- a/dev-tools/create_bwc_index.py +++ b/dev-tools/create_bwc_index.py @@ -257,10 +257,19 @@ def generate_index(client, version, index_name): # Same as ES default (5 GB), but missing the units to make sure they are inserted on upgrade: settings['merge.policy.max_merged_segment'] = '5368709120' + warmers = {} + warmers['warmer1'] = { + 'source': { + 'query': { + 'match_all': {} + } + } + } client.indices.create(index=index_name, body={ 'settings': settings, - 'mappings': mappings + 'mappings': mappings, + 'warmers': warmers }) health = client.cluster.health(wait_for_status='green', wait_for_relocating_shards=0) assert health['timed_out'] == False, 'cluster health timed out %s' % health diff --git a/distribution/src/main/resources/bin/elasticsearch b/distribution/src/main/resources/bin/elasticsearch index 459169bc3c4..9e4af4ccbb2 100755 --- a/distribution/src/main/resources/bin/elasticsearch +++ b/distribution/src/main/resources/bin/elasticsearch @@ -132,7 +132,7 @@ HOSTNAME=`hostname | cut -d. -f1` export HOSTNAME # manual parsing to find out, if process should be detached -daemonized=`echo $* | grep -E -- '(^-d |-d$| -d |--daemonize$|--daemonize )'` +daemonized=`echo $* | egrep -- '(^-d |-d$| -d |--daemonize$|--daemonize )'` if [ -z "$daemonized" ] ; then exec "$JAVA" $JAVA_OPTS $ES_JAVA_OPTS -Des.path.home="$ES_HOME" -cp "$ES_CLASSPATH" \ org.elasticsearch.bootstrap.Elasticsearch start "$@" diff --git a/docs/plugins/repository.asciidoc b/docs/plugins/repository.asciidoc index 5706fc74c12..73447bb07e5 100644 --- a/docs/plugins/repository.asciidoc +++ b/docs/plugins/repository.asciidoc @@ -28,7 +28,7 @@ The Hadoop HDFS Repository plugin adds support for using HDFS as a repository. 
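For reference on what the regenerated backwards-compatibility indices above now contain: the `create_bwc_index.py` change registers a `match_all` warmer at index-creation time, so the 3.0 upgrade path that drops warmer metadata actually gets exercised. A rough Java-API equivalent of that request against a 2.x cluster is sketched below; the client setup and index name are assumptions for illustration.

[source,java]
--------------------------------------------------
// Sketch only: register a warmer in the index-creation body on a 2.x
// cluster, mirroring what dev-tools/create_bwc_index.py now does in Python.
// The Client instance and index name are illustrative assumptions.
import org.elasticsearch.client.Client;
import org.elasticsearch.common.xcontent.XContentFactory;

public class CreateBwcIndexSketch {
    static void createIndexWithWarmer(Client client, String indexName) throws Exception {
        client.admin().indices().prepareCreate(indexName)
                .setSource(XContentFactory.jsonBuilder().startObject()
                        .startObject("warmers")
                            .startObject("warmer1")
                                .startObject("source")
                                    .startObject("query")
                                        .startObject("match_all").endObject()
                                    .endObject()
                                .endObject()
                            .endObject()
                        .endObject()
                    .endObject())
                .get();
    }
}
--------------------------------------------------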
The following plugin has been contributed by our community: -* https://github.com/wikimedia/search-repository-swift[Openstack Swift] (by http://en.cam4.es/youngqcmeat/Wikimedia Foundation) +* https://github.com/wikimedia/search-repository-swift[Openstack Swift] (by Wikimedia Foundation) This community plugin appears to have been abandoned: diff --git a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc index 5d849807bc7..1ac3a7af244 100644 --- a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc @@ -621,11 +621,13 @@ The `french` analyzer could be reimplemented as a `custom` analyzer as follows: "analysis": { "filter": { "french_elision": { - "type": "elision", - "articles": [ "l", "m", "t", "qu", "n", "s", - "j", "d", "c", "jusqu", "quoiqu", - "lorsqu", "puisqu" - ] + "type": "elision", + "articles_case": true, + "articles": [ + "l", "m", "t", "qu", "n", "s", + "j", "d", "c", "jusqu", "quoiqu", + "lorsqu", "puisqu" + ] }, "french_stop": { "type": "stop", diff --git a/docs/reference/cat/thread_pool.asciidoc b/docs/reference/cat/thread_pool.asciidoc index f50dc369c3d..eb566f20fee 100644 --- a/docs/reference/cat/thread_pool.asciidoc +++ b/docs/reference/cat/thread_pool.asciidoc @@ -63,7 +63,7 @@ Currently available <>: |`search` |`s` |Thread pool used for <>/<> operations |`snapshot` |`sn` |Thread pool used for <> operations |`suggest` |`su` |Thread pool used for <> operations -|`warmer` |`w` |Thread pool used for <> operations +|`warmer` |`w` |Thread pool used for index warm-up operations |======================================================================= The thread pool name (or alias) must be combined with a thread pool field below diff --git a/docs/reference/indices.asciidoc b/docs/reference/indices.asciidoc index da41ac52df5..7543a3f0bbb 100644 --- a/docs/reference/indices.asciidoc +++ b/docs/reference/indices.asciidoc @@ -4,8 +4,7 @@ [partintro] -- The indices APIs are used to manage individual indices, -index settings, aliases, mappings, index templates -and warmers. +index settings, aliases, mappings, and index templates. [float] [[index-management]] @@ -38,7 +37,6 @@ and warmers. * <> * <> * <> -* <> [float] [[shadow-replicas]] @@ -92,8 +90,6 @@ include::indices/analyze.asciidoc[] include::indices/templates.asciidoc[] -include::indices/warmers.asciidoc[] - include::indices/shadow-replicas.asciidoc[] include::indices/stats.asciidoc[] diff --git a/docs/reference/indices/create-index.asciidoc b/docs/reference/indices/create-index.asciidoc index 52a867958ef..b5cd6c3aea1 100644 --- a/docs/reference/indices/create-index.asciidoc +++ b/docs/reference/indices/create-index.asciidoc @@ -86,27 +86,6 @@ curl -XPOST localhost:9200/test -d '{ }' -------------------------------------------------- -[float] -[[warmers]] -=== Warmers - -The create index API allows also to provide a set of <>: - -[source,js] --------------------------------------------------- -curl -XPUT localhost:9200/test -d '{ - "warmers" : { - "warmer_1" : { - "source" : { - "query" : { - ... 
- } - } - } - } -}' --------------------------------------------------- - [float] [[create-index-aliases]] === Aliases diff --git a/docs/reference/indices/delete-index.asciidoc b/docs/reference/indices/delete-index.asciidoc index 25d176668cc..5c652accfb9 100644 --- a/docs/reference/indices/delete-index.asciidoc +++ b/docs/reference/indices/delete-index.asciidoc @@ -11,9 +11,8 @@ $ curl -XDELETE 'http://localhost:9200/twitter/' The above example deletes an index called `twitter`. Specifying an index, alias or wildcard expression is required. -The delete index API can also be applied to more than one index, or on -all indices (be careful!) by using `_all` or `*` as index. +The delete index API can also be applied to more than one index, by either using a comma separated list, or on all indices (be careful!) by using `_all` or `*` as index. In order to disable allowing to delete indices via wildcards or `_all`, set `action.destructive_requires_name` setting in the config to `true`. -This setting can also be changed via the cluster update settings api. \ No newline at end of file +This setting can also be changed via the cluster update settings api. diff --git a/docs/reference/indices/get-index.asciidoc b/docs/reference/indices/get-index.asciidoc index 78cfa074550..b82bee05630 100644 --- a/docs/reference/indices/get-index.asciidoc +++ b/docs/reference/indices/get-index.asciidoc @@ -27,4 +27,4 @@ $ curl -XGET 'http://localhost:9200/twitter/_settings,_mappings' The above command will only return the settings and mappings for the index called `twitter`. -The available features are `_settings`, `_mappings`, `_warmers` and `_aliases`. \ No newline at end of file +The available features are `_settings`, `_mappings` and `_aliases`. diff --git a/docs/reference/indices/warmers.asciidoc b/docs/reference/indices/warmers.asciidoc deleted file mode 100644 index b8f670af73a..00000000000 --- a/docs/reference/indices/warmers.asciidoc +++ /dev/null @@ -1,194 +0,0 @@ -[[indices-warmers]] -== Warmers - -Index warming allows to run registered search requests to warm up the index -before it is available for search. With the near real time aspect of search, -cold data (segments) will be warmed up before they become available for search. -This includes things such as the filter cache, filesystem cache, and loading -field data for fields. - -Warmup searches typically include requests that require heavy loading of -data, such as aggregations or sorting on specific fields. The warmup APIs -allows to register warmup (search) under specific names, remove them, -and get them. - -Index warmup can be disabled by setting `index.warmer.enabled` to -`false`. It is supported as a realtime setting using update settings -API. This can be handy when doing initial bulk indexing: disable pre -registered warmers to make indexing faster and less expensive and then -enable it. - -[float] -[[creation]] -=== Index Creation / Templates - -Warmers can be registered when an index gets created, for example: - -[source,js] --------------------------------------------------- -curl -XPUT localhost:9200/test -d '{ - "warmers" : { - "warmer_1" : { - "types" : [], - "source" : { - "query" : { - ... - }, - "aggs" : { - ... - } - } - } - } -}' --------------------------------------------------- - -Or, in an index template: - -[source,js] --------------------------------------------------- -curl -XPUT localhost:9200/_template/template_1 -d ' -{ - "template" : "te*", - "warmers" : { - "warmer_1" : { - "types" : [], - "source" : { - "query" : { - ... 
- }, - "aggs" : { - ... - } - } - } - } -}' --------------------------------------------------- - -On the same level as `types` and `source`, the `request_cache` flag is supported -to enable request caching for the warmed search request. If not specified, it will -use the index level configuration of query caching. - -[float] -[[warmer-adding]] -=== Put Warmer - -Allows to put a warmup search request on a specific index (or indices), -with the body composing of a regular search request. Types can be -provided as part of the URI if the search request is designed to be run -only against the specific types. - -Here is an example that registers a warmup called `warmer_1` against -index `test` (can be alias or several indices), for a search request -that runs against all types: - -[source,js] --------------------------------------------------- -curl -XPUT localhost:9200/test/_warmer/warmer_1 -d '{ - "query" : { - "match_all" : {} - }, - "aggs" : { - "aggs_1" : { - "terms" : { - "field" : "field" - } - } - } -}' --------------------------------------------------- - -And an example that registers a warmup against specific types: - -[source,js] --------------------------------------------------- -curl -XPUT localhost:9200/test/type1/_warmer/warmer_1 -d '{ - "query" : { - "match_all" : {} - }, - "aggs" : { - "aggs_1" : { - "terms" : { - "field" : "field" - } - } - } -}' --------------------------------------------------- - -All options: - -[source,js] --------------------------------------------------- - -PUT _warmer/{warmer_name} - -PUT /{index}/_warmer/{warmer_name} - -PUT /{index}/{type}/_warmer/{warmer_name} - --------------------------------------------------- - - -where - -[horizontal] -`{index}`:: `* | _all | glob pattern | name1, name2, …` - -`{type}`:: `* | _all | glob pattern | name1, name2, …` - -Instead of `_warmer` you can also use the plural `_warmers`. - -The `request_cache` parameter can be used to enable request caching for -the search request. If not specified, it will use the index level configuration -of query caching. - - -[float] -[[removing]] -=== Delete Warmers - -Warmers can be deleted using the following endpoint: - - - -[source,js] --------------------------------------------------- - -[DELETE] /{index}/_warmer/{name} - --------------------------------------------------- - - -where - -[horizontal] -`{index}`:: `* | _all | glob pattern | name1, name2, …` - -`{name}`:: `* | _all | glob pattern | name1, name2, …` - -Instead of `_warmer` you can also use the plural `_warmers`. - -[float] -[[warmer-retrieving]] -=== GETting Warmer - -Getting a warmer for specific index (or alias, or several indices) based -on its name. The provided name can be a simple wildcard expression or -omitted to get all warmers. - -Some examples: - -[source,js] --------------------------------------------------- -# get warmer named warmer_1 on test index -curl -XGET localhost:9200/test/_warmer/warmer_1 - -# get all warmers that start with warm on test index -curl -XGET localhost:9200/test/_warmer/warm* - -# get all warmers for test index -curl -XGET localhost:9200/test/_warmer/ --------------------------------------------------- - diff --git a/docs/reference/mapping/dynamic-mapping.asciidoc b/docs/reference/mapping/dynamic-mapping.asciidoc index 0f445ac6152..beb7d4360d4 100644 --- a/docs/reference/mapping/dynamic-mapping.asciidoc +++ b/docs/reference/mapping/dynamic-mapping.asciidoc @@ -33,7 +33,7 @@ purposes with: Custom rules to configure the mapping for dynamically added fields. 
TIP: <> allow you to configure the default -mappings, settings, aliases, and warmers for new indices, whether created +mappings, settings and aliases for new indices, whether created automatically or explicitly. diff --git a/docs/reference/mapping/types/nested.asciidoc b/docs/reference/mapping/types/nested.asciidoc index b4bb06e236c..e13b94c7773 100644 --- a/docs/reference/mapping/types/nested.asciidoc +++ b/docs/reference/mapping/types/nested.asciidoc @@ -55,7 +55,7 @@ GET my_index/_search "bool": { "must": [ { "match": { "user.first": "Alice" }}, - { "match": { "user.last": "White" }} + { "match": { "user.last": "Smith" }} ] } } diff --git a/docs/reference/migration/migrate_1_0.asciidoc b/docs/reference/migration/migrate_1_0.asciidoc index c8750d11b82..f8cfad2f71c 100644 --- a/docs/reference/migration/migrate_1_0.asciidoc +++ b/docs/reference/migration/migrate_1_0.asciidoc @@ -144,7 +144,7 @@ In the future we will also provide plural versions to allow putting multiple map See <>, <>, <>, <>, <>, -<>, and <> for more details. +`warmers`, and <> for more details. === Index request diff --git a/docs/reference/migration/migrate_1_4.asciidoc b/docs/reference/migration/migrate_1_4.asciidoc index 03a4c1fe741..eecf9ca13e7 100644 --- a/docs/reference/migration/migrate_1_4.asciidoc +++ b/docs/reference/migration/migrate_1_4.asciidoc @@ -32,7 +32,7 @@ Add or update a mapping via the <> or [float] === Indices APIs -The <> will return a section for `warmers` even if there are +The get warmer API will return a section for `warmers` even if there are no warmers. This ensures that the following two examples are equivalent: [source,js] diff --git a/docs/reference/migration/migrate_3_0.asciidoc b/docs/reference/migration/migrate_3_0.asciidoc index 190f440a8fd..d3c0b5a2d7a 100644 --- a/docs/reference/migration/migrate_3_0.asciidoc +++ b/docs/reference/migration/migrate_3_0.asciidoc @@ -17,6 +17,16 @@ your application to Elasticsearch 3.0. * <> [[breaking_30_search_changes]] +=== Warmers + +Thanks to several changes like doc values by default and disk-based norms, +warmers have become quite useless. As a consequence, warmers and the warmer +API have been removed: it is no longer possible to register queries that +will run before a new IndexSearcher is published. + +Don't worry if you have warmers defined on your indices: they will simply be +ignored when upgrading to 3.0. + === Search changes ==== `search_type=count` removed @@ -578,3 +588,19 @@ balancing into account but don't assign the shard if the allocation deciders are in the case where shard copies can be found. Previously, a node not holding the shard copy was chosen if none of the nodes holding shard copies were satisfying the allocation deciders. Now, the shard will be assigned to a node having a shard copy, even if none of the nodes holding a shard copy satisfy the allocation deciders. + +=== Percolator + +Additions and modifications to existing percolator queries are no longer immediately visible +to the percolator. A refresh must run before the changes become visible to the percolator. + +The reason that this has changed is that on newly created indices the percolator automatically indexes the query terms +and these query terms are used at percolate time to reduce the number of queries the percolate API needs to evaluate. +This optimization didn't work in the previous mode, where modifications to queries were immediately visible.
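Because of the refresh requirement described above, client code now needs an explicit refresh between registering a percolator query and percolating against it. A rough sketch of the flow, with the index, type and field names as illustrative assumptions:

[source,java]
--------------------------------------------------
// Sketch of the 3.0 percolator flow: register a query, refresh, then
// percolate. Index/type/field names are illustrative assumptions.
import org.elasticsearch.action.percolate.PercolateResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.xcontent.XContentFactory;

public class PercolateAfterRefreshSketch {
    static PercolateResponse registerAndPercolate(Client client) throws Exception {
        // 1. register a percolator query
        client.prepareIndex("my-index", ".percolator", "query-1")
                .setSource(XContentFactory.jsonBuilder().startObject()
                        .startObject("query")
                            .startObject("match").field("message", "bonsai tree").endObject()
                        .endObject()
                    .endObject())
                .get();
        // 2. as of 3.0 the new query is not visible to the percolator
        //    until a refresh has run
        client.admin().indices().prepareRefresh("my-index").get();
        // 3. percolate a document against the registered queries
        return client.preparePercolate()
                .setIndices("my-index")
                .setDocumentType("my-type")
                .setSource(XContentFactory.jsonBuilder().startObject()
                        .startObject("doc").field("message", "A new bonsai tree in the office").endObject()
                    .endObject())
                .get();
    }
}
--------------------------------------------------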
+ +The percolator by default sets the `size` option to `10` whereas before this was set to unlimited. + +The percolate API can no longer accept documents that have fields that don't exist in the mapping. + +When percolating an existing document, specifying a document in the source of the percolate request is no longer +allowed. diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index 823bdb70d07..322c9e7308e 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -443,3 +443,9 @@ The `not` query has been replaced by using a `mustNot` clause in a Boolean query === Nested type The docs for the `nested` field datatype have moved to <>. + +[role="exclude",id="indices-warmers"] +=== Warmers + +Warmers have been removed. There have been significant improvements to the +index that make warmers unnecessary. diff --git a/docs/reference/search.asciidoc b/docs/reference/search.asciidoc index da7d2e5ee4b..a4de20ee213 100644 --- a/docs/reference/search.asciidoc +++ b/docs/reference/search.asciidoc @@ -73,6 +73,19 @@ the request with two different groups: } -------------------------------------------------- +[float] +[[global-search-timeout]] +== Global Search Timeout + +Individual searches can have a timeout as part of the +<>. Since search requests can originate from many +sources, Elasticsearch has a dynamic cluster-level setting for a global +search timeout that applies to all search requests that do not set a +timeout in the <>. The default value is no global +timeout. The setting key is `search.default_search_timeout` and can be +set using the <> endpoints. Setting this value +to `-1` resets the global search timeout to no timeout. + -- include::search/search.asciidoc[] diff --git a/docs/reference/search/percolate.asciidoc b/docs/reference/search/percolate.asciidoc index dc4a14e335d..7f160d1a503 100644 --- a/docs/reference/search/percolate.asciidoc +++ b/docs/reference/search/percolate.asciidoc @@ -1,6 +1,12 @@ [[search-percolate]] == Percolator +added[3.0.0,Modifications to percolator queries aren't visible immediately and a refresh is required] + +added[3.0.0,The percolate API by default limits the number of matches to `10` whereas before this was unlimited] + +added[3.0.0,For indices created on or after version 3.0.0 the percolator automatically indexes the query terms with the percolator queries; this allows the percolator to percolate documents more quickly. It is advisable to reindex any pre-3.0.0 indices to take advantage of this new optimization] + Traditionally you design documents based on your data, store them into an index, and then define queries via the search API in order to retrieve these documents. The percolator works in the opposite direction. First you store queries into an index and then, via the percolate API, you define documents in order to retrieve these queries. @@ -10,9 +16,6 @@ JSON. This allows you to embed queries into documents via the index API. Elastic document and make it available to the percolate API. Since documents are also defined as JSON, you can define a document in a request to the percolate API. -The percolator and most of its features work in realtime, so once a percolate query is indexed it can immediately be used -in the percolate API. - [IMPORTANT] ===================================== @@ -219,7 +222,7 @@ filter will be included in the percolate execution. The filter option works in n occurred for the filter to include the latest percolate queries.
* `query` - Same as the `filter` option, but also the score is computed. The computed scores can then be used by the `track_scores` and `sort` option. -* `size` - Defines to maximum number of matches (percolate queries) to be returned. Defaults to unlimited. +* `size` - Defines the maximum number of matches (percolate queries) to be returned. Defaults to 10. * `track_scores` - Whether the `_score` is included for each match. The `_score` is based on the query and represents how the query matched the *percolate query's metadata*, *not* how the document (that is being percolated) matched the query. The `query` option is required for this option. Defaults to `false`. @@ -310,6 +313,10 @@ document. Internally the percolate API will issue a GET request for fetching the `_source` of the document to percolate. For this feature to work, the `_source` for documents to be percolated needs to be stored. +If percolating an existing document and a document is also specified in the source of the percolate request then +an error is thrown. Either the document to percolate should be specified in the source, or it should be defined by specifying the +index, type and id. + [float] ==== Example @@ -379,13 +386,11 @@ requests.txt: {"percolate" : {"index" : "twitter", "type" : "tweet"}} {"doc" : {"message" : "some text"}} {"percolate" : {"index" : "twitter", "type" : "tweet", "id" : "1"}} -{} {"percolate" : {"index" : "users", "type" : "user", "id" : "3", "percolate_index" : "users_2012" }} {"size" : 10} {"count" : {"index" : "twitter", "type" : "tweet"}} {"doc" : {"message" : "some other text"}} {"count" : {"index" : "twitter", "type" : "tweet", "id" : "1"}} -{} -------------------------------------------------- For a percolate existing document item (headers with the `id` field), the response can be an empty JSON object. diff --git a/docs/reference/search/request/from-size.asciidoc b/docs/reference/search/request/from-size.asciidoc index d19b850ec4a..0804ff27d58 100644 --- a/docs/reference/search/request/from-size.asciidoc +++ b/docs/reference/search/request/from-size.asciidoc @@ -21,6 +21,5 @@ defaults to `10`. -------------------------------------------------- Note that `from` + `size` can not be more than the `index.max_result_window` -index setting which defaults to 10,000. See the -{ref}/search-request-scroll.html[Scroll] api for more efficient ways to do deep -scrolling. +index setting which defaults to 10,000. See the <> +API for more efficient ways to do deep scrolling. diff --git a/docs/reference/search/request/inner-hits.asciidoc b/docs/reference/search/request/inner-hits.asciidoc index a79071b2662..09d995bbd27 100644 --- a/docs/reference/search/request/inner-hits.asciidoc +++ b/docs/reference/search/request/inner-hits.asciidoc @@ -11,7 +11,7 @@ it's very useful to know which inner nested objects (in the case of nested) or c of parent/child) caused certain information to be returned. The inner hits feature can be used for this. This feature returns per search hit in the search response additional nested hits that caused a search hit to match in a different scope. -Inner hits can be used by defining a `inner_hits` definition on a `nested`, `has_child` or `has_parent` query and filter. +Inner hits can be used by defining an `inner_hits` definition on a `nested`, `has_child` or `has_parent` query and filter.
The structure looks like this: [source,js] @@ -23,7 +23,7 @@ The structure looks like this: } -------------------------------------------------- -If `_inner_hits` is defined on a query that supports it then each search hit will contain a `inner_hits` json object with the following structure: +If `_inner_hits` is defined on a query that supports it then each search hit will contain an `inner_hits` json object with the following structure: [source,js] -------------------------------------------------- @@ -234,7 +234,7 @@ An example of a response snippet that could be generated from the above search r Besides defining inner hits on query and filters, inner hits can also be defined as a top level construct alongside the `query` and `aggregations` definition. The main reason for using the top level inner hits definition is to let the inner hits return documents that don't match with the main query. Also inner hits definitions can be nested via the -top level notation. Other then that the inner hit definition inside the query should be used, because that is the most +top level notation. Other than that, the inner hit definition inside the query should be used because that is the most compact way for defining inner hits. The following snippet explains the basic structure of inner hits defined at the top level of the search request body: @@ -254,7 +254,7 @@ The following snippet explains the basic structure of inner hits defined at the } -------------------------------------------------- -Inside the `inner_hits` definition, first the name if the inner hit is defined then whether the inner_hit +Inside the `inner_hits` definition, first the name of the inner hit is defined then whether the inner_hit is a nested by defining `path` or a parent/child based definition by defining `type`. The next object layer contains the name of the nested object field if the inner_hits is nested or the parent or child type if the inner_hit definition is parent/child based. 
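As a companion to the structure described above, the body of a nested query carrying an `inner_hits` block can also be assembled programmatically; a sketch using `XContentBuilder`, with the `comments` path and field names as illustrative assumptions:

[source,java]
--------------------------------------------------
// Sketch: build the nested-query-with-inner_hits body shown in these docs
// via XContentBuilder instead of a raw JSON string. Path and field names
// are illustrative assumptions.
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

public class InnerHitsBodySketch {
    static XContentBuilder nestedWithInnerHits() throws Exception {
        return XContentFactory.jsonBuilder().startObject()
                .startObject("query")
                    .startObject("nested")
                        .field("path", "comments")
                        .startObject("query")
                            .startObject("match")
                                .field("comments.message", "some message")
                            .endObject()
                        .endObject()
                        .startObject("inner_hits").endObject() // empty object: defaults
                    .endObject()
                .endObject()
            .endObject();
    }
}
--------------------------------------------------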
diff --git a/modules/lang-expression/build.gradle b/modules/lang-expression/build.gradle index 9e3943a32b2..2fd6e53effa 100644 --- a/modules/lang-expression/build.gradle +++ b/modules/lang-expression/build.gradle @@ -35,6 +35,3 @@ dependencyLicenses { mapping from: /asm-.*/, to: 'asm' } -compileJava.options.compilerArgs << '-Xlint:-rawtypes' -compileTestJava.options.compilerArgs << '-Xlint:-rawtypes' - diff --git a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/CountMethodValueSource.java b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/CountMethodValueSource.java index c50aa4da289..043a11eebad 100644 --- a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/CountMethodValueSource.java +++ b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/CountMethodValueSource.java @@ -19,6 +19,10 @@ package org.elasticsearch.script.expression; +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.queries.function.FunctionValues; import org.apache.lucene.queries.function.ValueSource; @@ -26,10 +30,6 @@ import org.elasticsearch.index.fielddata.AtomicFieldData; import org.elasticsearch.index.fielddata.AtomicNumericFieldData; import org.elasticsearch.index.fielddata.IndexFieldData; -import java.io.IOException; -import java.util.Map; -import java.util.Objects; - /** * A ValueSource to create FunctionValues to get the count of the number of values in a field for a document. */ @@ -43,6 +43,7 @@ public class CountMethodValueSource extends ValueSource { } @Override + @SuppressWarnings("rawtypes") // ValueSource uses a rawtype public FunctionValues getValues(Map context, LeafReaderContext leaf) throws IOException { AtomicFieldData leafData = fieldData.load(leaf); assert(leafData instanceof AtomicNumericFieldData); diff --git a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/DateMethodValueSource.java b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/DateMethodValueSource.java index 9efeed54ff9..e6c9dcddc78 100644 --- a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/DateMethodValueSource.java +++ b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/DateMethodValueSource.java @@ -19,6 +19,10 @@ package org.elasticsearch.script.expression; +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.queries.function.FunctionValues; import org.elasticsearch.index.fielddata.AtomicFieldData; @@ -26,10 +30,6 @@ import org.elasticsearch.index.fielddata.AtomicNumericFieldData; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.search.MultiValueMode; -import java.io.IOException; -import java.util.Map; -import java.util.Objects; - class DateMethodValueSource extends FieldDataValueSource { protected final String methodName; @@ -45,6 +45,7 @@ class DateMethodValueSource extends FieldDataValueSource { } @Override + @SuppressWarnings("rawtypes") // ValueSource uses a rawtype public FunctionValues getValues(Map context, LeafReaderContext leaf) throws IOException { AtomicFieldData leafData = fieldData.load(leaf); assert(leafData instanceof AtomicNumericFieldData); diff --git a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/FieldDataValueSource.java 
b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/FieldDataValueSource.java index 708cd0af152..ae84a5cbfe7 100644 --- a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/FieldDataValueSource.java +++ b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/FieldDataValueSource.java @@ -19,6 +19,10 @@ package org.elasticsearch.script.expression; +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.queries.function.FunctionValues; import org.apache.lucene.queries.function.ValueSource; @@ -27,10 +31,6 @@ import org.elasticsearch.index.fielddata.AtomicNumericFieldData; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.search.MultiValueMode; -import java.io.IOException; -import java.util.Map; -import java.util.Objects; - /** * A {@link ValueSource} wrapper for field data. */ @@ -67,6 +67,7 @@ class FieldDataValueSource extends ValueSource { } @Override + @SuppressWarnings("rawtypes") // ValueSource uses a rawtype public FunctionValues getValues(Map context, LeafReaderContext leaf) throws IOException { AtomicFieldData leafData = fieldData.load(leaf); assert(leafData instanceof AtomicNumericFieldData); diff --git a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ReplaceableConstValueSource.java b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ReplaceableConstValueSource.java index c9bcc239f28..bb05ef2325d 100644 --- a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ReplaceableConstValueSource.java +++ b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ReplaceableConstValueSource.java @@ -19,13 +19,13 @@ package org.elasticsearch.script.expression; +import java.io.IOException; +import java.util.Map; + import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.queries.function.FunctionValues; import org.apache.lucene.queries.function.ValueSource; -import java.io.IOException; -import java.util.Map; - /** * A {@link ValueSource} which has a stub {@link FunctionValues} that holds a dynamically replaceable constant double. 
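The pattern shared by these expression `ValueSource` changes is worth seeing in isolation: Lucene declares `ValueSource#getValues` with a raw `Map`, so each override suppresses the rawtypes warning locally rather than disabling `-Xlint:rawtypes` for the whole module. A schematic stand-in, not a shipped class:

[source,java]
--------------------------------------------------
// Schematic of the local-suppression pattern: the raw Map comes from the
// inherited Lucene signature, so the warning is silenced on the override
// only. This class is an illustrative stand-in.
import java.io.IOException;
import java.util.Map;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.ValueSource;

abstract class ExampleValueSource extends ValueSource {
    @Override
    @SuppressWarnings("rawtypes") // ValueSource declares getValues with a raw Map
    public FunctionValues getValues(Map context, LeafReaderContext leaf) throws IOException {
        return doGetValues(leaf);
    }

    protected abstract FunctionValues doGetValues(LeafReaderContext leaf) throws IOException;
}
--------------------------------------------------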
*/ @@ -37,6 +37,7 @@ class ReplaceableConstValueSource extends ValueSource { } @Override + @SuppressWarnings("rawtypes") // ValueSource uses a rawtype public FunctionValues getValues(Map map, LeafReaderContext atomicReaderContext) throws IOException { return fv; } diff --git a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/MoreExpressionTests.java b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/MoreExpressionTests.java index b4c0106abbe..a866f338e29 100644 --- a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/MoreExpressionTests.java +++ b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/MoreExpressionTests.java @@ -19,6 +19,12 @@ package org.elasticsearch.script.expression; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + import org.apache.lucene.expressions.Expression; import org.apache.lucene.expressions.js.JavascriptCompiler; import org.elasticsearch.action.search.SearchPhaseExecutionException; @@ -47,12 +53,6 @@ import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.bucketScript; @@ -121,7 +121,7 @@ public class MoreExpressionTests extends ESIntegTestCase { client().prepareIndex("test", "doc", "1").setSource("text", "hello goodbye"), client().prepareIndex("test", "doc", "2").setSource("text", "hello hello hello goodbye"), client().prepareIndex("test", "doc", "3").setSource("text", "hello hello goodebye")); - ScoreFunctionBuilder score = ScoreFunctionBuilders.scriptFunction(new Script("1 / _score", ScriptType.INLINE, "expression", null)); + ScoreFunctionBuilder score = ScoreFunctionBuilders.scriptFunction(new Script("1 / _score", ScriptType.INLINE, "expression", null)); SearchRequestBuilder req = client().prepareSearch().setIndices("test"); req.setQuery(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("text", "hello"), score).boostMode(CombineFunction.REPLACE)); req.setSearchType(SearchType.DFS_QUERY_THEN_FETCH); // make sure DF is consistent diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/messy/tests/TemplateQueryParserTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/messy/tests/TemplateQueryParserTests.java index 29213f0ac0e..66c3376f04e 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/messy/tests/TemplateQueryParserTests.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/messy/tests/TemplateQueryParserTests.java @@ -128,7 +128,7 @@ public class TemplateQueryParserTests extends ESTestCase { ScriptService scriptService = injector.getInstance(ScriptService.class); SimilarityService similarityService = new SimilarityService(idxSettings, Collections.emptyMap()); MapperRegistry mapperRegistry = new IndicesModule().getMapperRegistry(); - MapperService mapperService = new MapperService(idxSettings, analysisService, similarityService, mapperRegistry); + MapperService mapperService = new MapperService(idxSettings, analysisService, 
similarityService, mapperRegistry, () -> context); IndexFieldDataService indexFieldDataService =new IndexFieldDataService(idxSettings, injector.getInstance(IndicesFieldDataCache.class), injector.getInstance(CircuitBreakerService.class), mapperService); BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(idxSettings, new IndicesWarmer(idxSettings.getNodeSettings(), null), new BitsetFilterCache.Listener() { @Override diff --git a/plugins/analysis-phonetic/build.gradle b/plugins/analysis-phonetic/build.gradle index 13898be05a9..61c4fdbd583 100644 --- a/plugins/analysis-phonetic/build.gradle +++ b/plugins/analysis-phonetic/build.gradle @@ -30,6 +30,3 @@ dependencies { dependencyLicenses { mapping from: /lucene-.*/, to: 'lucene' } - -compileJava.options.compilerArgs << "-Xlint:-rawtypes,-unchecked" - diff --git a/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/PhoneticTokenFilterFactory.java b/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/PhoneticTokenFilterFactory.java index 9374410765d..e33f1f1e7e2 100644 --- a/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/PhoneticTokenFilterFactory.java +++ b/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/PhoneticTokenFilterFactory.java @@ -19,6 +19,9 @@ package org.elasticsearch.index.analysis; +import java.util.Arrays; +import java.util.HashSet; + import org.apache.commons.codec.Encoder; import org.apache.commons.codec.language.Caverphone1; import org.apache.commons.codec.language.Caverphone2; @@ -43,9 +46,6 @@ import org.elasticsearch.index.analysis.phonetic.HaasePhonetik; import org.elasticsearch.index.analysis.phonetic.KoelnerPhonetik; import org.elasticsearch.index.analysis.phonetic.Nysiis; -import java.util.Arrays; -import java.util.HashSet; - /** * */ @@ -122,7 +122,7 @@ public class PhoneticTokenFilterFactory extends AbstractTokenFilterFactory { if (encoder == null) { if (ruletype != null && nametype != null) { if (languageset != null) { - final LanguageSet languages = LanguageSet.from(new HashSet(Arrays.asList(languageset))); + final LanguageSet languages = LanguageSet.from(new HashSet<>(Arrays.asList(languageset))); return new BeiderMorseFilter(tokenStream, new PhoneticEngine(nametype, ruletype, true), languages); } return new BeiderMorseFilter(tokenStream, new PhoneticEngine(nametype, ruletype, true)); diff --git a/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/phonetic/KoelnerPhonetik.java b/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/phonetic/KoelnerPhonetik.java index 57195062cdd..3c658191524 100644 --- a/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/phonetic/KoelnerPhonetik.java +++ b/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/phonetic/KoelnerPhonetik.java @@ -19,9 +19,6 @@ package org.elasticsearch.index.analysis.phonetic; -import org.apache.commons.codec.EncoderException; -import org.apache.commons.codec.StringEncoder; - import java.util.ArrayList; import java.util.Arrays; import java.util.HashSet; @@ -31,6 +28,9 @@ import java.util.Set; import java.util.regex.Matcher; import java.util.regex.Pattern; +import org.apache.commons.codec.EncoderException; +import org.apache.commons.codec.StringEncoder; + /** * Kölner Phonetik * @@ -49,13 +49,13 @@ public class KoelnerPhonetik implements StringEncoder { private static final String[] POSTEL_VARIATIONS_REPLACEMENTS = {"OWN", "AUN", "RW", "RB", "RSK", "WSK"}; private Pattern[] 
variationsPatterns; private boolean primary = false; - private final Set csz = new HashSet(Arrays.asList( + private final Set<Character> csz = new HashSet<>(Arrays.asList( 'C', 'S', 'Z')); - private final Set ckq = new HashSet(Arrays.asList( + private final Set<Character> ckq = new HashSet<>(Arrays.asList( 'C', 'K', 'Q')); - private final Set aouhkxq = new HashSet(Arrays.asList( + private final Set<Character> aouhkxq = new HashSet<>(Arrays.asList( 'A', 'O', 'U', 'H', 'K', 'X', 'Q')); - private final Set ahkloqrux = new HashSet(Arrays.asList( + private final Set<Character> ahkloqrux = new HashSet<>(Arrays.asList( 'A', 'H', 'K', 'L', 'O', 'Q', 'R', 'U', 'X')); /** @@ -139,10 +139,10 @@ public class KoelnerPhonetik implements StringEncoder { private List partition(String str) { String primaryForm = str; - List parts = new ArrayList(); + List<String> parts = new ArrayList<>(); parts.add(primaryForm.replaceAll("[^\\p{L}\\p{N}]", "")); if (!primary) { - List tmpParts = new ArrayList(); + List<String> tmpParts = new ArrayList<>(); tmpParts.addAll((Arrays.asList(str.split("[\\p{Z}\\p{C}\\p{P}]")))); int numberOfParts = tmpParts.size(); while (tmpParts.size() > 0) { @@ -156,9 +156,9 @@ public class KoelnerPhonetik implements StringEncoder { tmpParts.remove(0); } } - List variations = new ArrayList(); + List<String> variations = new ArrayList<>(); for (int i = 0; i < parts.size(); i++) { - List variation = getVariations(parts.get(i)); + List<String> variation = getVariations(parts.get(i)); if (variation != null) { variations.addAll(variation); } @@ -166,9 +166,9 @@ return variations; } - private List getVariations(String str) { + private List<String> getVariations(String str) { int position = 0; - List variations = new ArrayList(); + List<String> variations = new ArrayList<>(); variations.add(""); while (position < str.length()) { int i = 0; @@ -182,7 +182,7 @@ } if (substPos >= position) { i--; - List varNew = new ArrayList(); + List<String> varNew = new ArrayList<>(); String prevPart = str.substring(position, substPos); for (int ii = 0; ii < variations.size(); ii++) { String tmp = variations.get(ii); diff --git a/plugins/delete-by-query/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryRequest.java b/plugins/delete-by-query/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryRequest.java index fa83fb4fd3d..7103cf1a4e0 100644 --- a/plugins/delete-by-query/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryRequest.java +++ b/plugins/delete-by-query/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryRequest.java @@ -105,7 +105,7 @@ public class DeleteByQueryRequest extends ActionRequest im } @Override - public DeleteByQueryRequest indices(String[] indices) { + public DeleteByQueryRequest indices(String... indices) { this.indices = indices; return this; } diff --git a/plugins/discovery-azure/build.gradle b/plugins/discovery-azure/build.gradle index d85d08794ea..12b479eb487 100644 --- a/plugins/discovery-azure/build.gradle +++ b/plugins/discovery-azure/build.gradle @@ -56,11 +56,9 @@ dependencyLicenses { mapping from: /jaxb-.*/, to: 'jaxb' } -compileJava.options.compilerArgs << '-Xlint:-path,-serial,-static,-unchecked' +compileJava.options.compilerArgs << '-Xlint:-path,-serial,-unchecked' // TODO: why is deprecation needed here but not in maven....? compileJava.options.compilerArgs << '-Xlint:-deprecation' -// TODO: and why does this static not show up in maven...
-compileTestJava.options.compilerArgs << '-Xlint:-static' thirdPartyAudit.excludes = [ // classes are missing diff --git a/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/AzureDiscoveryModule.java b/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/AzureDiscoveryModule.java index 35bb20bc8a7..5215b90e7e1 100644 --- a/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/AzureDiscoveryModule.java +++ b/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/AzureDiscoveryModule.java @@ -47,11 +47,7 @@ public class AzureDiscoveryModule extends AbstractModule { private Settings settings; // pkg private so it is settable by tests - static Class computeServiceImpl = AzureComputeServiceImpl.class; - - public static Class getComputeServiceImpl() { - return computeServiceImpl; - } + Class computeServiceImpl = AzureComputeServiceImpl.class; @Inject public AzureDiscoveryModule(Settings settings) { diff --git a/plugins/jvm-example/build.gradle b/plugins/jvm-example/build.gradle index f0dd69ff8c4..c5828d9c86d 100644 --- a/plugins/jvm-example/build.gradle +++ b/plugins/jvm-example/build.gradle @@ -25,8 +25,6 @@ esplugin { // no unit tests test.enabled = false -compileJava.options.compilerArgs << "-Xlint:-rawtypes" - configurations { exampleFixture } diff --git a/plugins/jvm-example/src/main/java/org/elasticsearch/plugin/example/JvmExamplePlugin.java b/plugins/jvm-example/src/main/java/org/elasticsearch/plugin/example/JvmExamplePlugin.java index c1bcc65bfe2..0c0e71dc1c0 100644 --- a/plugins/jvm-example/src/main/java/org/elasticsearch/plugin/example/JvmExamplePlugin.java +++ b/plugins/jvm-example/src/main/java/org/elasticsearch/plugin/example/JvmExamplePlugin.java @@ -19,6 +19,10 @@ package org.elasticsearch.plugin.example; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; + import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.Module; @@ -28,10 +32,6 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoriesModule; import org.elasticsearch.rest.action.cat.AbstractCatAction; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; - /** * Example of a plugin. 
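The same local-suppression idea shows up in `JvmExamplePlugin.nodeServices()` just below, where `LifecycleComponent` appears as a raw type inside the collection's type argument. A schematic plugin showing the shape of that override (the class is an illustrative stand-in, not the shipped example):

[source,java]
--------------------------------------------------
// Schematic stand-in: the rawtypes warning stems from LifecycleComponent
// being used raw in the Class<? extends LifecycleComponent> bound, so it
// is suppressed on the single override rather than module-wide.
import java.util.ArrayList;
import java.util.Collection;

import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.plugins.Plugin;

public class ExamplePluginSketch extends Plugin {
    @Override
    public String name() {
        return "example-sketch";
    }

    @Override
    public String description() {
        return "illustrative plugin sketch";
    }

    @Override
    @SuppressWarnings("rawtypes") // LifecycleComponent is used as a raw type
    public Collection<Class<? extends LifecycleComponent>> nodeServices() {
        return new ArrayList<>(); // no services in this sketch
    }
}
--------------------------------------------------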
*/ @@ -59,6 +59,7 @@ public class JvmExamplePlugin extends Plugin { } @Override + @SuppressWarnings("rawtypes") // Plugin uses a rawtype public Collection<Class<? extends LifecycleComponent>> nodeServices() { Collection<Class<? extends LifecycleComponent>> services = new ArrayList<>(); return services; diff --git a/plugins/lang-javascript/build.gradle b/plugins/lang-javascript/build.gradle index ead459f29d1..cce5869341e 100644 --- a/plugins/lang-javascript/build.gradle +++ b/plugins/lang-javascript/build.gradle @@ -26,13 +26,9 @@ dependencies { compile 'org.mozilla:rhino:1.7.7' } -compileJava.options.compilerArgs << "-Xlint:-rawtypes,-unchecked" -compileTestJava.options.compilerArgs << "-Xlint:-rawtypes,-unchecked" - integTest { cluster { systemProperty 'es.script.inline', 'on' systemProperty 'es.script.indexed', 'on' } } - diff --git a/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineService.java b/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineService.java index eca1265766d..14124551e76 100644 --- a/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineService.java +++ b/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineService.java @@ -19,6 +19,18 @@ package org.elasticsearch.script.javascript; +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URL; +import java.security.AccessControlContext; +import java.security.AccessController; +import java.security.CodeSource; +import java.security.PrivilegedAction; +import java.security.cert.Certificate; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; + import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Scorer; import org.elasticsearch.SpecialPermission; @@ -49,18 +61,6 @@ import org.mozilla.javascript.ScriptableObject; import org.mozilla.javascript.SecurityController; import org.mozilla.javascript.WrapFactory; -import java.io.IOException; -import java.net.MalformedURLException; -import java.net.URL; -import java.security.AccessControlContext; -import java.security.AccessController; -import java.security.CodeSource; -import java.security.PrivilegedAction; -import java.security.cert.Certificate; -import java.util.List; -import java.util.Map; -import java.util.concurrent.atomic.AtomicLong; - /** * */ @@ -348,12 +348,14 @@ public class JavaScriptScriptEngineService extends AbstractComponent implements setJavaPrimitiveWrap(false); // RingoJS does that..., claims it's annoying...
} - public Scriptable wrapAsJavaObject(Context cx, Scriptable scope, Object javaObject, Class staticType) { + @Override + @SuppressWarnings("unchecked") + public Scriptable wrapAsJavaObject(Context cx, Scriptable scope, Object javaObject, Class<?> staticType) { if (javaObject instanceof Map) { - return NativeMap.wrap(scope, (Map) javaObject); + return NativeMap.wrap(scope, (Map<Object, Object>) javaObject); } if (javaObject instanceof List) { - return NativeList.wrap(scope, (List) javaObject, staticType); + return NativeList.wrap(scope, (List<Object>) javaObject, staticType); } return super.wrapAsJavaObject(cx, scope, javaObject, staticType); } diff --git a/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/support/NativeMap.java b/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/support/NativeMap.java index 1dbf3454900..ef9be14b0dd 100644 --- a/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/support/NativeMap.java +++ b/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/support/NativeMap.java @@ -19,12 +19,12 @@ package org.elasticsearch.script.javascript.support; -import org.mozilla.javascript.Scriptable; -import org.mozilla.javascript.Wrapper; - import java.util.Iterator; import java.util.Map; +import org.mozilla.javascript.Scriptable; +import org.mozilla.javascript.Wrapper; + /** * Wrapper for exposing maps in Rhino scripts. * @@ -55,26 +55,17 @@ public class NativeMap implements Scriptable, Wrapper { this.map = map; } - /* (non-Javadoc) - * @see org.mozilla.javascript.Wrapper#unwrap() - */ - + @Override public Object unwrap() { return map; } - /* (non-Javadoc) - * @see org.mozilla.javascript.Scriptable#getClassName() - */ - + @Override public String getClassName() { return "NativeMap"; } - /* (non-Javadoc) - * @see org.mozilla.javascript.Scriptable#get(java.lang.String, org.mozilla.javascript.Scriptable) - */ - + @Override public Object get(String name, Scriptable start) { // get the property from the underlying QName map if ("length".equals(name)) { @@ -84,69 +75,47 @@ public class NativeMap implements Scriptable, Wrapper { } } - /* (non-Javadoc) - * @see org.mozilla.javascript.Scriptable#get(int, org.mozilla.javascript.Scriptable) - */ - + @Override public Object get(int index, Scriptable start) { Object value = null; int i = 0; - Iterator itrValues = map.values().iterator(); + Iterator<Object> itrValues = map.values().iterator(); while (i++ <= index && itrValues.hasNext()) { value = itrValues.next(); } return value; } - /* (non-Javadoc) - * @see org.mozilla.javascript.Scriptable#has(java.lang.String, org.mozilla.javascript.Scriptable) - */ - + @Override public boolean has(String name, Scriptable start) { // locate the property in the underlying map return map.containsKey(name); } - /* (non-Javadoc) - * @see org.mozilla.javascript.Scriptable#has(int, org.mozilla.javascript.Scriptable) - */ - + @Override public boolean has(int index, Scriptable start) { return (index >= 0 && map.values().size() > index); } - /* (non-Javadoc) - * @see org.mozilla.javascript.Scriptable#put(java.lang.String, org.mozilla.javascript.Scriptable, java.lang.Object) - */ - - @SuppressWarnings("unchecked") + @Override public void put(String name, Scriptable start, Object value) { map.put(name, value); } - /* (non-Javadoc) - * @see org.mozilla.javascript.Scriptable#put(int, org.mozilla.javascript.Scriptable, java.lang.Object) - */ - + @Override public void put(int index, Scriptable start, Object value) { // TODO: implement?
} - /* (non-Javadoc) - * @see org.mozilla.javascript.Scriptable#delete(java.lang.String) - */ - + @Override public void delete(String name) { map.remove(name); } - /* (non-Javadoc) - * @see org.mozilla.javascript.Scriptable#delete(int) - */ - + @Override public void delete(int index) { int i = 0; - Iterator itrKeys = map.keySet().iterator(); + Iterator itrKeys = map.keySet().iterator(); while (i <= index && itrKeys.hasNext()) { Object key = itrKeys.next(); if (i == index) { @@ -156,58 +125,37 @@ public class NativeMap implements Scriptable, Wrapper { } } - /* (non-Javadoc) - * @see org.mozilla.javascript.Scriptable#getPrototype() - */ - + @Override public Scriptable getPrototype() { return this.prototype; } - /* (non-Javadoc) - * @see org.mozilla.javascript.Scriptable#setPrototype(org.mozilla.javascript.Scriptable) - */ - + @Override public void setPrototype(Scriptable prototype) { this.prototype = prototype; } - /* (non-Javadoc) - * @see org.mozilla.javascript.Scriptable#getParentScope() - */ - + @Override public Scriptable getParentScope() { return this.parentScope; } - /* (non-Javadoc) - * @see org.mozilla.javascript.Scriptable#setParentScope(org.mozilla.javascript.Scriptable) - */ - + @Override public void setParentScope(Scriptable parent) { this.parentScope = parent; } - /* (non-Javadoc) - * @see org.mozilla.javascript.Scriptable#getIds() - */ - + @Override public Object[] getIds() { return map.keySet().toArray(); } - /* (non-Javadoc) - * @see org.mozilla.javascript.Scriptable#getDefaultValue(java.lang.Class) - */ - - public Object getDefaultValue(Class hint) { + @Override + public Object getDefaultValue(Class hint) { return null; } - /* (non-Javadoc) - * @see org.mozilla.javascript.Scriptable#hasInstance(org.mozilla.javascript.Scriptable) - */ - + @Override public boolean hasInstance(Scriptable value) { if (!(value instanceof Wrapper)) return false; diff --git a/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/support/ScriptValueConverter.java b/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/support/ScriptValueConverter.java index a90948c1877..111d2a22b87 100644 --- a/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/support/ScriptValueConverter.java +++ b/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/support/ScriptValueConverter.java @@ -19,13 +19,6 @@ package org.elasticsearch.script.javascript.support; -import org.mozilla.javascript.Context; -import org.mozilla.javascript.IdScriptableObject; -import org.mozilla.javascript.NativeArray; -import org.mozilla.javascript.ScriptRuntime; -import org.mozilla.javascript.Scriptable; -import org.mozilla.javascript.Wrapper; - import java.util.ArrayList; import java.util.Collection; import java.util.Date; @@ -33,6 +26,13 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import org.mozilla.javascript.Context; +import org.mozilla.javascript.IdScriptableObject; +import org.mozilla.javascript.NativeArray; +import org.mozilla.javascript.ScriptRuntime; +import org.mozilla.javascript.Scriptable; +import org.mozilla.javascript.Wrapper; + /** * Value Converter to marshal objects between Java and Javascript. * @@ -126,6 +126,7 @@ public final class ScriptValueConverter { value = list; } else if (value instanceof Map) { // ensure each value in the Map is unwrapped (which may have been an unwrapped NativeMap!) 
+ @SuppressWarnings("unchecked") Map<Object, Object> map = (Map<Object, Object>) value; Map<Object, Object> copyMap = new HashMap<Object, Object>(map.size()); for (Object key : map.keySet()) { @@ -157,6 +158,7 @@ public final class ScriptValueConverter { Context.getCurrentContext(), scope, TYPE_DATE, new Object[]{date.getTime()}); } else if (value instanceof Collection) { // recursively convert each value in the collection + @SuppressWarnings("unchecked") Collection<Object> collection = (Collection<Object>) value; Object[] array = new Object[collection.size()]; int index = 0; @@ -166,7 +168,9 @@ public final class ScriptValueConverter { // convert array to a native JavaScript Array value = Context.getCurrentContext().newArray(scope, array); } else if (value instanceof Map) { - value = NativeMap.wrap(scope, (Map) value); + @SuppressWarnings("unchecked") + Map<Object, Object> map = (Map<Object, Object>) value; + value = NativeMap.wrap(scope, map); } // simple numbers, strings and booleans are wrapped automatically by Rhino diff --git a/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/support/ScriptableLinkedHashMap.java b/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/support/ScriptableLinkedHashMap.java index 680b20a0256..f4df25f0861 100644 --- a/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/support/ScriptableLinkedHashMap.java +++ b/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/support/ScriptableLinkedHashMap.java @@ -19,12 +19,12 @@ package org.elasticsearch.script.javascript.support; -import org.mozilla.javascript.Scriptable; - import java.util.Iterator; import java.util.LinkedHashMap; import java.util.Map; +import org.mozilla.javascript.Scriptable; + /** * Implementation of a Scriptable Map. This is the best choice for maps that want to represent * JavaScript associative arrays - allowing access via key and integer index.
It maintains and @@ -53,6 +53,7 @@ public class ScriptableLinkedHashMap<K, V> extends LinkedHashMap<K, V> implement /** * @see org.mozilla.javascript.Scriptable#getClassName() */ + @Override public String getClassName() { return "ScriptableMap"; } @@ -60,6 +61,7 @@ public class ScriptableLinkedHashMap<K, V> extends LinkedHashMap<K, V> implement /** * @see org.mozilla.javascript.Scriptable#get(java.lang.String, org.mozilla.javascript.Scriptable) */ + @Override public Object get(String name, Scriptable start) { // get the property from the underlying QName map if ("length".equals(name)) { @@ -72,10 +74,11 @@ public class ScriptableLinkedHashMap<K, V> extends LinkedHashMap<K, V> implement /** * @see org.mozilla.javascript.Scriptable#get(int, org.mozilla.javascript.Scriptable) */ + @Override public Object get(int index, Scriptable start) { Object value = null; int i = 0; - Iterator itrValues = this.values().iterator(); + Iterator<V> itrValues = this.values().iterator(); while (i++ <= index && itrValues.hasNext()) { value = itrValues.next(); } @@ -85,6 +88,7 @@ public class ScriptableLinkedHashMap<K, V> extends LinkedHashMap<K, V> implement /** * @see org.mozilla.javascript.Scriptable#has(java.lang.String, org.mozilla.javascript.Scriptable) */ + @Override public boolean has(String name, Scriptable start) { // locate the property in the underlying map return containsKey(name); @@ -93,6 +97,7 @@ public class ScriptableLinkedHashMap<K, V> extends LinkedHashMap<K, V> implement /** * @see org.mozilla.javascript.Scriptable#has(int, org.mozilla.javascript.Scriptable) */ + @Override public boolean has(int index, Scriptable start) { return (index >= 0 && this.values().size() > index); } @@ -100,6 +105,7 @@ public class ScriptableLinkedHashMap<K, V> extends LinkedHashMap<K, V> implement /** * @see org.mozilla.javascript.Scriptable#put(java.lang.String, org.mozilla.javascript.Scriptable, java.lang.Object) */ + @Override @SuppressWarnings("unchecked") public void put(String name, Scriptable start, Object value) { // add the property to the underlying QName map @@ -109,6 +115,7 @@ public class ScriptableLinkedHashMap<K, V> extends LinkedHashMap<K, V> implement /** * @see org.mozilla.javascript.Scriptable#put(int, org.mozilla.javascript.Scriptable, java.lang.Object) */ + @Override public void put(int index, Scriptable start, Object value) { // TODO: implement?
} @@ -116,6 +123,7 @@ public class ScriptableLinkedHashMap<K, V> extends LinkedHashMap<K, V> implement /** * @see org.mozilla.javascript.Scriptable#delete(java.lang.String) */ + @Override public void delete(String name) { // remove the property from the underlying QName map remove(name); @@ -124,9 +132,10 @@ public class ScriptableLinkedHashMap<K, V> extends LinkedHashMap<K, V> implement /** * @see org.mozilla.javascript.Scriptable#delete(int) */ + @Override public void delete(int index) { int i = 0; - Iterator itrKeys = this.keySet().iterator(); + Iterator<K> itrKeys = this.keySet().iterator(); while (i <= index && itrKeys.hasNext()) { Object key = itrKeys.next(); if (i == index) { @@ -139,6 +148,7 @@ public class ScriptableLinkedHashMap<K, V> extends LinkedHashMap<K, V> implement /** * @see org.mozilla.javascript.Scriptable#getPrototype() */ + @Override public Scriptable getPrototype() { return this.prototype; } @@ -146,6 +156,7 @@ public class ScriptableLinkedHashMap<K, V> extends LinkedHashMap<K, V> implement /** * @see org.mozilla.javascript.Scriptable#setPrototype(org.mozilla.javascript.Scriptable) */ + @Override public void setPrototype(Scriptable prototype) { this.prototype = prototype; } @@ -153,6 +164,7 @@ public class ScriptableLinkedHashMap<K, V> extends LinkedHashMap<K, V> implement /** * @see org.mozilla.javascript.Scriptable#getParentScope() */ + @Override public Scriptable getParentScope() { return this.parentScope; } @@ -160,6 +172,7 @@ public class ScriptableLinkedHashMap<K, V> extends LinkedHashMap<K, V> implement /** * @see org.mozilla.javascript.Scriptable#setParentScope(org.mozilla.javascript.Scriptable) */ + @Override public void setParentScope(Scriptable parent) { this.parentScope = parent; } @@ -167,6 +180,7 @@ public class ScriptableLinkedHashMap<K, V> extends LinkedHashMap<K, V> implement /** * @see org.mozilla.javascript.Scriptable#getIds() */ + @Override public Object[] getIds() { return keySet().toArray(); } @@ -174,13 +188,15 @@ public class ScriptableLinkedHashMap<K, V> extends LinkedHashMap<K, V> implement /** * @see org.mozilla.javascript.Scriptable#getDefaultValue(java.lang.Class) */ - public Object getDefaultValue(Class hint) { + @Override + public Object getDefaultValue(Class<?> hint) { return null; } /** * @see org.mozilla.javascript.Scriptable#hasInstance(org.mozilla.javascript.Scriptable) */ + @Override public boolean hasInstance(Scriptable instance) { return instance instanceof ScriptableLinkedHashMap; } diff --git a/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/support/ScriptableWrappedMap.java b/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/support/ScriptableWrappedMap.java index 9ff1f61c8d5..1aa2326482a 100644 --- a/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/support/ScriptableWrappedMap.java +++ b/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/support/ScriptableWrappedMap.java @@ -19,14 +19,14 @@ package org.elasticsearch.script.javascript.support; -import org.mozilla.javascript.Scriptable; -import org.mozilla.javascript.Wrapper; - import java.util.Collection; import java.util.Iterator; import java.util.Map; import java.util.Set; +import org.mozilla.javascript.Scriptable; +import org.mozilla.javascript.Wrapper; + /** * Implementation of a Scriptable Map. This is the best choice where you want values to be * persisted directly to an underlying map supplied on construction.
The class automatically @@ -37,8 +37,8 @@ import java.util.Set; * * */ -public class ScriptableWrappedMap implements ScriptableMap, Wrapper { - private Map map; +public class ScriptableWrappedMap implements ScriptableMap<Object, Object>, Wrapper { + private Map<Object, Object> map; private Scriptable parentScope; private Scriptable prototype; @@ -54,38 +54,29 @@ public class ScriptableWrappedMap implements ScriptableMap, Wrapper { /** * Construct */ - public ScriptableWrappedMap(Map map) { + public ScriptableWrappedMap(Map<Object, Object> map) { this.map = map; } /** * Construct */ - public ScriptableWrappedMap(Scriptable scope, Map map) { + public ScriptableWrappedMap(Scriptable scope, Map<Object, Object> map) { this.parentScope = scope; this.map = map; } - /* (non-Javadoc) - * @see org.mozilla.javascript.Wrapper#unwrap() - */ - + @Override public Object unwrap() { return map; } - /* (non-Javadoc) - * @see org.mozilla.javascript.Scriptable#getClassName() - */ - + @Override public String getClassName() { return "ScriptableWrappedMap"; } - /* (non-Javadoc) - * @see org.mozilla.javascript.Scriptable#get(java.lang.String, org.mozilla.javascript.Scriptable) - */ - + @Override public Object get(String name, Scriptable start) { // get the property from the underlying QName map if ("length".equals(name)) { @@ -95,69 +86,47 @@ public class ScriptableWrappedMap implements ScriptableMap, Wrapper { } } - /* (non-Javadoc) - * @see org.mozilla.javascript.Scriptable#get(int, org.mozilla.javascript.Scriptable) - */ - + @Override public Object get(int index, Scriptable start) { Object value = null; int i = 0; - Iterator itrValues = map.values().iterator(); + Iterator<Object> itrValues = map.values().iterator(); while (i++ <= index && itrValues.hasNext()) { value = itrValues.next(); } return ScriptValueConverter.wrapValue(this.parentScope != null ? this.parentScope : start, value); } - /* (non-Javadoc) - * @see org.mozilla.javascript.Scriptable#has(java.lang.String, org.mozilla.javascript.Scriptable) - */ - + @Override public boolean has(String name, Scriptable start) { // locate the property in the underlying map return map.containsKey(name); } - /* (non-Javadoc) - * @see org.mozilla.javascript.Scriptable#has(int, org.mozilla.javascript.Scriptable) - */ - + @Override public boolean has(int index, Scriptable start) { return (index >= 0 && map.values().size() > index); } - /* (non-Javadoc) - * @see org.mozilla.javascript.Scriptable#put(java.lang.String, org.mozilla.javascript.Scriptable, java.lang.Object) - */ - - @SuppressWarnings("unchecked") + @Override public void put(String name, Scriptable start, Object value) { map.put(name, ScriptValueConverter.unwrapValue(value)); } - /* (non-Javadoc) - * @see org.mozilla.javascript.Scriptable#put(int, org.mozilla.javascript.Scriptable, java.lang.Object) - */ - + @Override public void put(int index, Scriptable start, Object value) { // TODO: implement?
} - /* (non-Javadoc) - * @see org.mozilla.javascript.Scriptable#delete(java.lang.String) - */ - + @Override public void delete(String name) { map.remove(name); } - /* (non-Javadoc) - * @see org.mozilla.javascript.Scriptable#delete(int) - */ - + @Override public void delete(int index) { int i = 0; - Iterator itrKeys = map.keySet().iterator(); + Iterator<Object> itrKeys = map.keySet().iterator(); while (i <= index && itrKeys.hasNext()) { Object key = itrKeys.next(); if (i == index) { @@ -167,58 +136,37 @@ public class ScriptableWrappedMap implements ScriptableMap, Wrapper { } } - /* (non-Javadoc) - * @see org.mozilla.javascript.Scriptable#getPrototype() - */ - + @Override public Scriptable getPrototype() { return this.prototype; } - /* (non-Javadoc) - * @see org.mozilla.javascript.Scriptable#setPrototype(org.mozilla.javascript.Scriptable) - */ - + @Override public void setPrototype(Scriptable prototype) { this.prototype = prototype; } - /* (non-Javadoc) - * @see org.mozilla.javascript.Scriptable#getParentScope() - */ - + @Override public Scriptable getParentScope() { return this.parentScope; } - /* (non-Javadoc) - * @see org.mozilla.javascript.Scriptable#setParentScope(org.mozilla.javascript.Scriptable) - */ - + @Override public void setParentScope(Scriptable parent) { this.parentScope = parent; } - /* (non-Javadoc) - * @see org.mozilla.javascript.Scriptable#getIds() - */ - + @Override public Object[] getIds() { return map.keySet().toArray(); } - /* (non-Javadoc) - * @see org.mozilla.javascript.Scriptable#getDefaultValue(java.lang.Class) - */ - - public Object getDefaultValue(Class hint) { + @Override + public Object getDefaultValue(Class<?> hint) { return null; } - /* (non-Javadoc) - * @see org.mozilla.javascript.Scriptable#hasInstance(org.mozilla.javascript.Scriptable) - */ - + @Override public boolean hasInstance(Scriptable value) { if (!(value instanceof Wrapper)) return false; @@ -226,106 +174,66 @@ public class ScriptableWrappedMap implements ScriptableMap, Wrapper { return Map.class.isInstance(instance); } - /* (non-Javadoc) - * @see java.util.Map#clear() - */ - + @Override public void clear() { this.map.clear(); } - /* (non-Javadoc) - * @see java.util.Map#containsKey(java.lang.Object) - */ - + @Override public boolean containsKey(Object key) { return this.map.containsKey(key); } - /* (non-Javadoc) - * @see java.util.Map#containsValue(java.lang.Object) - */ - + @Override public boolean containsValue(Object value) { return this.map.containsValue(value); } - /* (non-Javadoc) - * @see java.util.Map#entrySet() - */ - - public Set entrySet() { + @Override + public Set<Map.Entry<Object, Object>> entrySet() { return this.map.entrySet(); } - /* (non-Javadoc) - * @see java.util.Map#get(java.lang.Object) - */ - + @Override public Object get(Object key) { return this.map.get(key); } - /* (non-Javadoc) - * @see java.util.Map#isEmpty() - */ - + @Override public boolean isEmpty() { return (this.map.size() == 0); } - /* (non-Javadoc) - * @see java.util.Map#keySet() - */ - - public Set keySet() { + @Override + public Set<Object> keySet() { return this.map.keySet(); } - /* (non-Javadoc) - * @see java.util.Map#put(java.lang.Object, java.lang.Object) - */ - + @Override public Object put(Object key, Object value) { return this.map.put(key, value); } - /* (non-Javadoc) - * @see java.util.Map#putAll(java.util.Map) - */ - - public void putAll(Map t) { + @Override + public void putAll(Map<? extends Object, ? extends Object> t) { this.map.putAll(t); } - /* (non-Javadoc) - * @see java.util.Map#remove(java.lang.Object) - */ - + @Override public Object remove(Object key) { return
this.map.remove(key); } - /* (non-Javadoc) - * @see java.util.Map#size() - */ - + @Override public int size() { return this.map.size(); } - /* (non-Javadoc) - * @see java.util.Map#values() - */ - - public Collection values() { + @Override + public Collection<Object> values() { return this.map.values(); } - /* (non-Javadoc) - * @see java.lang.Object#toString() - */ - @Override public String toString() { return (this.map != null ? this.map.toString() : super.toString()); diff --git a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineTests.java b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineTests.java index 9d8357bb582..5ed2e7e4cb8 100644 --- a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineTests.java +++ b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineTests.java @@ -19,6 +19,12 @@ package org.elasticsearch.script.javascript; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.script.CompiledScript; @@ -28,12 +34,6 @@ import org.elasticsearch.test.ESTestCase; import org.junit.After; import org.junit.Before; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -59,6 +59,7 @@ public class JavaScriptScriptEngineTests extends ESTestCase { assertThat(((Number) o).intValue(), equalTo(3)); } + @SuppressWarnings("unchecked") public void testMapAccess() { Map<String, Object> vars = new HashMap<String, Object>(); @@ -75,15 +76,17 @@ public class JavaScriptScriptEngineTests extends ESTestCase { assertThat(((String) o), equalTo("2")); } + @SuppressWarnings("unchecked") public void testJavaScriptObjectToMap() { Map<String, Object> vars = new HashMap<String, Object>(); Object o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testJavaScriptObjectToMap", "js", se.compile("var obj1 = {}; obj1.prop1 = 'value1'; obj1.obj2 = {}; obj1.obj2.prop2 = 'value2'; obj1", Collections.emptyMap())), vars).run(); - Map obj1 = (Map) o; + Map<String, Object> obj1 = (Map<String, Object>) o; assertThat((String) obj1.get("prop1"), equalTo("value1")); assertThat((String) ((Map) obj1.get("obj2")).get("prop2"), equalTo("value2")); } + @SuppressWarnings("unchecked") public void testJavaScriptObjectMapInter() { Map<String, Object> vars = new HashMap<String, Object>(); Map<String, Object> ctx = new HashMap<String, Object>(); @@ -102,6 +105,7 @@ public class JavaScriptScriptEngineTests extends ESTestCase { assertThat((String) ((Map) ctx.get("obj2")).get("prop2"), equalTo("value2")); } + @SuppressWarnings("unchecked") public void testJavaScriptInnerArrayCreation() { Map<String, Object> ctx = new HashMap<String, Object>(); Map<String, Object> doc = new HashMap<String, Object>(); @@ -115,9 +119,10 @@ public class JavaScriptScriptEngineTests extends ESTestCase { Map<String, Object> unwrap = (Map<String, Object>) script.unwrap(ctx); - assertThat(((Map) unwrap.get("doc")).get("field1"), instanceOf(List.class)); + assertThat(((Map<String, Object>) unwrap.get("doc")).get("field1"), instanceOf(List.class)); } + @SuppressWarnings("unchecked") public void testAccessListInScript() { Map<String, Object> vars = new HashMap<String, Object>(); Map<String, Object> obj2 = MapBuilder.<String, Object>newMapBuilder().put("prop2", "value2").map(); diff --git a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptMultiThreadedTests.java
b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptMultiThreadedTests.java index 2aa6e13a99f..b6be9f6dde0 100644 --- a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptMultiThreadedTests.java +++ b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptMultiThreadedTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.script.javascript; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.ExecutableScript; @@ -30,7 +31,6 @@ import java.util.HashMap; import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; -import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.atomic.AtomicBoolean; import static org.hamcrest.Matchers.equalTo; @@ -53,8 +53,8 @@ public class JavaScriptScriptMultiThreadedTests extends ESTestCase { public void run() { try { barrier.await(); - long x = ThreadLocalRandom.current().nextInt(); - long y = ThreadLocalRandom.current().nextInt(); + long x = Randomness.get().nextInt(); + long y = Randomness.get().nextInt(); long addition = x + y; Map<String, Object> vars = new HashMap<String, Object>(); vars.put("x", x); @@ -95,12 +95,12 @@ public class JavaScriptScriptMultiThreadedTests extends ESTestCase { public void run() { try { barrier.await(); - long x = ThreadLocalRandom.current().nextInt(); + long x = Randomness.get().nextInt(); Map<String, Object> vars = new HashMap<String, Object>(); vars.put("x", x); ExecutableScript script = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testExecutableNoRuntimeParams", "js", compiled), vars); for (int i = 0; i < 100000; i++) { - long y = ThreadLocalRandom.current().nextInt(); + long y = Randomness.get().nextInt(); long addition = x + y; script.setNextVar("y", y); long result = ((Number) script.run()).longValue(); @@ -139,8 +139,8 @@ public class JavaScriptScriptMultiThreadedTests extends ESTestCase { barrier.await(); Map<String, Object> runtimeVars = new HashMap<String, Object>(); for (int i = 0; i < 100000; i++) { - long x = ThreadLocalRandom.current().nextInt(); - long y = ThreadLocalRandom.current().nextInt(); + long x = Randomness.get().nextInt(); + long y = Randomness.get().nextInt(); long addition = x + y; runtimeVars.put("x", x); runtimeVars.put("y", y); diff --git a/plugins/lang-plan-a/build.gradle b/plugins/lang-plan-a/build.gradle index dc0cfca2fa7..810f0df4e16 100644 --- a/plugins/lang-plan-a/build.gradle +++ b/plugins/lang-plan-a/build.gradle @@ -35,7 +35,7 @@ dependencyLicenses { mapping from: /asm-.*/, to: 'asm' } -compileJava.options.compilerArgs << '-Xlint:-cast,-fallthrough,-rawtypes' +compileJava.options.compilerArgs << '-Xlint:-cast,-rawtypes' compileTestJava.options.compilerArgs << '-Xlint:-unchecked' // regeneration logic, comes in via ant right now diff --git a/plugins/lang-plan-a/src/main/java/org/elasticsearch/plan/a/Analyzer.java b/plugins/lang-plan-a/src/main/java/org/elasticsearch/plan/a/Analyzer.java index a20c32965b2..eb2681cfba8 100644 --- a/plugins/lang-plan-a/src/main/java/org/elasticsearch/plan/a/Analyzer.java +++ b/plugins/lang-plan-a/src/main/java/org/elasticsearch/plan/a/Analyzer.java @@ -2223,12 +2223,15 @@ class Analyzer extends PlanAParserBaseVisitor { case LONG: incremd.preConst = positive ? 1L : -1L; incremd.from = definition.longType; + break; case FLOAT: incremd.preConst = positive ?
1.0F : -1.0F; incremd.from = definition.floatType; + break; case DOUBLE: incremd.preConst = positive ? 1.0 : -1.0; incremd.from = definition.doubleType; + break; default: incremd.preConst = positive ? 1 : -1; incremd.from = definition.intType; diff --git a/plugins/lang-plan-a/src/main/java/org/elasticsearch/plan/a/Def.java b/plugins/lang-plan-a/src/main/java/org/elasticsearch/plan/a/Def.java index bd9b146e41e..c7a8ce410fd 100644 --- a/plugins/lang-plan-a/src/main/java/org/elasticsearch/plan/a/Def.java +++ b/plugins/lang-plan-a/src/main/java/org/elasticsearch/plan/a/Def.java @@ -19,18 +19,18 @@ package org.elasticsearch.plan.a; +import org.elasticsearch.plan.a.Definition.Cast; +import org.elasticsearch.plan.a.Definition.Field; +import org.elasticsearch.plan.a.Definition.Method; +import org.elasticsearch.plan.a.Definition.Struct; +import org.elasticsearch.plan.a.Definition.Transform; +import org.elasticsearch.plan.a.Definition.Type; + import java.lang.invoke.MethodHandle; import java.lang.reflect.Array; import java.util.List; import java.util.Map; -import static org.elasticsearch.plan.a.Definition.Cast; -import static org.elasticsearch.plan.a.Definition.Field; -import static org.elasticsearch.plan.a.Definition.Method; -import static org.elasticsearch.plan.a.Definition.Struct; -import static org.elasticsearch.plan.a.Definition.Transform; -import static org.elasticsearch.plan.a.Definition.Type; - public class Def { public static Object methodCall(final Object owner, final String name, final Definition definition, final Object[] arguments, final boolean[] typesafe) { @@ -70,7 +70,7 @@ public class Def { } } - @SuppressWarnings("unchecked") + @SuppressWarnings({ "unchecked", "rawtypes" }) public static void fieldStore(final Object owner, Object value, final String name, final Definition definition, final boolean typesafe) { final Field field = getField(owner, name, definition); @@ -117,7 +117,7 @@ public class Def { } } - @SuppressWarnings("unchecked") + @SuppressWarnings("rawtypes") public static Object fieldLoad(final Object owner, final String name, final Definition definition) { if (owner.getClass().isArray() && "length".equals(name)) { return Array.getLength(owner); @@ -163,7 +163,7 @@ public class Def { } } - @SuppressWarnings("unchecked") + @SuppressWarnings({ "unchecked", "rawtypes" }) public static void arrayStore(final Object array, Object index, Object value, final Definition definition, final boolean indexsafe, final boolean valuesafe) { if (array instanceof Map) { @@ -206,7 +206,7 @@ public class Def { } } - @SuppressWarnings("unchecked") + @SuppressWarnings("rawtypes") public static Object arrayLoad(final Object array, Object index, final Definition definition, final boolean indexsafe) { if (array instanceof Map) { @@ -257,7 +257,7 @@ public class Def { } } - for (final Class iface : clazz.getInterfaces()) { + for (final Class<?> iface : clazz.getInterfaces()) { struct = definition.classes.get(iface); if (struct != null) { @@ -303,7 +303,7 @@ public class Def { } } - for (final Class iface : clazz.getInterfaces()) { + for (final Class<?> iface : clazz.getInterfaces()) { struct = definition.classes.get(iface); if (struct != null) { @@ -348,7 +348,7 @@ public class Def { break; } - for (final Class iface : fromClass.getInterfaces()) { + for (final Class<?> iface : fromClass.getInterfaces()) { fromStruct = definition.classes.get(iface); if (fromStruct != null) { @@ -371,7 +371,7 @@ public class Def { break; } - for (final Class iface : toClass.getInterfaces()) { + for (final Class<?> iface :
toClass.getInterfaces()) { toStruct = definition.classes.get(iface); if (toStruct != null) { @@ -442,28 +442,28 @@ public class Def { } } else if (right instanceof Character) { if (left instanceof Double) { - return ((Number)left).doubleValue() * (double)(char)right; + return ((Number)left).doubleValue() * (char)right; } else if (left instanceof Float) { - return ((Number)left).floatValue() * (float)(char)right; + return ((Number)left).floatValue() * (char)right; } else if (left instanceof Long) { - return ((Number)left).longValue() * (long)(char)right; + return ((Number)left).longValue() * (char)right; } else { - return ((Number)left).intValue() * (int)(char)right; + return ((Number)left).intValue() * (char)right; } } } else if (left instanceof Character) { if (right instanceof Number) { if (right instanceof Double) { - return (double)(char)left * ((Number)right).doubleValue(); + return (char)left * ((Number)right).doubleValue(); } else if (right instanceof Float) { - return (float)(char)left * ((Number)right).floatValue(); + return (char)left * ((Number)right).floatValue(); } else if (right instanceof Long) { - return (long)(char)left * ((Number)right).longValue(); + return (char)left * ((Number)right).longValue(); } else { - return (int)(char)left * ((Number)right).intValue(); + return (char)left * ((Number)right).intValue(); } } else if (right instanceof Character) { - return (int)(char)left * (int)(char)right; + return (char)left * (char)right; } } @@ -485,28 +485,28 @@ public class Def { } } else if (right instanceof Character) { if (left instanceof Double) { - return ((Number)left).doubleValue() / (double)(char)right; + return ((Number)left).doubleValue() / (char)right; } else if (left instanceof Float) { - return ((Number)left).floatValue() / (float)(char)right; + return ((Number)left).floatValue() / (char)right; } else if (left instanceof Long) { - return ((Number)left).longValue() / (long)(char)right; + return ((Number)left).longValue() / (char)right; } else { - return ((Number)left).intValue() / (int)(char)right; + return ((Number)left).intValue() / (char)right; } } } else if (left instanceof Character) { if (right instanceof Number) { if (right instanceof Double) { - return (double)(char)left / ((Number)right).doubleValue(); + return (char)left / ((Number)right).doubleValue(); } else if (right instanceof Float) { - return (float)(char)left / ((Number)right).floatValue(); + return (char)left / ((Number)right).floatValue(); } else if (right instanceof Long) { - return (long)(char)left / ((Number)right).longValue(); + return (char)left / ((Number)right).longValue(); } else { - return (int)(char)left / ((Number)right).intValue(); + return (char)left / ((Number)right).intValue(); } } else if (right instanceof Character) { - return (int)(char)left / (int)(char)right; + return (char)left / (char)right; } } @@ -528,28 +528,28 @@ public class Def { } } else if (right instanceof Character) { if (left instanceof Double) { - return ((Number)left).doubleValue() % (double)(char)right; + return ((Number)left).doubleValue() % (char)right; } else if (left instanceof Float) { - return ((Number)left).floatValue() % (float)(char)right; + return ((Number)left).floatValue() % (char)right; } else if (left instanceof Long) { - return ((Number)left).longValue() % (long)(char)right; + return ((Number)left).longValue() % (char)right; } else { - return ((Number)left).intValue() % (int)(char)right; + return ((Number)left).intValue() % (char)right; } } } else if (left instanceof Character) { if (right 
instanceof Number) { if (right instanceof Double) { - return (double)(char)left % ((Number)right).doubleValue(); + return (char)left % ((Number)right).doubleValue(); } else if (right instanceof Float) { - return (float)(char)left % ((Number)right).floatValue(); + return (char)left % ((Number)right).floatValue(); } else if (right instanceof Long) { - return (long)(char)left % ((Number)right).longValue(); + return (char)left % ((Number)right).longValue(); } else { - return (int)(char)left % ((Number)right).intValue(); + return (char)left % ((Number)right).intValue(); } } else if (right instanceof Character) { - return (int)(char)left % (int)(char)right; + return (char)left % (char)right; } } @@ -573,28 +573,28 @@ public class Def { } } else if (right instanceof Character) { if (left instanceof Double) { - return ((Number)left).doubleValue() + (double)(char)right; + return ((Number)left).doubleValue() + (char)right; } else if (left instanceof Float) { - return ((Number)left).floatValue() + (float)(char)right; + return ((Number)left).floatValue() + (char)right; } else if (left instanceof Long) { - return ((Number)left).longValue() + (long)(char)right; + return ((Number)left).longValue() + (char)right; } else { - return ((Number)left).intValue() + (int)(char)right; + return ((Number)left).intValue() + (char)right; } } } else if (left instanceof Character) { if (right instanceof Number) { if (right instanceof Double) { - return (double)(char)left + ((Number)right).doubleValue(); + return (char)left + ((Number)right).doubleValue(); } else if (right instanceof Float) { - return (float)(char)left + ((Number)right).floatValue(); + return (char)left + ((Number)right).floatValue(); } else if (right instanceof Long) { - return (long)(char)left + ((Number)right).longValue(); + return (char)left + ((Number)right).longValue(); } else { - return (int)(char)left + ((Number)right).intValue(); + return (char)left + ((Number)right).intValue(); } } else if (right instanceof Character) { - return (int)(char)left + (int)(char)right; + return (char)left + (char)right; } } @@ -616,28 +616,28 @@ public class Def { } } else if (right instanceof Character) { if (left instanceof Double) { - return ((Number)left).doubleValue() - (double)(char)right; + return ((Number)left).doubleValue() - (char)right; } else if (left instanceof Float) { - return ((Number)left).floatValue() - (float)(char)right; + return ((Number)left).floatValue() - (char)right; } else if (left instanceof Long) { - return ((Number)left).longValue() - (long)(char)right; + return ((Number)left).longValue() - (char)right; } else { - return ((Number)left).intValue() - (int)(char)right; + return ((Number)left).intValue() - (char)right; } } } else if (left instanceof Character) { if (right instanceof Number) { if (right instanceof Double) { - return (double)(char)left - ((Number)right).doubleValue(); + return (char)left - ((Number)right).doubleValue(); } else if (right instanceof Float) { - return (float)(char)left - ((Number)right).floatValue(); + return (char)left - ((Number)right).floatValue(); } else if (right instanceof Long) { - return (long)(char)left - ((Number)right).longValue(); + return (char)left - ((Number)right).longValue(); } else { - return (int)(char)left - ((Number)right).intValue(); + return (char)left - ((Number)right).intValue(); } } else if (right instanceof Character) { - return (int)(char)left - (int)(char)right; + return (char)left - (char)right; } } @@ -657,9 +657,9 @@ public class Def { } } else if (right instanceof Character) { if 
(left instanceof Double || left instanceof Float || left instanceof Long) { - return ((Number)left).longValue() << (long)(char)right; + return ((Number)left).longValue() << (char)right; } else { - return ((Number)left).intValue() << (int)(char)right; + return ((Number)left).intValue() << (char)right; } } } else if (left instanceof Character) { @@ -667,10 +667,10 @@ public class Def { if (right instanceof Double || right instanceof Float || right instanceof Long) { return (long)(char)left << ((Number)right).longValue(); } else { - return (int)(char)left << ((Number)right).intValue(); + return (char)left << ((Number)right).intValue(); } } else if (right instanceof Character) { - return (int)(char)left << (int)(char)right; + return (char)left << (char)right; } } @@ -690,9 +690,9 @@ public class Def { } } else if (right instanceof Character) { if (left instanceof Double || left instanceof Float || left instanceof Long) { - return ((Number)left).longValue() >> (long)(char)right; + return ((Number)left).longValue() >> (char)right; } else { - return ((Number)left).intValue() >> (int)(char)right; + return ((Number)left).intValue() >> (char)right; } } } else if (left instanceof Character) { @@ -700,10 +700,10 @@ public class Def { if (right instanceof Double || right instanceof Float || right instanceof Long) { return (long)(char)left >> ((Number)right).longValue(); } else { - return (int)(char)left >> ((Number)right).intValue(); + return (char)left >> ((Number)right).intValue(); } } else if (right instanceof Character) { - return (int)(char)left >> (int)(char)right; + return (char)left >> (char)right; } } @@ -723,9 +723,9 @@ public class Def { } } else if (right instanceof Character) { if (left instanceof Double || left instanceof Float || left instanceof Long) { - return ((Number)left).longValue() >>> (long)(char)right; + return ((Number)left).longValue() >>> (char)right; } else { - return ((Number)left).intValue() >>> (int)(char)right; + return ((Number)left).intValue() >>> (char)right; } } } else if (left instanceof Character) { @@ -733,10 +733,10 @@ public class Def { if (right instanceof Double || right instanceof Float || right instanceof Long) { return (long)(char)left >>> ((Number)right).longValue(); } else { - return (int)(char)left >>> ((Number)right).intValue(); + return (char)left >>> ((Number)right).intValue(); } } else if (right instanceof Character) { - return (int)(char)left >>> (int)(char)right; + return (char)left >>> (char)right; } } @@ -758,20 +758,20 @@ public class Def { } } else if (right instanceof Character) { if (left instanceof Double || left instanceof Float || left instanceof Long) { - return ((Number)left).longValue() & (long)(char)right; + return ((Number)left).longValue() & (char)right; } else { - return ((Number)left).intValue() & (int)(char)right; + return ((Number)left).intValue() & (char)right; } } } else if (left instanceof Character) { if (right instanceof Number) { if (right instanceof Double || right instanceof Float || right instanceof Long) { - return (long)(char)left & ((Number)right).longValue(); + return (char)left & ((Number)right).longValue(); } else { - return (int)(char)left & ((Number)right).intValue(); + return (char)left & ((Number)right).intValue(); } } else if (right instanceof Character) { - return (int)(char)left & (int)(char)right; + return (char)left & (char)right; } } @@ -793,20 +793,20 @@ public class Def { } } else if (right instanceof Character) { if (left instanceof Double || left instanceof Float || left instanceof Long) { - return 
((Number)left).longValue() ^ (long)(char)right; + return ((Number)left).longValue() ^ (char)right; } else { - return ((Number)left).intValue() ^ (int)(char)right; + return ((Number)left).intValue() ^ (char)right; } } } else if (left instanceof Character) { if (right instanceof Number) { if (right instanceof Double || right instanceof Float || right instanceof Long) { - return (long)(char)left ^ ((Number)right).longValue(); + return (char)left ^ ((Number)right).longValue(); } else { - return (int)(char)left ^ ((Number)right).intValue(); + return (char)left ^ ((Number)right).intValue(); } } else if (right instanceof Character) { - return (int)(char)left ^ (int)(char)right; + return (char)left ^ (char)right; } } @@ -828,20 +828,20 @@ public class Def { } } else if (right instanceof Character) { if (left instanceof Double || left instanceof Float || left instanceof Long) { - return ((Number)left).longValue() | (long)(char)right; + return ((Number)left).longValue() | (char)right; } else { - return ((Number)left).intValue() | (int)(char)right; + return ((Number)left).intValue() | (char)right; } } } else if (left instanceof Character) { if (right instanceof Number) { if (right instanceof Double || right instanceof Float || right instanceof Long) { - return (long)(char)left | ((Number)right).longValue(); + return (char)left | ((Number)right).longValue(); } else { - return (int)(char)left | ((Number)right).intValue(); + return (char)left | ((Number)right).intValue(); } } else if (right instanceof Character) { - return (int)(char)left | (int)(char)right; + return (char)left | (char)right; } } @@ -855,48 +855,48 @@ public class Def { if (right instanceof Number) { return (double)left == ((Number)right).doubleValue(); } else if (right instanceof Character) { - return (double)left == (double)(char)right; + return (double)left == (char)right; } } else if (right instanceof Double) { if (left instanceof Number) { return ((Number)left).doubleValue() == (double)right; } else if (left instanceof Character) { - return (double)(char)left == ((Number)right).doubleValue(); + return (char)left == ((Number)right).doubleValue(); } } else if (left instanceof Float) { if (right instanceof Number) { return (float)left == ((Number)right).floatValue(); } else if (right instanceof Character) { - return (float)left == (float)(char)right; + return (float)left == (char)right; } } else if (right instanceof Float) { if (left instanceof Number) { return ((Number)left).floatValue() == (float)right; } else if (left instanceof Character) { - return (float)(char)left == ((Number)right).floatValue(); + return (char)left == ((Number)right).floatValue(); } } else if (left instanceof Long) { if (right instanceof Number) { return (long)left == ((Number)right).longValue(); } else if (right instanceof Character) { - return (long)left == (long)(char)right; + return (long)left == (char)right; } } else if (right instanceof Long) { if (left instanceof Number) { return ((Number)left).longValue() == (long)right; } else if (left instanceof Character) { - return (long)(char)left == ((Number)right).longValue(); + return (char)left == ((Number)right).longValue(); } } else if (left instanceof Number) { if (right instanceof Number) { return ((Number)left).intValue() == ((Number)right).intValue(); } else if (right instanceof Character) { - return ((Number)left).intValue() == (int)(char)right; + return ((Number)left).intValue() == (char)right; } } else if (right instanceof Number && left instanceof Character) { - return (int)(char)left == 
((Number)right).intValue(); + return (char)left == ((Number)right).intValue(); } else if (left instanceof Character && right instanceof Character) { - return (int)(char)left == (int)(char)right; + return (char)left == (char)right; } return left.equals(right); @@ -919,28 +919,28 @@ public class Def { } } else if (right instanceof Character) { if (left instanceof Double) { - return ((Number)left).doubleValue() < (double)(char)right; + return ((Number)left).doubleValue() < (char)right; } else if (left instanceof Float) { - return ((Number)left).floatValue() < (float)(char)right; + return ((Number)left).floatValue() < (char)right; } else if (left instanceof Long) { - return ((Number)left).longValue() < (long)(char)right; + return ((Number)left).longValue() < (char)right; } else { - return ((Number)left).intValue() < (int)(char)right; + return ((Number)left).intValue() < (char)right; } } } else if (left instanceof Character) { if (right instanceof Number) { if (right instanceof Double) { - return (double)(char)left < ((Number)right).doubleValue(); + return (char)left < ((Number)right).doubleValue(); } else if (right instanceof Float) { - return (float)(char)left < ((Number)right).floatValue(); + return (char)left < ((Number)right).floatValue(); } else if (right instanceof Long) { - return (long)(char)left < ((Number)right).longValue(); + return (char)left < ((Number)right).longValue(); } else { - return (int)(char)left < ((Number)right).intValue(); + return (char)left < ((Number)right).intValue(); } } else if (right instanceof Character) { - return (int)(char)left < (int)(char)right; + return (char)left < (char)right; } } @@ -962,28 +962,28 @@ public class Def { } } else if (right instanceof Character) { if (left instanceof Double) { - return ((Number)left).doubleValue() <= (double)(char)right; + return ((Number)left).doubleValue() <= (char)right; } else if (left instanceof Float) { - return ((Number)left).floatValue() <= (float)(char)right; + return ((Number)left).floatValue() <= (char)right; } else if (left instanceof Long) { - return ((Number)left).longValue() <= (long)(char)right; + return ((Number)left).longValue() <= (char)right; } else { - return ((Number)left).intValue() <= (int)(char)right; + return ((Number)left).intValue() <= (char)right; } } } else if (left instanceof Character) { if (right instanceof Number) { if (right instanceof Double) { - return (double)(char)left <= ((Number)right).doubleValue(); + return (char)left <= ((Number)right).doubleValue(); } else if (right instanceof Float) { - return (float)(char)left <= ((Number)right).floatValue(); + return (char)left <= ((Number)right).floatValue(); } else if (right instanceof Long) { - return (long)(char)left <= ((Number)right).longValue(); + return (char)left <= ((Number)right).longValue(); } else { - return (int)(char)left <= ((Number)right).intValue(); + return (char)left <= ((Number)right).intValue(); } } else if (right instanceof Character) { - return (int)(char)left <= (int)(char)right; + return (char)left <= (char)right; } } @@ -1005,28 +1005,28 @@ public class Def { } } else if (right instanceof Character) { if (left instanceof Double) { - return ((Number)left).doubleValue() > (double)(char)right; + return ((Number)left).doubleValue() > (char)right; } else if (left instanceof Float) { - return ((Number)left).floatValue() > (float)(char)right; + return ((Number)left).floatValue() > (char)right; } else if (left instanceof Long) { - return ((Number)left).longValue() > (long)(char)right; + return ((Number)left).longValue() > 
(char)right; } else { - return ((Number)left).intValue() > (int)(char)right; + return ((Number)left).intValue() > (char)right; } } } else if (left instanceof Character) { if (right instanceof Number) { if (right instanceof Double) { - return (double)(char)left > ((Number)right).doubleValue(); + return (char)left > ((Number)right).doubleValue(); } else if (right instanceof Float) { - return (float)(char)left > ((Number)right).floatValue(); + return (char)left > ((Number)right).floatValue(); } else if (right instanceof Long) { - return (long)(char)left > ((Number)right).longValue(); + return (char)left > ((Number)right).longValue(); } else { - return (int)(char)left > ((Number)right).intValue(); + return (char)left > ((Number)right).intValue(); } } else if (right instanceof Character) { - return (int)(char)left > (int)(char)right; + return (char)left > (char)right; } } @@ -1048,28 +1048,28 @@ public class Def { } } else if (right instanceof Character) { if (left instanceof Double) { - return ((Number)left).doubleValue() >= (double)(char)right; + return ((Number)left).doubleValue() >= (char)right; } else if (left instanceof Float) { - return ((Number)left).floatValue() >= (float)(char)right; + return ((Number)left).floatValue() >= (char)right; } else if (left instanceof Long) { - return ((Number)left).longValue() >= (long)(char)right; + return ((Number)left).longValue() >= (char)right; } else { - return ((Number)left).intValue() >= (int)(char)right; + return ((Number)left).intValue() >= (char)right; } } } else if (left instanceof Character) { if (right instanceof Number) { if (right instanceof Double) { - return (double)(char)left >= ((Number)right).doubleValue(); + return (char)left >= ((Number)right).doubleValue(); } else if (right instanceof Float) { - return (float)(char)left >= ((Number)right).floatValue(); + return (char)left >= ((Number)right).floatValue(); } else if (right instanceof Long) { - return (long)(char)left >= ((Number)right).longValue(); + return (char)left >= ((Number)right).longValue(); } else { - return (int)(char)left >= ((Number)right).intValue(); + return (char)left >= ((Number)right).intValue(); } } else if (right instanceof Character) { - return (int)(char)left >= (int)(char)right; + return (char)left >= (char)right; } } @@ -1121,7 +1121,7 @@ public class Def { if (value instanceof Boolean) { return ((Boolean)value) ? 1 : 0; } else if (value instanceof Character) { - return (int)(char)value; + return (char)value; } else { return ((Number)value).intValue(); } @@ -1131,7 +1131,7 @@ public class Def { if (value instanceof Boolean) { return ((Boolean)value) ? 1L : 0; } else if (value instanceof Character) { - return (long)(char)value; + return (char)value; } else { return ((Number)value).longValue(); } @@ -1141,7 +1141,7 @@ public class Def { if (value instanceof Boolean) { return ((Boolean)value) ? (float)1 : 0; } else if (value instanceof Character) { - return (float)(char)value; + return (char)value; } else { return ((Number)value).floatValue(); } @@ -1151,7 +1151,7 @@ public class Def { if (value instanceof Boolean) { return ((Boolean)value) ? 
(double)1 : 0; } else if (value instanceof Character) { - return (double)(char)value; + return (char)value; } else { return ((Number)value).doubleValue(); } diff --git a/plugins/lang-python/build.gradle b/plugins/lang-python/build.gradle index 103a15784ea..dde9a63bad1 100644 --- a/plugins/lang-python/build.gradle +++ b/plugins/lang-python/build.gradle @@ -26,9 +26,6 @@ dependencies { compile 'org.python:jython-standalone:2.7.0' } -compileJava.options.compilerArgs << "-Xlint:-unchecked" -compileTestJava.options.compilerArgs << "-Xlint:-unchecked" - integTest { cluster { systemProperty 'es.script.inline', 'on' diff --git a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptEngineTests.java b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptEngineTests.java index a0bfab43c54..fd876d8cee8 100644 --- a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptEngineTests.java +++ b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptEngineTests.java @@ -19,6 +19,11 @@ package org.elasticsearch.script.python; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.script.CompiledScript; @@ -28,11 +33,6 @@ import org.elasticsearch.test.ESTestCase; import org.junit.After; import org.junit.Before; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -58,6 +58,7 @@ public class PythonScriptEngineTests extends ESTestCase { assertThat(((Number) o).intValue(), equalTo(3)); } + @SuppressWarnings("unchecked") public void testMapAccess() { Map<String, Object> vars = new HashMap<String, Object>(); @@ -74,6 +75,7 @@ public class PythonScriptEngineTests extends ESTestCase { assertThat(((String) o), equalTo("2")); } + @SuppressWarnings("unchecked") public void testObjectMapInter() { Map<String, Object> vars = new HashMap<String, Object>(); Map<String, Object> ctx = new HashMap<String, Object>(); @@ -92,6 +94,7 @@ public class PythonScriptEngineTests extends ESTestCase { assertThat((String) ((Map) ctx.get("obj2")).get("prop2"), equalTo("value2")); } + @SuppressWarnings("unchecked") public void testAccessListInScript() { Map<String, Object> vars = new HashMap<String, Object>(); Map<String, Object> obj2 = MapBuilder.<String, Object>newMapBuilder().put("prop2", "value2").map(); diff --git a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptMultiThreadedTests.java b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptMultiThreadedTests.java index 06d3da03ab8..f24cc889a70 100644 --- a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptMultiThreadedTests.java +++ b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptMultiThreadedTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.script.python; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.ExecutableScript; @@ -30,7 +31,6 @@ import java.util.HashMap; import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; -import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.atomic.AtomicBoolean; import static org.hamcrest.Matchers.equalTo; @@ -55,8 +55,8 @@ public class PythonScriptMultiThreadedTests
extends ESTestCase { public void run() { try { barrier.await(); - long x = ThreadLocalRandom.current().nextInt(); - long y = ThreadLocalRandom.current().nextInt(); + long x = Randomness.get().nextInt(); + long y = Randomness.get().nextInt(); long addition = x + y; Map<String, Object> vars = new HashMap<String, Object>(); vars.put("x", x); @@ -97,13 +97,13 @@ public class PythonScriptMultiThreadedTests extends ESTestCase { // @Override public void run() { // try { // barrier.await(); -// long x = ThreadLocalRandom.current().nextInt(); +// long x = Randomness.get().nextInt(); // Map<String, Object> vars = new HashMap<String, Object>(); // vars.put("x", x); // ExecutableScript script = se.executable(compiled, vars); // Map<String, Object> runtimeVars = new HashMap<String, Object>(); // for (int i = 0; i < 100000; i++) { -// long y = ThreadLocalRandom.current().nextInt(); +// long y = Randomness.get().nextInt(); // long addition = x + y; // runtimeVars.put("y", y); // long result = ((Number) script.run(runtimeVars)).longValue(); @@ -143,8 +143,8 @@ public class PythonScriptMultiThreadedTests extends ESTestCase { barrier.await(); Map<String, Object> runtimeVars = new HashMap<String, Object>(); for (int i = 0; i < 10000; i++) { - long x = ThreadLocalRandom.current().nextInt(); - long y = ThreadLocalRandom.current().nextInt(); + long x = Randomness.get().nextInt(); + long y = Randomness.get().nextInt(); long addition = x + y; runtimeVars.put("x", x); runtimeVars.put("y", y); diff --git a/plugins/mapper-murmur3/build.gradle b/plugins/mapper-murmur3/build.gradle index ca93c118487..5b985d9138f 100644 --- a/plugins/mapper-murmur3/build.gradle +++ b/plugins/mapper-murmur3/build.gradle @@ -21,5 +21,3 @@ esplugin { description 'The Mapper Murmur3 plugin allows to compute hashes of a field\'s values at index-time and to store them in the index.' classname 'org.elasticsearch.plugin.mapper.MapperMurmur3Plugin' } - -compileJava.options.compilerArgs << "-Xlint:-rawtypes" diff --git a/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java b/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java index 03b00d2ac39..ce78c75d783 100644 --- a/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java +++ b/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java @@ -19,6 +19,10 @@ package org.elasticsearch.index.mapper.murmur3; +import java.io.IOException; +import java.util.List; +import java.util.Map; + import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.util.BytesRef; @@ -35,10 +39,6 @@ import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.core.LongFieldMapper; import org.elasticsearch.index.mapper.core.NumberFieldMapper; -import java.io.IOException; -import java.util.List; -import java.util.Map; - import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField; public class Murmur3FieldMapper extends LongFieldMapper { @@ -93,8 +93,7 @@ public class Murmur3FieldMapper extends LongFieldMapper { public static class TypeParser implements Mapper.TypeParser { @Override - @SuppressWarnings("unchecked") - public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException { + public Mapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException { Builder builder = new Builder(name); // tweaking these settings is no longer allowed, the entire purpose of murmur3 fields is to store a hash
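A note on the Randomness changes in the multi-threaded script tests above: the swap from java.util.concurrent.ThreadLocalRandom to org.elasticsearch.common.Randomness.get() is about reproducibility, since ThreadLocalRandom seeds itself internally and cannot be re-seeded, so a failing randomized test cannot be replayed. The following is a minimal, self-contained sketch of that idea only; the "tests.seed" property name and the ThreadLocal wiring are assumptions made for the example, not Elasticsearch's actual Randomness implementation.

import java.util.Random;

// Illustrative only: a per-thread Random derived from one externally supplied
// seed can reproduce a failing run exactly, which ThreadLocalRandom.current()
// (internally seeded, unseedable) never can.
public class ReproducibleRandomSketch {
    // Hypothetical seed source; a test framework would log this value so a
    // failing run can be replayed by passing the same seed back in.
    private static final long SEED = Long.getLong("tests.seed", 42L);
    private static final ThreadLocal<Random> RANDOM =
            ThreadLocal.withInitial(() -> new Random(SEED));

    static Random get() {
        return RANDOM.get();
    }

    public static void main(String[] args) {
        // Mirrors the pattern in the multi-threaded script tests above.
        long x = get().nextInt();
        long y = get().nextInt();
        System.out.println(x + " + " + y + " = " + (x + y)); // same output on every run
    }
}

With a fixed seed, any assertion failure over x + y can be reproduced deterministically, which is exactly what the forbidden ThreadLocalRandom calls could not offer.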
diff --git a/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperTests.java b/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperTests.java index 603fcbbf820..1b54b8293a7 100644 --- a/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperTests.java +++ b/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperTests.java @@ -52,7 +52,7 @@ public class Murmur3FieldMapperTests extends ESSingleNodeTestCase { Collections.singletonMap(Murmur3FieldMapper.CONTENT_TYPE, new Murmur3FieldMapper.TypeParser()), Collections.emptyMap()); parser = new DocumentMapperParser(indexService.getIndexSettings(), indexService.mapperService(), - indexService.analysisService(), indexService.similarityService(), mapperRegistry); + indexService.analysisService(), indexService.similarityService(), mapperRegistry, indexService::getQueryShardContext); } public void testDefaults() throws Exception { @@ -128,7 +128,7 @@ public class Murmur3FieldMapperTests extends ESSingleNodeTestCase { Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build(); indexService = createIndex("test_bwc", settings); parser = new DocumentMapperParser(indexService.getIndexSettings(), indexService.mapperService(), - indexService.analysisService(), indexService.similarityService(), mapperRegistry); + indexService.analysisService(), indexService.similarityService(), mapperRegistry, indexService::getQueryShardContext); String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("field") .field("type", "murmur3") @@ -144,7 +144,7 @@ public class Murmur3FieldMapperTests extends ESSingleNodeTestCase { Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build(); indexService = createIndex("test_bwc", settings); parser = new DocumentMapperParser(indexService.getIndexSettings(), indexService.mapperService(), - indexService.analysisService(), indexService.similarityService(), mapperRegistry); + indexService.analysisService(), indexService.similarityService(), mapperRegistry, indexService::getQueryShardContext); String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("field") .field("type", "murmur3") diff --git a/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java b/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java index 403eb284f96..956dd29402b 100644 --- a/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java +++ b/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java @@ -58,7 +58,7 @@ public class SizeMappingTests extends ESSingleNodeTestCase { Map metadataMappers = new HashMap<>(); IndicesModule indices = new IndicesModule(); indices.registerMetadataMapper(SizeFieldMapper.NAME, new SizeFieldMapper.TypeParser()); - mapperService = new MapperService(indexService.getIndexSettings(), indexService.analysisService(), indexService.similarityService(), indices.getMapperRegistry()); + mapperService = new MapperService(indexService.getIndexSettings(), indexService.analysisService(), indexService.similarityService(), indices.getMapperRegistry(), indexService::getQueryShardContext); parser = mapperService.documentMapperParser(); 
} @@ -90,7 +90,7 @@ public class SizeMappingTests extends ESSingleNodeTestCase { Collections.emptyMap(), Collections.singletonMap(SizeFieldMapper.NAME, new SizeFieldMapper.TypeParser())); parser = new DocumentMapperParser(indexService.getIndexSettings(), mapperService, - indexService.analysisService(), indexService.similarityService(), mapperRegistry); + indexService.analysisService(), indexService.similarityService(), mapperRegistry, indexService::getQueryShardContext); DocumentMapper docMapper = parser.parse("type", new CompressedXContent(mapping)); BytesReference source = XContentFactory.jsonBuilder() diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_warmer.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_warmer.json deleted file mode 100644 index 7284da6291a..00000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_warmer.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "indices.delete_warmer": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-warmers.html", - "methods": ["DELETE"], - "url": { - "path": "/{index}/_warmer/{name}", - "paths": ["/{index}/_warmer/{name}", "/{index}/_warmers/{name}"], - "parts": { - "index": { - "type" : "list", - "required" : true, - "description" : "A comma-separated list of index names to delete warmers from (supports wildcards); use `_all` to perform the operation on all indices." - }, - "name" : { - "type" : "list", - "required" : true, - "description" : "A comma-separated list of warmer names to delete (supports wildcards); use `_all` to delete all warmers in the specified indices. You must specify a name either in the uri or in the parameters." - } - }, - "params": { - "master_timeout": { - "type" : "time", - "description" : "Specify timeout for connection to master" - }, - "name" : { - "type" : "list", - "description" : "A comma-separated list of warmer names to delete (supports wildcards); use `_all` to delete all warmers in the specified indices. You must specify a name either in the uri or in the parameters." 
- } - } - }, - "body": null - } -} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get.json index 5c426f962a7..2c0c59f6898 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get.json @@ -14,7 +14,7 @@ "feature":{ "type":"list", "description":"A comma-separated list of features", - "options": ["_settings", "_mappings", "_warmers", "_aliases"] + "options": ["_settings", "_mappings", "_aliases"] } }, "params":{ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_warmer.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_warmer.json deleted file mode 100644 index fbd7abbc34c..00000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_warmer.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "indices.get_warmer": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-warmers.html", - "methods": ["GET"], - "url": { - "path": "/_warmer", - "paths": [ "/_warmer", "/{index}/_warmer", "/{index}/_warmer/{name}", "/_warmer/{name}", "/{index}/{type}/_warmer/{name}"], - "parts": { - "index": { - "type" : "list", - "description" : "A comma-separated list of index names to restrict the operation; use `_all` to perform the operation on all indices" - }, - "name": { - "type" : "list", - "description" : "The name of the warmer (supports wildcards); leave empty to get all warmers" - }, - "type": { - "type" : "list", - "description" : "A comma-separated list of document types to restrict the operation; leave empty to perform the operation on all types" - } - }, - "params": { - "ignore_unavailable": { - "type" : "boolean", - "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)" - }, - "allow_no_indices": { - "type" : "boolean", - "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)" - }, - "expand_wildcards": { - "type" : "enum", - "options" : ["open","closed","none","all"], - "default" : "open", - "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both." 
- }, - "local": { - "type": "boolean", - "description": "Return local information, do not retrieve the state from master node (default: false)" - } - } - }, - "body": null - } -} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_warmer.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_warmer.json deleted file mode 100644 index 9039367d15f..00000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_warmer.json +++ /dev/null @@ -1,53 +0,0 @@ -{ - "indices.put_warmer": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-warmers.html", - "methods": ["PUT", "POST"], - "url": { - "path": "/{index}/_warmer/{name}", - "paths": ["/_warmer/{name}", "/{index}/_warmer/{name}", "/{index}/{type}/_warmer/{name}", "/_warmers/{name}", "/{index}/_warmers/{name}", "/{index}/{type}/_warmers/{name}"], - "parts": { - "index": { - "type" : "list", - "description" : "A comma-separated list of index names to register the warmer for; use `_all` or omit to perform the operation on all indices" - }, - "name": { - "type" : "string", - "required" : true, - "description" : "The name of the warmer" - }, - "type": { - "type" : "list", - "description" : "A comma-separated list of document types to register the warmer for; leave empty to perform the operation on all types" - } - }, - "params": { - "master_timeout": { - "type" : "time", - "description" : "Specify timeout for connection to master" - }, - "ignore_unavailable": { - "type" : "boolean", - "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed) in the search request to warm" - }, - "allow_no_indices": { - "type" : "boolean", - "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices in the search request to warm. (This includes `_all` string or when no indices have been specified)" - }, - "expand_wildcards": { - "type" : "enum", - "options" : ["open","closed","none","all"], - "default" : "open", - "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both, in the search request to warm." 
- }, - "request_cache": { - "type" : "boolean", - "description" : "Specify whether the request to be warmed should use the request cache, defaults to index level setting" - } - } - }, - "body": { - "description" : "The search request definition for the warmer (query, filters, facets, sorting, etc)", - "required" : true - } - } -} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/10_basic.yaml index acb4da22716..3fc0e00d637 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/10_basic.yaml @@ -30,25 +30,6 @@ - match: { test_index.settings.index.number_of_replicas: "0"} ---- -"Create index with warmers": - - - do: - indices.create: - index: test_index - body: - warmers: - test_warmer: - source: - query: - match_all: {} - - - do: - indices.get_warmer: - index: test_index - - - match: {test_index.warmers.test_warmer.source.query.match_all: {}} - --- "Create index with aliases": @@ -81,49 +62,3 @@ - match: {test_index.aliases.test_clias.filter.term.field: value} - is_false: test_index.aliases.test_clias.index_routing - is_false: test_index.aliases.test_clias.search_routing - ---- -"Create index with mappings, settings, warmers and aliases": - - - do: - indices.create: - index: test_index - body: - mappings: - type_1: {} - settings: - number_of_replicas: "0" - warmers: - test_warmer: - source: - query: - match_all: {} - aliases: - test_alias: {} - test_blias: {routing: b} - - - do: - indices.get_mapping: - index: test_index - - - match: { test_index.mappings.type_1: {}} - - - do: - indices.get_settings: - index: test_index - - - match: { test_index.settings.index.number_of_replicas: "0"} - - - do: - indices.get_warmer: - index: test_index - - - match: { test_index.warmers.test_warmer.source.query.match_all: {}} - - - do: - indices.get_alias: - index: test_index - - - match: { test_index.aliases.test_alias: {}} - - match: { test_index.aliases.test_blias.search_routing: b} - - match: { test_index.aliases.test_blias.index_routing: b} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_warmer/all_path_options.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_warmer/all_path_options.yaml deleted file mode 100644 index 603b01c15a6..00000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_warmer/all_path_options.yaml +++ /dev/null @@ -1,218 +0,0 @@ -setup: - - do: - indices.create: - index: test_index1 - body: - warmers: - test_warmer1: - source: - query: - match_all: {} - test_warmer2: - source: - query: - match_all: {} - - - do: - indices.create: - index: test_index2 - body: - warmers: - test_warmer1: - source: - query: - match_all: {} - test_warmer2: - source: - query: - match_all: {} - - - do: - indices.create: - index: foo - body: - warmers: - test_warmer1: - source: - query: - match_all: {} - test_warmer2: - source: - query: - match_all: {} - ---- -"Check setup": - - - do: - indices.get_warmer: { index: _all, name: '*' } - - - match: {test_index1.warmers.test_warmer1.source.query.match_all: {}} - - match: {test_index1.warmers.test_warmer2.source.query.match_all: {}} - - match: {test_index2.warmers.test_warmer1.source.query.match_all: {}} - - match: {test_index2.warmers.test_warmer2.source.query.match_all: {}} - - match: {foo.warmers.test_warmer1.source.query.match_all: {}} - - match: 
{foo.warmers.test_warmer2.source.query.match_all: {}} - - ---- -"check delete with _all index": - - do: - indices.delete_warmer: - index: _all - name: test_warmer1 - - - do: - indices.get_warmer: {} - - - match: {test_index1.warmers.test_warmer2.source.query.match_all: {}} - - match: {test_index2.warmers.test_warmer2.source.query.match_all: {}} - - match: {foo.warmers.test_warmer2.source.query.match_all: {}} - ---- -"check delete with * index": - - do: - indices.delete_warmer: - index: "*" - name: test_warmer1 - - - do: - indices.get_warmer: {} - - - match: {test_index1.warmers.test_warmer2.source.query.match_all: {}} - - match: {test_index2.warmers.test_warmer2.source.query.match_all: {}} - - match: {foo.warmers.test_warmer2.source.query.match_all: {}} - ---- -"check delete with index list": - - do: - indices.delete_warmer: - index: "test_index1,test_index2" - name: test_warmer1 - - - do: - indices.get_warmer: { index: _all, name: 'test_warmer1' } - - - match: {foo.warmers.test_warmer1.source.query.match_all: {}} - - is_false: test_index1 - - is_false: test_index2 - - - do: - indices.get_warmer: { index: _all, name: 'test_warmer2' } - - - match: {test_index1.warmers.test_warmer2.source.query.match_all: {}} - - match: {test_index2.warmers.test_warmer2.source.query.match_all: {}} - - match: {foo.warmers.test_warmer2.source.query.match_all: {}} - ---- -"check delete with prefix* index": - - do: - indices.delete_warmer: - index: "test_*" - name: test_warmer1 - - - do: - indices.get_warmer: { index: _all, name: 'test_warmer1' } - - - match: {foo.warmers.test_warmer1.source.query.match_all: {}} - - is_false: test_index1 - - is_false: test_index2 - - - do: - indices.get_warmer: { index: _all, name: 'test_warmer2' } - - - match: {test_index1.warmers.test_warmer2.source.query.match_all: {}} - - match: {test_index2.warmers.test_warmer2.source.query.match_all: {}} - - match: {foo.warmers.test_warmer2.source.query.match_all: {}} - - ---- -"check delete with index list and * warmers": - - do: - indices.delete_warmer: - index: "test_index1,test_index2" - name: "*" - - - do: - indices.get_warmer: { index: _all, name: 'test_warmer1' } - - - match: {foo.warmers.test_warmer1.source.query.match_all: {}} - - is_false: test_index1 - - is_false: test_index2 - - - do: - indices.get_warmer: { index: _all, name: 'test_warmer2' } - - - match: {foo.warmers.test_warmer2.source.query.match_all: {}} - - is_false: test_index1 - - is_false: test_index2 - ---- -"check delete with index list and _all warmers": - - do: - indices.delete_warmer: - index: "test_index1,test_index2" - name: _all - - - do: - indices.get_warmer: { index: _all, name: 'test_warmer1' } - - - match: {foo.warmers.test_warmer1.source.query.match_all: {}} - - is_false: test_index1 - - is_false: test_index2 - - - do: - indices.get_warmer: { index: _all, name: 'test_warmer2' } - - - match: {foo.warmers.test_warmer2.source.query.match_all: {}} - - is_false: test_index1 - - is_false: test_index2 - ---- -"check delete with index list and wildcard warmers": - - do: - indices.delete_warmer: - index: "test_index1,test_index2" - name: "*1" - - - do: - indices.get_warmer: { index: _all, name: 'test_warmer1' } - - - match: {foo.warmers.test_warmer1.source.query.match_all: {}} - - is_false: test_index1 - - is_false: test_index2 - - - do: - indices.get_warmer: { index: _all, name: 'test_warmer2' } - - - match: {test_index1.warmers.test_warmer2.source.query.match_all: {}} - - match: {test_index2.warmers.test_warmer2.source.query.match_all: {}} - - match: 
{foo.warmers.test_warmer2.source.query.match_all: {}} - ---- -"check 404 on no matching test_warmer": - - do: - catch: missing - indices.delete_warmer: - index: "*" - name: "non_existent" - - - do: - catch: missing - indices.delete_warmer: - index: "non_existent" - name: "test_warmer1" - - ---- -"check delete with blank index and blank test_warmer": - - do: - catch: param - indices.delete_warmer: - name: "test_warmer1" - - - do: - catch: param - indices.delete_warmer: - index: "test_index1" - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get/10_basic.yaml index 4c5251b36a3..218d1e0433c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get/10_basic.yaml @@ -10,11 +10,6 @@ setup: test_blias: {} mappings: type_1: {} - warmers: - test_warmer: - source: - query: - match_all: {} settings: number_of_shards: 1 number_of_replicas: 1 @@ -59,7 +54,6 @@ setup: - is_true: test_index.aliases - is_true: test_index.settings - - is_true: test_index.warmers - is_true: test_index.mappings --- @@ -73,20 +67,6 @@ setup: - is_true: test_index.mappings - is_false: test_index.aliases - is_false: test_index.settings - - is_false: test_index.warmers - ---- -"Get index infos for mappings and warmers only": - - - do: - indices.get: - index: test_index - feature: _mapping,_warmer - - - is_true: test_index.mappings - - is_true: test_index.warmers - - is_false: test_index.aliases - - is_false: test_index.settings --- "Get index infos should work on aliases": @@ -94,10 +74,9 @@ setup: - do: indices.get: index: test_blias - feature: _mapping,_warmer + feature: _mapping - is_true: test_index.mappings - - is_true: test_index.warmers - is_false: test_index.aliases - is_false: test_index.settings @@ -113,7 +92,6 @@ setup: - is_true: test_index.settings - is_true: test_index_2.settings - is_false: test_index.aliases - - is_false: test_index.warmers --- "Get index infos with human settings should return index creation date and version in readable format": diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_warmer/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_warmer/10_basic.yaml deleted file mode 100644 index 668a6119c3d..00000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_warmer/10_basic.yaml +++ /dev/null @@ -1,201 +0,0 @@ ---- -setup: - - do: - indices.create: - index: test_1 - body: - warmers: - warmer_1: - source: { query: { match_all: { }}} - warmer_2: - source: { query: { match_all: { }}} - - - - do: - indices.create: - index: test_2 - body: - warmers: - warmer_2: - source: { query: { match_all: { }}} - warmer_3: - source: { query: { match_all: { }}} - - - do: - cluster.health: - wait_for_status: yellow - ---- -"Get /_warmer": - - - do: - indices.get_warmer: {} - - - match: { test_1.warmers.warmer_1.source.query.match_all: {}} - - match: { test_1.warmers.warmer_2.source.query.match_all: {}} - - match: { test_2.warmers.warmer_2.source.query.match_all: {}} - - match: { test_2.warmers.warmer_3.source.query.match_all: {}} - ---- -"Get /{index}/_warmer": - - - do: - indices.get_warmer: - index: test_1 - - - match: { test_1.warmers.warmer_1.source.query.match_all: {}} - - match: { test_1.warmers.warmer_2.source.query.match_all: {}} - - is_false: test_2 - - ---- -"Get /{index}/_warmer/_all": - - - do: - indices.get_warmer: - 
index: test_1 - name: _all - - - match: { test_1.warmers.warmer_1.source.query.match_all: {}} - - match: { test_1.warmers.warmer_2.source.query.match_all: {}} - - is_false: test_2 - ---- -"Get /{index}/_warmer/*": - - - do: - indices.get_warmer: - index: test_1 - name: '*' - - - match: { test_1.warmers.warmer_1.source.query.match_all: {}} - - match: { test_1.warmers.warmer_2.source.query.match_all: {}} - - is_false: test_2 - ---- -"Get /{index}/_warmer/{name}": - - - do: - indices.get_warmer: - index: test_1 - name: warmer_1 - - - match: { test_1.warmers.warmer_1.source.query.match_all: {}} - - is_false: test_1.warmers.warmer_2 - - is_false: test_2 - ---- -"Get /{index}/_warmer/{name,name}": - - - do: - indices.get_warmer: - index: test_1 - name: warmer_1,warmer_2 - - - match: { test_1.warmers.warmer_1.source.query.match_all: {}} - - match: { test_1.warmers.warmer_2.source.query.match_all: {}} - - is_false: test_2 - ---- -"Get /{index}/_warmer/{name*}": - - - do: - indices.get_warmer: - index: test_1 - name: '*2' - - - match: { test_1.warmers.warmer_2.source.query.match_all: {}} - - is_false: test_1.warmers.warmer_1 - - is_false: test_2 - ---- -"Get /_warmer/{name}": - - - do: - indices.get_warmer: - name: warmer_2 - - - match: { test_1.warmers.warmer_2.source.query.match_all: {}} - - match: { test_2.warmers.warmer_2.source.query.match_all: {}} - - is_false: test_1.warmers.warmer_1 - - is_false: test_2.warmers.warmer_3 - ---- -"Get /_all/_warmer/{name}": - - - do: - indices.get_warmer: - index: _all - name: warmer_2 - - - match: { test_1.warmers.warmer_2.source.query.match_all: {}} - - match: { test_2.warmers.warmer_2.source.query.match_all: {}} - - is_false: test_1.warmers.warmer_1 - - is_false: test_2.warmers.warmer_3 - ---- -"Get /*/_warmer/{name}": - - - do: - indices.get_warmer: - index: '*' - name: warmer_2 - - - match: { test_1.warmers.warmer_2.source.query.match_all: {}} - - match: { test_2.warmers.warmer_2.source.query.match_all: {}} - - is_false: test_1.warmers.warmer_1 - - is_false: test_2.warmers.warmer_3 - ---- -"Get /index,index/_warmer/{name}": - - - do: - indices.get_warmer: - index: test_1,test_2 - name: warmer_2 - - - match: { test_1.warmers.warmer_2.source.query.match_all: {}} - - match: { test_2.warmers.warmer_2.source.query.match_all: {}} - - is_false: test_2.warmers.warmer_3 - ---- -"Get /index*/_warmer/{name}": - - - do: - indices.get_warmer: - index: '*2' - name: warmer_2 - - - match: { test_2.warmers.warmer_2.source.query.match_all: {}} - - is_false: test_1 - - is_false: test_2.warmers.warmer_3 - ---- -"Empty response when no matching warmer": - - - do: - indices.get_warmer: - index: '*' - name: non_existent - - - match: { '': {}} - ---- -"Throw 404 on missing index": - - - do: - catch: missing - indices.get_warmer: - index: non_existent - name: '*' - ---- -"Get /_warmer with local flag": - - - do: - indices.get_warmer: - local: true - - - is_true: test_1 - - is_true: test_2 - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_warmer/20_empty.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_warmer/20_empty.yaml deleted file mode 100644 index 702b0cd01d1..00000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_warmer/20_empty.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- -setup: - - - do: - indices.create: - index: test_1 - - - do: - indices.create: - index: test_2 - ---- -"Check empty warmers when getting all warmers via /_warmer": - - - do: - indices.get_warmer: {} - - - match: { test_1.warmers: 
{}} - - match: { test_2.warmers: {}} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_warmer/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_warmer/10_basic.yaml deleted file mode 100644 index 7e4c57429ec..00000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_warmer/10_basic.yaml +++ /dev/null @@ -1,145 +0,0 @@ ---- -setup: - - do: - indices.create: - index: test_index - - - do: - indices.create: - index: test_idx - - - do: - cluster.health: - wait_for_status: yellow - - - do: - indices.put_warmer: - index: test_idx - name: test_warmer2 - body: - query: - match_all: {} - - - do: - indices.put_warmer: - index: test_index - name: test_warmer - body: - query: - match_all: {} - ---- -"Basic test for warmers": - - do: - indices.get_warmer: - index: test_index - name: test_warmer - - - match: {test_index.warmers.test_warmer.source.query.match_all: {boost: 1.0}} - - - do: - indices.delete_warmer: - index: test_index - name: test_warmer - - - do: - indices.get_warmer: - index: test_index - name: test_warmer - - - match: { '': {}} - ---- -"Getting all warmers via /_warmer should work": - - - do: - indices.get_warmer: {} - - - match: {test_index.warmers.test_warmer.source.query.match_all: {boost: 1.0}} - - match: {test_idx.warmers.test_warmer2.source.query.match_all: {boost: 1.0}} - - ---- -"Getting warmers for several indices should work using *": - - - do: - indices.get_warmer: - index: '*' - name: '*' - - - match: {test_index.warmers.test_warmer.source.query.match_all: {boost: 1.0}} - - match: {test_idx.warmers.test_warmer2.source.query.match_all: {boost: 1.0}} - ---- -"Getting warmers for several indices should work using _all": - - - do: - indices.get_warmer: - index: _all - name: _all - - - match: {test_index.warmers.test_warmer.source.query.match_all: {boost: 1.0}} - - match: {test_idx.warmers.test_warmer2.source.query.match_all: {boost: 1.0}} - ---- -"Getting all warmers without specifying index should work": - - - do: - indices.get_warmer: - name: _all - - - match: {test_index.warmers.test_warmer.source.query.match_all: {boost: 1.0}} - - match: {test_idx.warmers.test_warmer2.source.query.match_all: {boost: 1.0}} - ---- -"Getting warmers for several indices should work using prefix*": - - - do: - indices.get_warmer: - index: test_i* - name: test_w* - - - match: {test_index.warmers.test_warmer.source.query.match_all: {boost: 1.0}} - - match: {test_idx.warmers.test_warmer2.source.query.match_all: {boost: 1.0}} - ---- -"Getting warmers for several indices should work using comma-separated lists": - - - do: - indices.get_warmer: - index: test_index,test_idx - name: test_warmer,test_warmer2 - - - match: {test_index.warmers.test_warmer.source.query.match_all: {boost: 1.0}} - - match: {test_idx.warmers.test_warmer2.source.query.match_all: {boost: 1.0}} - ---- -"Getting a non-existent warmer on an existing index should return an empty body": - - - do: - indices.get_warmer: - index: test_index - name: non-existent - - - match: { '': {}} - ---- -"Getting an existent and non-existent warmer should return the existent and no data about the non-existent warmer": - - - do: - indices.get_warmer: - index: test_index - name: test_warmer,non-existent - - - match: {test_index.warmers.test_warmer.source.query.match_all: {boost: 1.0}} - - is_false: test_index.warmers.non-existent - ---- -"Getting warmer on an non-existent index should return 404": - - - do: - catch: missing - indices.get_warmer: - index: non-existent - name: 
foo - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_warmer/20_aliases.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_warmer/20_aliases.yaml deleted file mode 100644 index b8a2fa6b27e..00000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_warmer/20_aliases.yaml +++ /dev/null @@ -1,30 +0,0 @@ ---- -"Getting warmer for aliases should return the real index as key": - - - do: - indices.create: - index: test_index - - - do: - cluster.health: - wait_for_status: yellow - - - do: - indices.put_warmer: - index: test_index - name: test_warmer - body: - query: - match_all: {} - - - do: - indices.put_alias: - index: test_index - name: test_alias - - - do: - indices.get_warmer: - index: test_alias - - - match: {test_index.warmers.test_warmer.source.query.match_all: {boost: 1.0}} - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_warmer/all_path_options.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_warmer/all_path_options.yaml deleted file mode 100644 index ffad427101a..00000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_warmer/all_path_options.yaml +++ /dev/null @@ -1,134 +0,0 @@ ---- -setup: - - - do: - indices.create: - index: test_index1 - - - do: - indices.create: - index: test_index2 - - - do: - indices.create: - index: foo - - - do: - cluster.health: - wait_for_status: yellow - ---- -"put warmer per index": - - - do: - indices.put_warmer: - index: test_index1 - name: warmer - body: - query: - match_all: {} - - do: - indices.put_warmer: - index: test_index2 - name: warmer - body: - query: - match_all: {} - - - do: - indices.get_warmer: { index: _all, name: '*' } - - - match: {test_index1.warmers.warmer.source.query.match_all: {boost: 1.0}} - - match: {test_index2.warmers.warmer.source.query.match_all: {boost: 1.0}} - - is_false: foo - ---- -"put warmer in _all index": - - do: - indices.put_warmer: - index: _all - name: warmer - body: - query: - match_all: {} - - do: - indices.get_warmer: { index: _all, name: '*' } - - - match: {test_index1.warmers.warmer.source.query.match_all: {boost: 1.0}} - - match: {test_index2.warmers.warmer.source.query.match_all: {boost: 1.0}} - - match: {foo.warmers.warmer.source.query.match_all: {boost: 1.0}} - ---- -"put warmer in * index": - - do: - indices.put_warmer: - index: "*" - name: warmer - body: - query: - match_all: {} - - do: - indices.get_warmer: { index: _all, name: '*' } - - - match: {test_index1.warmers.warmer.source.query.match_all: {boost: 1.0}} - - match: {test_index2.warmers.warmer.source.query.match_all: {boost: 1.0}} - - match: {foo.warmers.warmer.source.query.match_all: {boost: 1.0}} - ---- -"put warmer prefix* index": - - do: - indices.put_warmer: - index: "test_index*" - name: warmer - body: - query: - match_all: {} - - do: - indices.get_warmer: { index: _all, name: '*' } - - - match: {test_index1.warmers.warmer.source.query.match_all: {boost: 1.0}} - - match: {test_index2.warmers.warmer.source.query.match_all: {boost: 1.0}} - - is_false: foo - ---- -"put warmer in list of indices": - - do: - indices.put_warmer: - index: [test_index1, test_index2] - name: warmer - body: - query: - match_all: {} - - do: - indices.get_warmer: { index: _all, name: '*' } - - - match: {test_index1.warmers.warmer.source.query.match_all: {boost: 1.0}} - - match: {test_index2.warmers.warmer.source.query.match_all: {boost: 1.0}} - - is_false: foo - ---- -"put warmer with blank index": - - do: - 
indices.put_warmer: - name: warmer - body: - query: - match_all: {} - - do: - indices.get_warmer: { index: _all, name: '*' } - - - match: {test_index1.warmers.warmer.source.query.match_all: {boost: 1.0}} - - match: {test_index2.warmers.warmer.source.query.match_all: {boost: 1.0}} - - match: {foo.warmers.warmer.source.query.match_all: {boost: 1.0}} - --- -"put warmer with missing name": - - - - do: - catch: param - indices.put_warmer: - body: - query: - match_all: {} - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mpercolate/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/mpercolate/10_basic.yaml index fef50208749..66d62e49635 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mpercolate/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mpercolate/10_basic.yaml @@ -16,6 +16,9 @@ query: match_all: {} + - do: + indices.refresh: {} + - do: mpercolate: body: @@ -33,8 +36,7 @@ index: percolator_index type: my_type id: 1 - - doc: - foo: bar + - {} - match: { responses.0.total: 1 } - match: { responses.1.error.root_cause.0.type: index_not_found_exception } diff --git a/test/framework/build.gradle b/test/framework/build.gradle index 5039036be46..5c607e1aa61 100644 --- a/test/framework/build.gradle +++ b/test/framework/build.gradle @@ -30,9 +30,10 @@ dependencies { compile "org.apache.httpcomponents:httpcore:${versions.httpcore}" compile "commons-logging:commons-logging:${versions.commonslogging}" compile "commons-codec:commons-codec:${versions.commonscodec}" + compile 'org.elasticsearch:securemock:1.2' } -compileJava.options.compilerArgs << '-Xlint:-cast,-deprecation,-fallthrough,-overrides,-rawtypes,-serial,-try,-unchecked' +compileJava.options.compilerArgs << '-Xlint:-cast,-deprecation,-rawtypes,-serial,-try,-unchecked' compileTestJava.options.compilerArgs << '-Xlint:-rawtypes' // the main files are actually test files, so use the appropriate forbidden api sigs diff --git a/test/framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java b/test/framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java index 8b529f9fc8f..1c110bc405a 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java @@ -58,6 +58,7 @@ public class MapperTestUtils { return new MapperService(indexSettings, analysisService, similarityService, - mapperRegistry); + mapperRegistry, + () -> null); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java index 58a72789f65..3128a2220ae 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java @@ -26,6 +26,7 @@ import org.apache.lucene.index.IndexWriter; import org.apache.lucene.store.BaseDirectoryWrapper; import org.apache.lucene.store.Directory; import org.apache.lucene.store.LockFactory; +import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.store.StoreRateLimiting; import org.apache.lucene.util.LuceneTestCase; @@ -113,10 +114,6 @@ public class MockFSDirectoryService extends FsDirectoryService { if (!Lucene.indexExists(dir)) { return; } - if (IndexWriter.isLocked(dir)) { - ESTestCase.checkIndexFailed = true; - throw new
IllegalStateException("IndexWriter is still open on shard " + shardId); - } try (CheckIndex checkIndex = new CheckIndex(dir)) { BytesStreamOutput os = new BytesStreamOutput(); PrintStream out = new PrintStream(os, false, StandardCharsets.UTF_8.name()); @@ -134,6 +131,9 @@ public class MockFSDirectoryService extends FsDirectoryService { logger.debug("check index [success]\n{}", new String(os.bytes().toBytes(), StandardCharsets.UTF_8)); } } + } catch (LockObtainFailedException e) { + ESTestCase.checkIndexFailed = true; + throw new IllegalStateException("IndexWriter is still open on shard " + shardId, e); } } catch (Exception e) { logger.warn("failed to check index", e);