diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 6c5923e76a8..4491737e68e 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -74,7 +74,7 @@ Then sit back and wait. There will probably be discussion about the pull request
 Contributing to the Elasticsearch codebase
 ------------------------------------------
 
-**Repository:** [https://github.com/elasticsearch/elasticsearch](https://github.com/elastic/elasticsearch)
+**Repository:** [https://github.com/elastic/elasticsearch](https://github.com/elastic/elasticsearch)
 
 Make sure you have [Maven](http://maven.apache.org) installed, as Elasticsearch uses it as its build system. Integration with IntelliJ and Eclipse should work out of the box. Eclipse users can automatically configure their IDE by running `mvn eclipse:eclipse` and then importing the project into their workspace: `File > Import > Existing project into workspace` and make sure to select `Search for nested projects...` option as Elasticsearch is a multi-module maven project. Additionally you will want to ensure that Eclipse is using 2048m of heap by modifying `eclipse.ini` accordingly to avoid GC overhead errors. Please make sure the [m2e-connector](http://marketplace.eclipse.org/content/m2e-connector-maven-dependency-plugin) is not installed in your Eclipse distribution as it will interfere with setup performed by `mvn eclipse:eclipse`.
 
diff --git a/Vagrantfile b/Vagrantfile
index ab0e3224015..7c76e23df8e 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -32,7 +32,10 @@ Vagrant.configure(2) do |config|
   end
   config.vm.define "vivid" do |config|
     config.vm.box = "ubuntu/vivid64"
-    ubuntu_common config
+    ubuntu_common config, extra: <<-SHELL
+      # Install Jayatana so we can work around it being present.
+      [ -f /usr/share/java/jayatanaag.jar ] || install jayatana
+    SHELL
  end
  # Wheezy's backports don't contain Openjdk 8 and the backflips required to
  # get the sun jdk on there just aren't worth it.
We have jessie for testing @@ -116,11 +119,11 @@ SOURCE_PROMPT end end -def ubuntu_common(config) - deb_common config, 'apt-add-repository -y ppa:openjdk-r/ppa > /dev/null 2>&1', 'openjdk-r-*' +def ubuntu_common(config, extra: '') + deb_common config, 'apt-add-repository -y ppa:openjdk-r/ppa > /dev/null 2>&1', 'openjdk-r-*', extra: extra end -def deb_common(config, add_openjdk_repository_command, openjdk_list) +def deb_common(config, add_openjdk_repository_command, openjdk_list, extra: '') # http://foo-o-rama.com/vagrant--stdin-is-not-a-tty--fix.html config.vm.provision "fix-no-tty", type: "shell" do |s| s.privileged = false @@ -137,6 +140,7 @@ def deb_common(config, add_openjdk_repository_command, openjdk_list) (echo "Importing java-8 ppa" && #{add_openjdk_repository_command} && apt-get update) + #{extra} SHELL ) end diff --git a/core/pom.xml b/core/pom.xml index 97c9bee87f6..75dc96384f3 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -372,12 +372,17 @@ src/main/java/org/elasticsearch/common/inject/** - src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java + src/main/java/org/apache/lucene/**/X*.java - - src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestState.java src/main/java/org/elasticsearch/http/netty/pipelining/** + + src/main/java/org/elasticsearch/common/network/InetAddresses.java + src/test/java/org/elasticsearch/common/network/InetAddressesTests.java + src/test/java/org/elasticsearch/common/collect/EvictingQueueTests.java + + src/main/java/org/joda/time/base/BaseDateTime.java + src/main/java/org/joda/time/format/StrictISODateTimeFormat.java diff --git a/core/src/main/java/org/apache/lucene/search/postingshighlight/CustomPassageFormatter.java b/core/src/main/java/org/apache/lucene/search/postingshighlight/CustomPassageFormatter.java index 75ad81b07d8..2f7d53870cc 100644 --- a/core/src/main/java/org/apache/lucene/search/postingshighlight/CustomPassageFormatter.java +++ b/core/src/main/java/org/apache/lucene/search/postingshighlight/CustomPassageFormatter.java @@ -1,19 +1,20 @@ /* - * Licensed to Elasticsearch under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. Elasticsearch licenses this - * file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. */ package org.apache.lucene.search.postingshighlight; diff --git a/core/src/main/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighter.java b/core/src/main/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighter.java index 67373ef116d..30f57b2626c 100644 --- a/core/src/main/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighter.java +++ b/core/src/main/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighter.java @@ -1,19 +1,20 @@ /* - * Licensed to Elasticsearch under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. Elasticsearch licenses this - * file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. */ package org.apache.lucene.search.postingshighlight; diff --git a/core/src/main/java/org/apache/lucene/search/postingshighlight/Snippet.java b/core/src/main/java/org/apache/lucene/search/postingshighlight/Snippet.java index a756de65115..f3bfa1b9c65 100644 --- a/core/src/main/java/org/apache/lucene/search/postingshighlight/Snippet.java +++ b/core/src/main/java/org/apache/lucene/search/postingshighlight/Snippet.java @@ -1,19 +1,20 @@ /* - * Licensed to Elasticsearch under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. Elasticsearch licenses this - * file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. */ package org.apache.lucene.search.postingshighlight; diff --git a/core/src/main/java/org/elasticsearch/ElasticsearchException.java b/core/src/main/java/org/elasticsearch/ElasticsearchException.java index 87348f9dea5..62eb374733e 100644 --- a/core/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/core/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -482,7 +482,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte RESOURCE_NOT_FOUND_EXCEPTION(org.elasticsearch.ResourceNotFoundException.class, org.elasticsearch.ResourceNotFoundException::new, 19), ACTION_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ActionTransportException.class, org.elasticsearch.transport.ActionTransportException::new, 20), ELASTICSEARCH_GENERATION_EXCEPTION(org.elasticsearch.ElasticsearchGenerationException.class, org.elasticsearch.ElasticsearchGenerationException::new, 21), - CREATE_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.CreateFailedEngineException.class, org.elasticsearch.index.engine.CreateFailedEngineException::new, 22), + // 22 was CreateFailedEngineException INDEX_SHARD_STARTED_EXCEPTION(org.elasticsearch.index.shard.IndexShardStartedException.class, org.elasticsearch.index.shard.IndexShardStartedException::new, 23), SEARCH_CONTEXT_MISSING_EXCEPTION(org.elasticsearch.search.SearchContextMissingException.class, org.elasticsearch.search.SearchContextMissingException::new, 24), SCRIPT_EXCEPTION(org.elasticsearch.script.ScriptException.class, org.elasticsearch.script.ScriptException::new, 25), @@ -514,7 +514,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte INDEX_SHARD_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.index.IndexShardAlreadyExistsException.class, org.elasticsearch.index.IndexShardAlreadyExistsException::new, 51), VERSION_CONFLICT_ENGINE_EXCEPTION(org.elasticsearch.index.engine.VersionConflictEngineException.class, org.elasticsearch.index.engine.VersionConflictEngineException::new, 52), ENGINE_EXCEPTION(org.elasticsearch.index.engine.EngineException.class, org.elasticsearch.index.engine.EngineException::new, 53), - DOCUMENT_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.index.engine.DocumentAlreadyExistsException.class, org.elasticsearch.index.engine.DocumentAlreadyExistsException::new, 54), + // 54 was DocumentAlreadyExistsException, which is superseded by VersionConflictEngineException NO_SUCH_NODE_EXCEPTION(org.elasticsearch.action.NoSuchNodeException.class, org.elasticsearch.action.NoSuchNodeException::new, 55), SETTINGS_EXCEPTION(org.elasticsearch.common.settings.SettingsException.class, org.elasticsearch.common.settings.SettingsException::new, 56), 
INDEX_TEMPLATE_MISSING_EXCEPTION(org.elasticsearch.indices.IndexTemplateMissingException.class, org.elasticsearch.indices.IndexTemplateMissingException::new, 57), @@ -524,7 +524,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte ROUTING_VALIDATION_EXCEPTION(org.elasticsearch.cluster.routing.RoutingValidationException.class, org.elasticsearch.cluster.routing.RoutingValidationException::new, 61), NOT_SERIALIZABLE_EXCEPTION_WRAPPER(org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper.class, org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper::new, 62), ALIAS_FILTER_PARSING_EXCEPTION(org.elasticsearch.indices.AliasFilterParsingException.class, org.elasticsearch.indices.AliasFilterParsingException::new, 63), - DELETE_BY_QUERY_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.DeleteByQueryFailedEngineException.class, org.elasticsearch.index.engine.DeleteByQueryFailedEngineException::new, 64), + // 64 was DeleteByQueryFailedEngineException, which was removed in 3.0 GATEWAY_EXCEPTION(org.elasticsearch.gateway.GatewayException.class, org.elasticsearch.gateway.GatewayException::new, 65), INDEX_SHARD_NOT_RECOVERING_EXCEPTION(org.elasticsearch.index.shard.IndexShardNotRecoveringException.class, org.elasticsearch.index.shard.IndexShardNotRecoveringException::new, 66), HTTP_EXCEPTION(org.elasticsearch.http.HttpException.class, org.elasticsearch.http.HttpException::new, 67), diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java index 3fa9533269e..a610d8ddccb 100644 --- a/core/src/main/java/org/elasticsearch/Version.java +++ b/core/src/main/java/org/elasticsearch/Version.java @@ -259,6 +259,8 @@ public class Version { public static final Version V_2_0_0_beta1 = new Version(V_2_0_0_beta1_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1); public static final int V_2_0_0_beta2_ID = 2000002; public static final Version V_2_0_0_beta2 = new Version(V_2_0_0_beta2_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1); + public static final int V_2_0_0_rc1_ID = 2000051; + public static final Version V_2_0_0_rc1 = new Version(V_2_0_0_rc1_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1); public static final int V_2_0_0_ID = 2000099; public static final Version V_2_0_0 = new Version(V_2_0_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_2_1); public static final int V_2_1_0_ID = 2010099; @@ -287,6 +289,8 @@ public class Version { return V_2_1_0; case V_2_0_0_ID: return V_2_0_0; + case V_2_0_0_rc1_ID: + return V_2_0_0_rc1; case V_2_0_0_beta2_ID: return V_2_0_0_beta2; case V_2_0_0_beta1_ID: diff --git a/core/src/main/java/org/elasticsearch/action/ActionModule.java b/core/src/main/java/org/elasticsearch/action/ActionModule.java index cc41cc0ee8f..f8634b1c618 100644 --- a/core/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/core/src/main/java/org/elasticsearch/action/ActionModule.java @@ -121,8 +121,8 @@ import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeAction; import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeSettingsAction; import org.elasticsearch.action.admin.indices.validate.query.TransportValidateQueryAction; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction; -import org.elasticsearch.action.admin.indices.validate.template.RenderSearchTemplateAction; -import org.elasticsearch.action.admin.indices.validate.template.TransportRenderSearchTemplateAction; +import 
org.elasticsearch.action.admin.cluster.validate.template.RenderSearchTemplateAction; +import org.elasticsearch.action.admin.cluster.validate.template.TransportRenderSearchTemplateAction; import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerAction; import org.elasticsearch.action.admin.indices.warmer.delete.TransportDeleteWarmerAction; import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersAction; diff --git a/core/src/main/java/org/elasticsearch/action/UnavailableShardsException.java b/core/src/main/java/org/elasticsearch/action/UnavailableShardsException.java index dd0968ee4ea..ff31bb715db 100644 --- a/core/src/main/java/org/elasticsearch/action/UnavailableShardsException.java +++ b/core/src/main/java/org/elasticsearch/action/UnavailableShardsException.java @@ -21,6 +21,7 @@ package org.elasticsearch.action; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.collect.HppcMaps; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; @@ -32,8 +33,8 @@ import java.io.IOException; */ public class UnavailableShardsException extends ElasticsearchException { - public UnavailableShardsException(@Nullable ShardId shardId, String message) { - super(buildMessage(shardId, message)); + public UnavailableShardsException(@Nullable ShardId shardId, String message, Object... args) { + super(buildMessage(shardId, message), args); } private static String buildMessage(ShardId shardId, String message) { diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/template/RenderSearchTemplateAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/RenderSearchTemplateAction.java similarity index 92% rename from core/src/main/java/org/elasticsearch/action/admin/indices/validate/template/RenderSearchTemplateAction.java rename to core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/RenderSearchTemplateAction.java index 0cc71589576..427d0c4dc64 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/template/RenderSearchTemplateAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/RenderSearchTemplateAction.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.action.admin.indices.validate.template; +package org.elasticsearch.action.admin.cluster.validate.template; import org.elasticsearch.action.Action; import org.elasticsearch.client.ElasticsearchClient; @@ -25,7 +25,7 @@ import org.elasticsearch.client.ElasticsearchClient; public class RenderSearchTemplateAction extends Action { public static final RenderSearchTemplateAction INSTANCE = new RenderSearchTemplateAction(); - public static final String NAME = "indices:admin/render/template/search"; + public static final String NAME = "cluster:admin/render/template/search"; public RenderSearchTemplateAction() { super(NAME); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/template/RenderSearchTemplateRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/RenderSearchTemplateRequest.java similarity index 97% rename from core/src/main/java/org/elasticsearch/action/admin/indices/validate/template/RenderSearchTemplateRequest.java rename to core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/RenderSearchTemplateRequest.java index bde255f2f7d..a51090e1402 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/template/RenderSearchTemplateRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/RenderSearchTemplateRequest.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.action.admin.indices.validate.template; +package org.elasticsearch.action.admin.cluster.validate.template; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/template/RenderSearchTemplateRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/RenderSearchTemplateRequestBuilder.java similarity index 96% rename from core/src/main/java/org/elasticsearch/action/admin/indices/validate/template/RenderSearchTemplateRequestBuilder.java rename to core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/RenderSearchTemplateRequestBuilder.java index 493dc7edc34..f7e3da111b2 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/template/RenderSearchTemplateRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/RenderSearchTemplateRequestBuilder.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.action.admin.indices.validate.template; +package org.elasticsearch.action.admin.cluster.validate.template; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/template/RenderSearchTemplateResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/RenderSearchTemplateResponse.java similarity index 97% rename from core/src/main/java/org/elasticsearch/action/admin/indices/validate/template/RenderSearchTemplateResponse.java rename to core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/RenderSearchTemplateResponse.java index 2d3ca018ed2..d14a9a4f06a 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/template/RenderSearchTemplateResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/RenderSearchTemplateResponse.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.action.admin.indices.validate.template; +package org.elasticsearch.action.admin.cluster.validate.template; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.bytes.BytesReference; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/template/TransportRenderSearchTemplateAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/TransportRenderSearchTemplateAction.java similarity index 97% rename from core/src/main/java/org/elasticsearch/action/admin/indices/validate/template/TransportRenderSearchTemplateAction.java rename to core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/TransportRenderSearchTemplateAction.java index e9208ecb02c..5fe8297a6ba 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/template/TransportRenderSearchTemplateAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/TransportRenderSearchTemplateAction.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.action.admin.indices.validate.template; +package org.elasticsearch.action.admin.cluster.validate.template; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/segments/IndexShardSegments.java b/core/src/main/java/org/elasticsearch/action/admin/indices/segments/IndexShardSegments.java index 4132c7b6b91..7cbb664958d 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/segments/IndexShardSegments.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/segments/IndexShardSegments.java @@ -19,9 +19,9 @@ package org.elasticsearch.action.admin.indices.segments; -import com.google.common.collect.Iterators; import org.elasticsearch.index.shard.ShardId; +import java.util.Arrays; import java.util.Iterator; public class IndexShardSegments implements Iterable { @@ -49,6 +49,6 @@ public class IndexShardSegments implements Iterable { @Override public Iterator iterator() { - return Iterators.forArray(shards); + return Arrays.stream(shards).iterator(); } } \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexShardStats.java b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexShardStats.java index e59946815d8..dd4cc640a3d 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexShardStats.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexShardStats.java @@ -19,13 +19,13 @@ package org.elasticsearch.action.admin.indices.stats; -import com.google.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; +import java.util.Arrays; import java.util.Iterator; /** @@ -57,7 +57,7 @@ public class IndexShardStats implements Iterable, Streamable { @Override public Iterator iterator() { - return Iterators.forArray(shards); + return Arrays.stream(shards).iterator(); } private CommonStats total = null; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/IndexShardUpgradeStatus.java b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/IndexShardUpgradeStatus.java index e1cd16370c3..c4b5820e28b 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/IndexShardUpgradeStatus.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/IndexShardUpgradeStatus.java @@ -19,9 +19,9 @@ package org.elasticsearch.action.admin.indices.upgrade.get; -import com.google.common.collect.Iterators; import org.elasticsearch.index.shard.ShardId; +import java.util.Arrays; import java.util.Iterator; public class IndexShardUpgradeStatus implements Iterable { @@ -49,7 +49,7 @@ public class IndexShardUpgradeStatus implements Iterable { @Override public Iterator iterator() { - return Iterators.forArray(shards); + return Arrays.stream(shards).iterator(); } public long getTotalBytes() { diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java index 50525fb050c..2076086a8a9 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java @@ -19,13 +19,13 @@ package 
org.elasticsearch.action.bulk; -import com.google.common.collect.Iterators; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; import java.io.IOException; +import java.util.Arrays; import java.util.Iterator; /** @@ -95,7 +95,7 @@ public class BulkResponse extends ActionResponse implements Iterable iterator() { - return Iterators.forArray(responses); + return Arrays.stream(responses).iterator(); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index c0718859bb3..0f00b87b12a 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -47,7 +47,6 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.VersionType; -import org.elasticsearch.index.engine.DocumentAlreadyExistsException; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.mapper.Mapping; @@ -97,6 +96,7 @@ public class TransportShardBulkAction extends TransportReplicationAction implements Iterable, CompositeIndicesRequest, RealtimeRequest { @@ -498,7 +495,7 @@ public class MultiGetRequest extends ActionRequest implements I @Override public Iterator iterator() { - return Iterators.unmodifiableIterator(items.iterator()); + return Collections.unmodifiableCollection(items).iterator(); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java b/core/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java index 7abfb2b7d61..32e10b82de2 100644 --- a/core/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java +++ b/core/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java @@ -19,10 +19,8 @@ package org.elasticsearch.action.get; -import com.google.common.collect.Iterators; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.percolate.PercolateResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; @@ -31,7 +29,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilderString; import java.io.IOException; -import java.util.Collections; +import java.util.Arrays; import java.util.Iterator; public class MultiGetResponse extends ActionResponse implements Iterable, ToXContent { @@ -126,7 +124,7 @@ public class MultiGetResponse extends ActionResponse implements Iterable iterator() { - return Iterators.forArray(responses); + return Arrays.stream(responses).iterator(); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java index c171ae9af1a..ad7b9c11763 100644 --- a/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -49,14 +49,14 @@ import static 
org.elasticsearch.action.ValidateActions.addValidationError; /** * Index request to index a typed JSON document into a specific index and make it searchable. Best * created using {@link org.elasticsearch.client.Requests#indexRequest(String)}. - *
<p/>
+ * * The index requires the {@link #index()}, {@link #type(String)}, {@link #id(String)} and * {@link #source(byte[])} to be set. - *
<p/>
+ * * The source (content to index) can be set in its bytes form using ({@link #source(byte[])}), * its string form ({@link #source(String)}) or using a {@link org.elasticsearch.common.xcontent.XContentBuilder} * ({@link #source(org.elasticsearch.common.xcontent.XContentBuilder)}). - *
<p/>
+ * * If the {@link #id(String)} is not set, it will be automatically generated. * * @see IndexResponse @@ -114,7 +114,7 @@ public class IndexRequest extends ReplicationRequest implements Do public static OpType fromString(String sOpType) { String lowersOpType = sOpType.toLowerCase(Locale.ROOT); - switch(lowersOpType){ + switch (lowersOpType) { case "create": return OpType.CREATE; case "index": @@ -216,6 +216,14 @@ public class IndexRequest extends ReplicationRequest implements Do if (source == null) { validationException = addValidationError("source is missing", validationException); } + + if (opType() == OpType.CREATE) { + if (versionType != VersionType.INTERNAL || version != Versions.MATCH_DELETED) { + validationException = addValidationError("create operations do not support versioning. use index instead", validationException); + return validationException; + } + } + if (!versionType.validateVersionForWrites(version)) { validationException = addValidationError("illegal version value [" + version + "] for version type [" + versionType.name() + "]", validationException); } @@ -370,7 +378,7 @@ public class IndexRequest extends ReplicationRequest implements Do /** * Sets the document source to index. - *
<p/>
+ * * Note, its preferable to either set it using {@link #source(org.elasticsearch.common.xcontent.XContentBuilder)} * or using the {@link #source(byte[])}. */ @@ -480,6 +488,10 @@ public class IndexRequest extends ReplicationRequest implements Do */ public IndexRequest opType(OpType opType) { this.opType = opType; + if (opType == OpType.CREATE) { + version(Versions.MATCH_DELETED); + versionType(VersionType.INTERNAL); + } return this; } diff --git a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java index 3e98f1a32c2..63b82377d8a 100644 --- a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java @@ -54,7 +54,7 @@ import org.elasticsearch.transport.TransportService; /** * Performs the index operation. - *
<p/>
+ * * Allows for the following settings: *
<ul>
    *
  • autoCreateIndex: When set to true, will automatically create an index if one does not exists. @@ -167,6 +167,7 @@ public class TransportIndexAction extends TransportReplicationAction result = executeIndexRequestOnPrimary(null, request, indexShard); + final IndexResponse response = result.response; final Translog.Location location = result.location; processAfter(request.refresh(), indexShard, location); @@ -180,18 +181,12 @@ public class TransportIndexAction extends TransportReplicationAction iterator() { - return Iterators.forArray(items); + return Arrays.stream(items).iterator(); } /** diff --git a/core/src/main/java/org/elasticsearch/action/search/ClearScrollResponse.java b/core/src/main/java/org/elasticsearch/action/search/ClearScrollResponse.java index ffe476366f7..3540daa255c 100644 --- a/core/src/main/java/org/elasticsearch/action/search/ClearScrollResponse.java +++ b/core/src/main/java/org/elasticsearch/action/search/ClearScrollResponse.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.search; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.StatusToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentBuilderString; import org.elasticsearch.rest.RestStatus; import java.io.IOException; @@ -69,6 +69,8 @@ public class ClearScrollResponse extends ActionResponse implements StatusToXCont @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(Fields.SUCCEEDED, succeeded); + builder.field(Fields.NUMFREED, numFreed); return builder; } @@ -85,4 +87,10 @@ public class ClearScrollResponse extends ActionResponse implements StatusToXCont out.writeBoolean(succeeded); out.writeVInt(numFreed); } + + static final class Fields { + static final XContentBuilderString SUCCEEDED = new XContentBuilderString("succeeded"); + static final XContentBuilderString NUMFREED = new XContentBuilderString("num_freed"); + } + } diff --git a/core/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java b/core/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java index 80745652be3..0a9d6191030 100644 --- a/core/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java +++ b/core/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.search; -import com.google.common.collect.Iterators; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.Nullable; @@ -32,7 +31,7 @@ import org.elasticsearch.common.xcontent.XContentBuilderString; import org.elasticsearch.common.xcontent.XContentFactory; import java.io.IOException; -import java.util.Collections; +import java.util.Arrays; import java.util.Iterator; /** @@ -122,7 +121,7 @@ public class MultiSearchResponse extends ActionResponse implements Iterable iterator() { - return Iterators.forArray(items); + return Arrays.stream(items).iterator(); } /** diff --git a/core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java b/core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java index 118e1124c33..0597c26f636 100644 --- a/core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java +++ 
b/core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java @@ -30,6 +30,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.*; @@ -95,17 +96,22 @@ public abstract class TransportNodesAction listener; - private final ClusterState clusterState; private final AtomicReferenceArray responses; private final AtomicInteger counter = new AtomicInteger(); private AsyncAction(NodesRequest request, ActionListener listener) { this.request = request; this.listener = listener; - clusterState = clusterService.state(); + ClusterState clusterState = clusterService.state(); String[] nodesIds = resolveNodes(request, clusterState); this.nodesIds = filterNodeIds(clusterState.nodes(), nodesIds); + ImmutableOpenMap nodes = clusterState.nodes().nodes(); + this.nodes = new DiscoveryNode[nodesIds.length]; + for (int i = 0; i < nodesIds.length; i++) { + this.nodes[i] = nodes.get(nodesIds[i]); + } this.responses = new AtomicReferenceArray<>(this.nodesIds.length); } @@ -128,7 +134,7 @@ public abstract class TransportNodesAction executeIndexRequestOnPrimary(BulkShardRequest shardRequest, IndexRequest request, IndexShard indexShard) throws Throwable { - Engine.IndexingOperation operation = prepareIndexOperationOnPrimary(shardRequest, request, indexShard); + Engine.Index operation = prepareIndexOperationOnPrimary(shardRequest, request, indexShard); Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); final ShardId shardId = indexShard.shardId(); if (update != null) { @@ -1064,7 +1055,7 @@ public abstract class TransportReplicationAction listener) { + AsyncSingleAction(Request request, ActionListener listener) { this.request = request; this.listener = listener; } @@ -123,14 +125,14 @@ public abstract class TransportInstanceSingleOperationAction iterator() { - return Iterators.unmodifiableIterator(requests.iterator()); + return Collections.unmodifiableCollection(requests).iterator(); } public boolean isEmpty() { diff --git a/core/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java b/core/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java index fe013d540d2..6eb3b3277e6 100644 --- a/core/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java +++ b/core/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.termvectors; -import com.google.common.collect.Iterators; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; @@ -30,6 +29,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilderString; import java.io.IOException; +import java.util.Arrays; import java.util.Iterator; public class MultiTermVectorsResponse extends ActionResponse implements Iterable, ToXContent { @@ -120,7 +120,7 @@ public class MultiTermVectorsResponse extends ActionResponse implements Iterable @Override public Iterator iterator() { - return Iterators.forArray(responses); + return Arrays.stream(responses).iterator(); } @Override diff --git 
a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java index 7479416b122..2a639c83ad1 100644 --- a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java +++ b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java @@ -48,9 +48,8 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.engine.DocumentAlreadyExistsException; -import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndexAlreadyExistsException; import org.elasticsearch.indices.IndicesService; @@ -170,7 +169,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio final UpdateHelper.Result result = updateHelper.prepare(request, indexShard); switch (result.operation()) { case UPSERT: - IndexRequest upsertRequest = new IndexRequest((IndexRequest)result.action(), request); + IndexRequest upsertRequest = new IndexRequest(result.action(), request); // we fetch it from the index request so we don't generate the bytes twice, its already done in the index request final BytesReference upsertSourceBytes = upsertRequest.source(); indexAction.execute(upsertRequest, new ActionListener() { @@ -189,7 +188,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio @Override public void onFailure(Throwable e) { e = ExceptionsHelper.unwrapCause(e); - if (e instanceof VersionConflictEngineException || e instanceof DocumentAlreadyExistsException) { + if (e instanceof VersionConflictEngineException) { if (retryCount < request.retryOnConflict()) { threadPool.executor(executor()).execute(new ActionRunnable(listener) { @Override @@ -205,7 +204,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio }); break; case INDEX: - IndexRequest indexRequest = new IndexRequest((IndexRequest)result.action(), request); + IndexRequest indexRequest = new IndexRequest(result.action(), request); // we fetch it from the index request so we don't generate the bytes twice, its already done in the index request final BytesReference indexSourceBytes = indexRequest.source(); indexAction.execute(indexRequest, new ActionListener() { @@ -235,7 +234,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio }); break; case DELETE: - DeleteRequest deleteRequest = new DeleteRequest((DeleteRequest)result.action(), request); + DeleteRequest deleteRequest = new DeleteRequest(result.action(), request); deleteAction.execute(deleteRequest, new ActionListener() { @Override public void onResponse(DeleteResponse response) { diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index 542444b4097..9ebb2c97627 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.PidFile; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.cli.CliTool; import org.elasticsearch.common.cli.Terminal; -import 
org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.CreationException; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.logging.ESLogger; @@ -249,13 +248,13 @@ final class Bootstrap { Environment environment = initialSettings(foreground); Settings settings = environment.settings(); + setupLogging(settings, environment); + checkForCustomConfFile(); if (environment.pidFile() != null) { PidFile.create(environment.pidFile(), true); } - setupLogging(settings, environment); - if (System.getProperty("es.max-open-files", "false").equals("true")) { ESLogger logger = Loggers.getLogger(Bootstrap.class); logger.info("max_open_files [{}]", ProcessProbe.getInstance().getMaxFileDescriptorCount()); @@ -330,4 +329,21 @@ final class Bootstrap { System.err.flush(); } } + + private static void checkForCustomConfFile() { + String confFileSetting = System.getProperty("es.default.config"); + checkUnsetAndMaybeExit(confFileSetting, "es.default.config"); + confFileSetting = System.getProperty("es.config"); + checkUnsetAndMaybeExit(confFileSetting, "es.config"); + confFileSetting = System.getProperty("elasticsearch.config"); + checkUnsetAndMaybeExit(confFileSetting, "elasticsearch.config"); + } + + private static void checkUnsetAndMaybeExit(String confFileSetting, String settingName) { + if (confFileSetting != null && confFileSetting.isEmpty() == false) { + ESLogger logger = Loggers.getLogger(Bootstrap.class); + logger.info("{} is no longer supported. elasticsearch.yml must be placed in the config directory and cannot be renamed.", settingName); + System.exit(1); + } + } } diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Security.java b/core/src/main/java/org/elasticsearch/bootstrap/Security.java index b27048d22bd..66dda6ee774 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Security.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Security.java @@ -165,7 +165,7 @@ final class Security { Map m = new HashMap<>(); m.put("repository-s3", "org.elasticsearch.plugin.repository.s3.S3RepositoryPlugin"); m.put("discovery-ec2", "org.elasticsearch.plugin.discovery.ec2.Ec2DiscoveryPlugin"); - m.put("cloud-gce", "org.elasticsearch.plugin.cloud.gce.CloudGcePlugin"); + m.put("discovery-gce", "org.elasticsearch.plugin.discovery.gce.GceDiscoveryPlugin"); m.put("lang-expression", "org.elasticsearch.script.expression.ExpressionPlugin"); m.put("lang-groovy", "org.elasticsearch.script.groovy.GroovyPlugin"); m.put("lang-javascript", "org.elasticsearch.plugin.javascript.JavaScriptPlugin"); diff --git a/core/src/main/java/org/elasticsearch/client/Client.java b/core/src/main/java/org/elasticsearch/client/Client.java index eafac2b6509..6e0b0b27fd9 100644 --- a/core/src/main/java/org/elasticsearch/client/Client.java +++ b/core/src/main/java/org/elasticsearch/client/Client.java @@ -21,9 +21,6 @@ package org.elasticsearch.client; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.indices.validate.template.RenderSearchTemplateRequest; -import org.elasticsearch.action.admin.indices.validate.template.RenderSearchTemplateRequestBuilder; -import org.elasticsearch.action.admin.indices.validate.template.RenderSearchTemplateResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; diff --git a/core/src/main/java/org/elasticsearch/client/ClusterAdminClient.java 
b/core/src/main/java/org/elasticsearch/client/ClusterAdminClient.java index c3eb51585c2..1be22b257e7 100644 --- a/core/src/main/java/org/elasticsearch/client/ClusterAdminClient.java +++ b/core/src/main/java/org/elasticsearch/client/ClusterAdminClient.java @@ -77,6 +77,9 @@ import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse; import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequest; import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequestBuilder; import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse; +import org.elasticsearch.action.admin.cluster.validate.template.RenderSearchTemplateRequest; +import org.elasticsearch.action.admin.cluster.validate.template.RenderSearchTemplateRequestBuilder; +import org.elasticsearch.action.admin.cluster.validate.template.RenderSearchTemplateResponse; /** * Administrative actions/operations against indices. @@ -423,4 +426,25 @@ public interface ClusterAdminClient extends ElasticsearchClient { */ SnapshotsStatusRequestBuilder prepareSnapshotStatus(); + + /** + * Return the rendered search request for a given search template. + * + * @param request The request + * @return The result future + */ + ActionFuture renderSearchTemplate(RenderSearchTemplateRequest request); + + /** + * Return the rendered search request for a given search template. + * + * @param request The request + * @param listener A listener to be notified of the result + */ + void renderSearchTemplate(RenderSearchTemplateRequest request, ActionListener listener); + + /** + * Return the rendered search request for a given search template. + */ + RenderSearchTemplateRequestBuilder prepareRenderSearchTemplate(); } diff --git a/core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java b/core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java index 755bf333e59..75cae17ea9d 100644 --- a/core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java +++ b/core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java @@ -105,9 +105,6 @@ import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeResponse; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse; -import org.elasticsearch.action.admin.indices.validate.template.RenderSearchTemplateRequest; -import org.elasticsearch.action.admin.indices.validate.template.RenderSearchTemplateRequestBuilder; -import org.elasticsearch.action.admin.indices.validate.template.RenderSearchTemplateResponse; import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerRequest; import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerRequestBuilder; import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse; @@ -746,27 +743,6 @@ public interface IndicesAdminClient extends ElasticsearchClient { */ ValidateQueryRequestBuilder prepareValidateQuery(String... indices); - /** - * Return the rendered search request for a given search template. - * - * @param request The request - * @return The result future - */ - ActionFuture renderSearchTemplate(RenderSearchTemplateRequest request); - - /** - * Return the rendered search request for a given search template. 
- * - * @param request The request - * @param listener A listener to be notified of the result - */ - void renderSearchTemplate(RenderSearchTemplateRequest request, ActionListener listener); - - /** - * Return the rendered search request for a given search template. - */ - RenderSearchTemplateRequestBuilder prepareRenderSearchTemplate(); - /** * Puts an index search warmer to be applies when applicable. */ diff --git a/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java index f9abf2f0437..3fa5d789905 100644 --- a/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java +++ b/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java @@ -208,10 +208,10 @@ import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse; -import org.elasticsearch.action.admin.indices.validate.template.RenderSearchTemplateAction; -import org.elasticsearch.action.admin.indices.validate.template.RenderSearchTemplateRequest; -import org.elasticsearch.action.admin.indices.validate.template.RenderSearchTemplateRequestBuilder; -import org.elasticsearch.action.admin.indices.validate.template.RenderSearchTemplateResponse; +import org.elasticsearch.action.admin.cluster.validate.template.RenderSearchTemplateAction; +import org.elasticsearch.action.admin.cluster.validate.template.RenderSearchTemplateRequest; +import org.elasticsearch.action.admin.cluster.validate.template.RenderSearchTemplateRequestBuilder; +import org.elasticsearch.action.admin.cluster.validate.template.RenderSearchTemplateResponse; import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerAction; import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerRequest; import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerRequestBuilder; @@ -1142,6 +1142,21 @@ public abstract class AbstractClient extends AbstractComponent implements Client public SnapshotsStatusRequestBuilder prepareSnapshotStatus() { return new SnapshotsStatusRequestBuilder(this, SnapshotsStatusAction.INSTANCE); } + + @Override + public ActionFuture renderSearchTemplate(final RenderSearchTemplateRequest request) { + return execute(RenderSearchTemplateAction.INSTANCE, request); + } + + @Override + public void renderSearchTemplate(final RenderSearchTemplateRequest request, final ActionListener listener) { + execute(RenderSearchTemplateAction.INSTANCE, request, listener); + } + + @Override + public RenderSearchTemplateRequestBuilder prepareRenderSearchTemplate() { + return new RenderSearchTemplateRequestBuilder(this, RenderSearchTemplateAction.INSTANCE); + } } static class IndicesAdmin implements IndicesAdminClient { @@ -1617,21 +1632,6 @@ public abstract class AbstractClient extends AbstractComponent implements Client return new ValidateQueryRequestBuilder(this, ValidateQueryAction.INSTANCE).setIndices(indices); } - @Override - public ActionFuture renderSearchTemplate(final RenderSearchTemplateRequest request) { - return execute(RenderSearchTemplateAction.INSTANCE, request); - } - - @Override - public void renderSearchTemplate(final RenderSearchTemplateRequest request, final ActionListener listener) { - execute(RenderSearchTemplateAction.INSTANCE, request, 
listener); - } - - @Override - public RenderSearchTemplateRequestBuilder prepareRenderSearchTemplate() { - return new RenderSearchTemplateRequestBuilder(this, RenderSearchTemplateAction.INSTANCE); - } - @Override public ActionFuture putWarmer(PutWarmerRequest request) { return execute(PutWarmerAction.INSTANCE, request); diff --git a/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java b/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java index b13c7991b5e..e3925aa6f4e 100644 --- a/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java +++ b/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java @@ -73,7 +73,7 @@ public class MappingUpdatedAction extends AbstractComponent { throw new IllegalArgumentException("_default_ mapping should not be updated"); } return client.preparePutMapping(index).setType(type).setSource(mappingUpdate.toString()) - .setMasterNodeTimeout(timeout).setTimeout(timeout); + .setMasterNodeTimeout(timeout).setTimeout(timeout); } public void updateMappingOnMaster(String index, String type, Mapping mappingUpdate, final TimeValue timeout, final MappingUpdateListener listener) { diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index 6ea1d0e6e61..9d110170f52 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -21,7 +21,6 @@ package org.elasticsearch.cluster.metadata; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.Diffable; @@ -29,8 +28,6 @@ import org.elasticsearch.cluster.DiffableUtils; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.node.DiscoveryNodeFilters; -import org.elasticsearch.cluster.routing.HashFunction; -import org.elasticsearch.cluster.routing.Murmur3HashFunction; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -167,16 +164,12 @@ public class IndexMetaData implements Diffable, FromXContentBuild public static final String SETTING_PRIORITY = "index.priority"; public static final String SETTING_CREATION_DATE_STRING = "index.creation_date_string"; public static final String SETTING_INDEX_UUID = "index.uuid"; - public static final String SETTING_LEGACY_ROUTING_HASH_FUNCTION = "index.legacy.routing.hash.type"; - public static final String SETTING_LEGACY_ROUTING_USE_TYPE = "index.legacy.routing.use_type"; public static final String SETTING_DATA_PATH = "index.data_path"; public static final String SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE = "index.shared_filesystem.recover_on_any_node"; public static final String INDEX_UUID_NA_VALUE = "_na_"; - // hard-coded hash function as of 2.0 - // older indices will read which hash function to use in their index settings - private static final HashFunction MURMUR3_HASH_FUNCTION = new Murmur3HashFunction(); + private final String index; private final long version; @@ -200,8 +193,6 @@ public class IndexMetaData implements Diffable, FromXContentBuild private final Version 
indexCreatedVersion; private final Version indexUpgradedVersion; private final org.apache.lucene.util.Version minimumCompatibleLuceneVersion; - private final HashFunction routingHashFunction; - private final boolean useTypeForRouting; private IndexMetaData(String index, long version, State state, Settings settings, ImmutableOpenMap mappings, ImmutableOpenMap aliases, ImmutableOpenMap customs) { if (settings.getAsInt(SETTING_NUMBER_OF_SHARDS, null) == null) { @@ -249,23 +240,6 @@ public class IndexMetaData implements Diffable, FromXContentBuild } else { this.minimumCompatibleLuceneVersion = null; } - final String hashFunction = settings.get(SETTING_LEGACY_ROUTING_HASH_FUNCTION); - if (hashFunction == null) { - routingHashFunction = MURMUR3_HASH_FUNCTION; - } else { - final Class hashFunctionClass; - try { - hashFunctionClass = Class.forName(hashFunction).asSubclass(HashFunction.class); - } catch (ClassNotFoundException|NoClassDefFoundError e) { - throw new ElasticsearchException("failed to load custom hash function [" + hashFunction + "]", e); - } - try { - routingHashFunction = hashFunctionClass.newInstance(); - } catch (InstantiationException | IllegalAccessException e) { - throw new IllegalStateException("Cannot instantiate hash function", e); - } - } - useTypeForRouting = settings.getAsBoolean(SETTING_LEGACY_ROUTING_USE_TYPE, false); } public String index() { @@ -335,29 +309,6 @@ public class IndexMetaData implements Diffable, FromXContentBuild return minimumCompatibleLuceneVersion; } - /** - * Return the {@link HashFunction} that should be used for routing. - */ - public HashFunction routingHashFunction() { - return routingHashFunction; - } - - public HashFunction getRoutingHashFunction() { - return routingHashFunction(); - } - - /** - * Return whether routing should use the _type in addition to the _id in - * order to decide which shard a document should go to. 
- */ - public boolean routingUseType() { - return useTypeForRouting; - } - - public boolean getRoutingUseType() { - return routingUseType(); - } - public long creationDate() { return settings.getAsLong(SETTING_CREATION_DATE, -1l); } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java index a17fe044dcb..cdde49170d4 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java @@ -21,11 +21,7 @@ package org.elasticsearch.cluster.metadata; import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.lucene.analysis.Analyzer; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; -import org.elasticsearch.cluster.routing.DjbHashFunction; -import org.elasticsearch.cluster.routing.HashFunction; -import org.elasticsearch.cluster.routing.SimpleHashFunction; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; @@ -34,8 +30,7 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.analysis.AnalysisService; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.similarity.SimilarityLookupService; -import org.elasticsearch.index.store.IndexStoreModule; +import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.script.ScriptService; import java.util.Locale; @@ -54,47 +49,12 @@ import static org.elasticsearch.common.util.set.Sets.newHashSet; */ public class MetaDataIndexUpgradeService extends AbstractComponent { - private static final String DEPRECATED_SETTING_ROUTING_HASH_FUNCTION = "cluster.routing.operation.hash.type"; - private static final String DEPRECATED_SETTING_ROUTING_USE_TYPE = "cluster.routing.operation.use_type"; - - private final Class pre20HashFunction; - private final Boolean pre20UseType; private final ScriptService scriptService; @Inject public MetaDataIndexUpgradeService(Settings settings, ScriptService scriptService) { super(settings); this.scriptService = scriptService; - final String pre20HashFunctionName = settings.get(DEPRECATED_SETTING_ROUTING_HASH_FUNCTION, null); - final boolean hasCustomPre20HashFunction = pre20HashFunctionName != null; - // the hash function package has changed we replace the two hash functions if their fully qualified name is used. 
- if (hasCustomPre20HashFunction) { - switch (pre20HashFunctionName) { - case "Simple": - case "simple": - case "org.elasticsearch.cluster.routing.operation.hash.simple.SimpleHashFunction": - pre20HashFunction = SimpleHashFunction.class; - break; - case "Djb": - case "djb": - case "org.elasticsearch.cluster.routing.operation.hash.djb.DjbHashFunction": - pre20HashFunction = DjbHashFunction.class; - break; - default: - try { - pre20HashFunction = Class.forName(pre20HashFunctionName).asSubclass(HashFunction.class); - } catch (ClassNotFoundException|NoClassDefFoundError e) { - throw new ElasticsearchException("failed to load custom hash function [" + pre20HashFunctionName + "]", e); - } - } - } else { - pre20HashFunction = DjbHashFunction.class; - } - pre20UseType = settings.getAsBoolean(DEPRECATED_SETTING_ROUTING_USE_TYPE, null); - if (hasCustomPre20HashFunction || pre20UseType != null) { - logger.warn("Settings [{}] and [{}] are deprecated. Index settings from your old indices have been updated to record the fact that they " - + "used some custom routing logic, you can now remove these settings from your `elasticsearch.yml` file", DEPRECATED_SETTING_ROUTING_HASH_FUNCTION, DEPRECATED_SETTING_ROUTING_USE_TYPE); - } } /** @@ -110,68 +70,29 @@ public class MetaDataIndexUpgradeService extends AbstractComponent { return indexMetaData; } checkSupportedVersion(indexMetaData); - IndexMetaData newMetaData = upgradeLegacyRoutingSettings(indexMetaData); + IndexMetaData newMetaData = indexMetaData; newMetaData = addDefaultUnitsIfNeeded(newMetaData); checkMappingsCompatibility(newMetaData); - newMetaData = upgradeSettings(newMetaData); newMetaData = markAsUpgraded(newMetaData); return newMetaData; } - IndexMetaData upgradeSettings(IndexMetaData indexMetaData) { - final String storeType = indexMetaData.getSettings().get(IndexStoreModule.STORE_TYPE); - if (storeType != null) { - final String upgradeStoreType; - switch (storeType.toLowerCase(Locale.ROOT)) { - case "nio_fs": - case "niofs": - upgradeStoreType = "niofs"; - break; - case "mmap_fs": - case "mmapfs": - upgradeStoreType = "mmapfs"; - break; - case "simple_fs": - case "simplefs": - upgradeStoreType = "simplefs"; - break; - case "default": - upgradeStoreType = "default"; - break; - case "fs": - upgradeStoreType = "fs"; - break; - default: - upgradeStoreType = storeType; - } - if (storeType.equals(upgradeStoreType) == false) { - Settings indexSettings = Settings.builder().put(indexMetaData.settings()) - .put(IndexStoreModule.STORE_TYPE, upgradeStoreType) - .build(); - return IndexMetaData.builder(indexMetaData) - .version(indexMetaData.version()) - .settings(indexSettings) - .build(); - } - } - return indexMetaData; - } /** * Checks if the index was already opened by this version of Elasticsearch and doesn't require any additional checks. */ private boolean isUpgraded(IndexMetaData indexMetaData) { - return indexMetaData.upgradeVersion().onOrAfter(Version.V_2_0_0_beta1); + return indexMetaData.upgradeVersion().onOrAfter(Version.V_3_0_0); } /** - * Elasticsearch 2.0 no longer supports indices with pre Lucene v4.0 (Elasticsearch v 0.90.0) segments. All indices - * that were created before Elasticsearch v0.90.0 should be upgraded using upgrade plugin before they can + * Elasticsearch 3.0 no longer supports indices with pre Lucene v5.0 (Elasticsearch v2.0.0.beta1) segments. All indices + * that were created before Elasticsearch v2.0.0.beta1 should be upgraded using the upgrade API before they can + * be opened by this version of elasticsearch.
*/ private void checkSupportedVersion(IndexMetaData indexMetaData) { if (indexMetaData.getState() == IndexMetaData.State.OPEN && isSupportedVersion(indexMetaData) == false) { - throw new IllegalStateException("The index [" + indexMetaData.getIndex() + "] was created before v0.90.0 and wasn't upgraded." + throw new IllegalStateException("The index [" + indexMetaData.getIndex() + "] was created before v2.0.0.beta1 and wasn't upgraded." + " This index should be opened using a version before " + Version.CURRENT.minimumCompatibilityVersion() + " and upgraded using the upgrade API."); } @@ -181,44 +102,18 @@ public class MetaDataIndexUpgradeService extends AbstractComponent { * Returns true if this index can be supported by the current version of elasticsearch */ private static boolean isSupportedVersion(IndexMetaData indexMetaData) { - if (indexMetaData.creationVersion().onOrAfter(Version.V_0_90_0_Beta1)) { - // The index was created with elasticsearch that was using Lucene 4.0 + if (indexMetaData.creationVersion().onOrAfter(Version.V_2_0_0_beta1)) { + // The index was created with an Elasticsearch version that was using Lucene 5.2.1 return true; } if (indexMetaData.getMinimumCompatibleVersion() != null && - indexMetaData.getMinimumCompatibleVersion().onOrAfter(org.apache.lucene.util.Version.LUCENE_4_0_0)) { + indexMetaData.getMinimumCompatibleVersion().onOrAfter(org.apache.lucene.util.Version.LUCENE_5_0_0)) { // The index was upgraded, so we can work with it return true; } return false; } - /** - * Elasticsearch 2.0 deprecated custom routing hash functions. So what we do here is that for old indices, we - * move this old and deprecated node setting to an index setting so that we can keep things backward compatible. - */ - private IndexMetaData upgradeLegacyRoutingSettings(IndexMetaData indexMetaData) { - if (indexMetaData.settings().get(IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION) == null - && indexMetaData.getCreationVersion().before(Version.V_2_0_0_beta1)) { - // these settings need an upgrade - Settings indexSettings = Settings.builder().put(indexMetaData.settings()) - .put(IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION, pre20HashFunction) - .put(IndexMetaData.SETTING_LEGACY_ROUTING_USE_TYPE, pre20UseType == null ? false : pre20UseType) - .build(); - return IndexMetaData.builder(indexMetaData) - .version(indexMetaData.version()) - .settings(indexSettings) - .build(); - } else if (indexMetaData.getCreationVersion().onOrAfter(Version.V_2_0_0_beta1)) { - if (indexMetaData.getSettings().get(IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION) != null - || indexMetaData.getSettings().get(IndexMetaData.SETTING_LEGACY_ROUTING_USE_TYPE) != null) { - throw new IllegalStateException("Index [" + indexMetaData.getIndex() + "] created on or after 2.0 should NOT contain [" + IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION - + "] + or [" + IndexMetaData.SETTING_LEGACY_ROUTING_USE_TYPE + "] in its index settings"); - } - } - return indexMetaData; - } - /** All known byte-sized settings for an index.
*/ public static final Set INDEX_BYTES_SIZE_SETTINGS = unmodifiableSet(newHashSet( "index.merge.policy.floor_segment", @@ -322,11 +217,11 @@ public class MetaDataIndexUpgradeService extends AbstractComponent { Index index = new Index(indexMetaData.getIndex()); Settings settings = indexMetaData.settings(); try { - SimilarityLookupService similarityLookupService = new SimilarityLookupService(index, settings); + SimilarityService similarityService = new SimilarityService(index, settings); // We cannot instantiate real analysis server at this point because the node might not have // been started yet. However, we don't really need real analyzers at this stage - so we can fake it try (AnalysisService analysisService = new FakeAnalysisService(index, settings)) { - try (MapperService mapperService = new MapperService(index, settings, analysisService, similarityLookupService, scriptService)) { + try (MapperService mapperService = new MapperService(index, settings, analysisService, similarityService, scriptService)) { for (ObjectCursor cursor : indexMetaData.getMappings().values()) { MappingMetaData mappingMetaData = cursor.value; mapperService.merge(mappingMetaData.type(), mappingMetaData.source(), false, false); diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataService.java index 2f2155367d3..ca482ea604f 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataService.java @@ -19,7 +19,7 @@ package org.elasticsearch.cluster.metadata; -import org.elasticsearch.cluster.routing.DjbHashFunction; +import org.elasticsearch.cluster.routing.Murmur3HashFunction; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.math.MathUtils; @@ -43,6 +43,6 @@ public class MetaDataService extends AbstractComponent { } public Semaphore indexMetaDataLock(String index) { - return indexMdLocks[MathUtils.mod(DjbHashFunction.DJB_HASH(index), indexMdLocks.length)]; + return indexMdLocks[MathUtils.mod(Murmur3HashFunction.hash(index), indexMdLocks.length)]; } } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/DjbHashFunction.java b/core/src/main/java/org/elasticsearch/cluster/routing/DjbHashFunction.java deleted file mode 100644 index 7616bd382e1..00000000000 --- a/core/src/main/java/org/elasticsearch/cluster/routing/DjbHashFunction.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.cluster.routing; - -import org.elasticsearch.cluster.routing.HashFunction; - -/** - * This class implements the efficient hash function - * developed by Daniel J. Bernstein. - */ -public class DjbHashFunction implements HashFunction { - - public static int DJB_HASH(String value) { - long hash = 5381; - - for (int i = 0; i < value.length(); i++) { - hash = ((hash << 5) + hash) + value.charAt(i); - } - - return (int) hash; - } - - public static int DJB_HASH(byte[] value, int offset, int length) { - long hash = 5381; - - final int end = offset + length; - for (int i = offset; i < end; i++) { - hash = ((hash << 5) + hash) + value[i]; - } - - return (int) hash; - } - - @Override - public int hash(String routing) { - return DJB_HASH(routing); - } - - @Override - public int hash(String type, String id) { - long hash = 5381; - - for (int i = 0; i < type.length(); i++) { - hash = ((hash << 5) + hash) + type.charAt(i); - } - - for (int i = 0; i < id.length(); i++) { - hash = ((hash << 5) + hash) + id.charAt(i); - } - - return (int) hash; - } -} diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/HashFunction.java b/core/src/main/java/org/elasticsearch/cluster/routing/HashFunction.java deleted file mode 100644 index 99977eeccb2..00000000000 --- a/core/src/main/java/org/elasticsearch/cluster/routing/HashFunction.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.cluster.routing; - -/** - * Simple hash function interface used for shard routing. - */ -public interface HashFunction { - - /** - * Calculate a hash value for routing - * @param routing String to calculate the hash value from - * @return hash value of the given routing string - */ - int hash(String routing); - - /** - * Calculate a hash value for routing and its type - * @param type types name - * @param id String to calculate the hash value from - * @return hash value of the given type and routing string - */ - @Deprecated - int hash(String type, String id); -} diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/Murmur3HashFunction.java b/core/src/main/java/org/elasticsearch/cluster/routing/Murmur3HashFunction.java index 7ca602a3574..4752271ec47 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/Murmur3HashFunction.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/Murmur3HashFunction.java @@ -20,15 +20,17 @@ package org.elasticsearch.cluster.routing; import org.apache.lucene.util.StringHelper; -import org.elasticsearch.cluster.routing.HashFunction; /** * Hash function based on the Murmur3 algorithm, which is the default as of Elasticsearch 2.0. 
*/ -public class Murmur3HashFunction implements HashFunction { +public final class Murmur3HashFunction { - @Override - public int hash(String routing) { + private Murmur3HashFunction() { + //no instance + } + + public static int hash(String routing) { final byte[] bytesToHash = new byte[routing.length() * 2]; for (int i = 0; i < routing.length(); ++i) { final char c = routing.charAt(i); @@ -37,12 +39,10 @@ public class Murmur3HashFunction implements HashFunction { bytesToHash[i * 2] = b1; bytesToHash[i * 2 + 1] = b2; } - return StringHelper.murmurhash3_x86_32(bytesToHash, 0, bytesToHash.length, 0); + return hash(bytesToHash, 0, bytesToHash.length); } - @Override - public int hash(String type, String id) { - throw new UnsupportedOperationException(); + public static int hash(byte[] bytes, int offset, int length) { + return StringHelper.murmurhash3_x86_32(bytes, offset, length, 0); } - } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java b/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java index 411a1ed6817..c142b754aa2 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.routing; -import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -47,7 +46,6 @@ import java.util.Set; public class OperationRouting extends AbstractComponent { - private final AwarenessAllocationDecider awarenessAllocationDecider; @Inject @@ -196,9 +194,9 @@ public class OperationRouting extends AbstractComponent { // if not, then use it as the index String[] awarenessAttributes = awarenessAllocationDecider.awarenessAttributes(); if (awarenessAttributes.length == 0) { - return indexShard.activeInitializingShardsIt(DjbHashFunction.DJB_HASH(preference)); + return indexShard.activeInitializingShardsIt(Murmur3HashFunction.hash(preference)); } else { - return indexShard.preferAttributesActiveInitializingShardsIt(awarenessAttributes, nodes, DjbHashFunction.DJB_HASH(preference)); + return indexShard.preferAttributesActiveInitializingShardsIt(awarenessAttributes, nodes, Murmur3HashFunction.hash(preference)); } } @@ -237,37 +235,13 @@ public class OperationRouting extends AbstractComponent { @SuppressForbidden(reason = "Math#abs is trappy") private int shardId(ClusterState clusterState, String index, String type, String id, @Nullable String routing) { final IndexMetaData indexMetaData = indexMetaData(clusterState, index); - final Version createdVersion = indexMetaData.getCreationVersion(); - final HashFunction hashFunction = indexMetaData.getRoutingHashFunction(); - final boolean useType = indexMetaData.getRoutingUseType(); - final int hash; if (routing == null) { - if (!useType) { - hash = hash(hashFunction, id); - } else { - hash = hash(hashFunction, type, id); - } + hash = Murmur3HashFunction.hash(id); } else { - hash = hash(hashFunction, routing); + hash = Murmur3HashFunction.hash(routing); } - if (createdVersion.onOrAfter(Version.V_2_0_0_beta1)) { - return MathUtils.mod(hash, indexMetaData.numberOfShards()); - } else { - return Math.abs(hash % indexMetaData.numberOfShards()); - } - } - - protected int hash(HashFunction hashFunction, String routing) { - return hashFunction.hash(routing); - } - - @Deprecated - protected int hash(HashFunction hashFunction, String type, 
String id) { - if (type == null || "_all".equals(type)) { - throw new IllegalArgumentException("Can't route an operation with no type and having type part of the routing (for backward comp)"); - } - return hashFunction.hash(type, id); + return MathUtils.mod(hash, indexMetaData.numberOfShards()); } private void ensureNodeIdExists(DiscoveryNodes nodes, String nodeId) { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java index 43ad6aff1ec..596bb97887c 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java @@ -19,13 +19,10 @@ package org.elasticsearch.cluster.routing; -import com.google.common.collect.Iterators; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.collect.Iterators; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Iterator; -import java.util.List; +import java.util.*; /** * A {@link RoutingNode} represents a cluster node associated with a single {@link DiscoveryNode} including all shards @@ -51,7 +48,7 @@ public class RoutingNode implements Iterable { @Override public Iterator iterator() { - return Iterators.unmodifiableIterator(shards.iterator()); + return Collections.unmodifiableCollection(shards).iterator(); } Iterator mutableIterator() { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java index f632428e27a..d5ed922b120 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java @@ -21,7 +21,6 @@ package org.elasticsearch.cluster.routing; import com.carrotsearch.hppc.ObjectIntHashMap; import com.carrotsearch.hppc.cursors.ObjectCursor; -import com.google.common.collect.Iterators; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.cluster.ClusterState; @@ -153,7 +152,7 @@ public class RoutingNodes implements Iterable { @Override public Iterator iterator() { - return Iterators.unmodifiableIterator(nodesToShards.values().iterator()); + return Collections.unmodifiableCollection(nodesToShards.values()).iterator(); } public RoutingTable routingTable() { diff --git a/core/src/main/java/org/elasticsearch/common/cache/Cache.java b/core/src/main/java/org/elasticsearch/common/cache/Cache.java new file mode 100644 index 00000000000..d2d6970fe9e --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/cache/Cache.java @@ -0,0 +1,690 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
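For context on the routing changes above, a minimal sketch of how a shard id is now resolved; it mirrors the simplified shardId() logic, and the method name and numberOfShards parameter are assumed here for illustration only:

import org.elasticsearch.cluster.routing.Murmur3HashFunction;
import org.elasticsearch.common.math.MathUtils;

// Illustrative sketch: hash the explicit routing value if present, otherwise the document id,
// then map the hash onto a shard with a non-negative modulo.
static int exampleShardId(String id, String routing, int numberOfShards) {
    final int hash = Murmur3HashFunction.hash(routing == null ? id : routing);
    // MathUtils.mod never yields a negative bucket, unlike the trappy Math.abs(hash % n)
    return MathUtils.mod(hash, numberOfShards);
}

The same static hash also drives shard preference selection and the metadata lock striping in MetaDataService, which is what allows the instance-based HashFunction abstraction to be deleted.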
+ */ + +package org.elasticsearch.common.cache; + +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.util.concurrent.ReleasableLock; + +import java.util.*; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.LongAdder; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.ToLongBiFunction; + +/** + * A simple concurrent cache. + *

    + * Cache is a simple concurrent cache that supports time-based and weight-based evictions, with notifications for all + * evictions. The design goals for this cache were simplicity and read performance. This means that we are willing to + * accept reduced write performance in exchange for easy-to-understand code. Cache statistics for hits, misses and + * evictions are exposed. + *

    + * The design of the cache is relatively simple. The cache is segmented into 256 segments which are backed by HashMaps. + * Each segment is protected by a re-entrant read/write lock. The read/write locks permit multiple concurrent readers + * without contention, and the segments gives us write throughput without impacting readers (so readers are blocked only + * if they are reading a segment that a writer is writing to). + *

    + * The LRU functionality is backed by a single doubly-linked list chaining the entries in order of insertion. This + * LRU list is protected by a lock that serializes all writes to it. There are opportunities for improvements + * here if write throughput is a concern. + *

      + *
    1. LRU list mutations could be inserted into a blocking queue that a single thread is reading from + * and applying to the LRU list.
    2. + *
    3. Promotions could be deferred for entries that were "recently" promoted.
    4. + *
    5. Locks on the list could be taken per node being modified instead of globally.
    6. + *
    + *

    + * Evictions only occur after a mutation to the cache (meaning an entry promotion, a cache insertion, or a manual + * invalidation) or an explicit call to {@link #refresh()}. + * + * @param The type of the keys + * @param The type of the values + */ +public class Cache { + // positive if entries have an expiration + private long expireAfterAccess = -1; + + // true if entries can expire after access + private boolean entriesExpireAfterAccess; + + // positive if entries have an expiration after write + private long expireAfterWrite = -1; + + // true if entries can expire after initial insertion + private boolean entriesExpireAfterWrite; + + // the number of entries in the cache + private int count = 0; + + // the weight of the entries in the cache + private long weight = 0; + + // the maximum weight that this cache supports + private long maximumWeight = -1; + + // the weigher of entries + private ToLongBiFunction weigher = (k, v) -> 1; + + // the removal callback + private RemovalListener removalListener = notification -> { + }; + + // use CacheBuilder to construct + Cache() { + } + + void setExpireAfterAccess(long expireAfterAccess) { + if (expireAfterAccess <= 0) { + throw new IllegalArgumentException("expireAfterAccess <= 0"); + } + this.expireAfterAccess = expireAfterAccess; + this.entriesExpireAfterAccess = true; + } + + void setExpireAfterWrite(long expireAfterWrite) { + if (expireAfterWrite <= 0) { + throw new IllegalArgumentException("expireAfterWrite <= 0"); + } + this.expireAfterWrite = expireAfterWrite; + this.entriesExpireAfterWrite = true; + } + + void setMaximumWeight(long maximumWeight) { + if (maximumWeight < 0) { + throw new IllegalArgumentException("maximumWeight < 0"); + } + this.maximumWeight = maximumWeight; + } + + void setWeigher(ToLongBiFunction weigher) { + Objects.requireNonNull(weigher); + this.weigher = weigher; + } + + void setRemovalListener(RemovalListener removalListener) { + Objects.requireNonNull(removalListener); + this.removalListener = removalListener; + } + + /** + * The relative time used to track time-based evictions. + * + * @return the current relative time + */ + protected long now() { + // System.nanoTime takes non-negligible time, so we only use it if we need it + // use System.nanoTime because we want relative time, not absolute time + return entriesExpireAfterAccess || entriesExpireAfterWrite ? System.nanoTime() : 0; + } + + // the state of an entry in the LRU list + enum State { + NEW, EXISTING, DELETED + } + + static class Entry { + final K key; + final V value; + long writeTime; + volatile long accessTime; + Entry before; + Entry after; + State state = State.NEW; + + public Entry(K key, V value, long writeTime) { + this.key = key; + this.value = value; + this.writeTime = this.accessTime = writeTime; + } + } + + /** + * A cache segment. + *

    + * A CacheSegment is backed by a HashMap and is protected by a read/write lock. + * + * @param the type of the keys + * @param the type of the values + */ + private static class CacheSegment { + // read/write lock protecting mutations to the segment + ReadWriteLock segmentLock = new ReentrantReadWriteLock(); + + ReleasableLock readLock = new ReleasableLock(segmentLock.readLock()); + ReleasableLock writeLock = new ReleasableLock(segmentLock.writeLock()); + + Map> map = new HashMap<>(); + SegmentStats segmentStats = new SegmentStats(); + + /** + * get an entry from the segment + * + * @param key the key of the entry to get from the cache + * @param now the access time of this entry + * @return the entry if there was one, otherwise null + */ + Entry get(K key, long now) { + Entry entry; + try (ReleasableLock ignored = readLock.acquire()) { + entry = map.get(key); + } + if (entry != null) { + segmentStats.hit(); + entry.accessTime = now; + } else { + segmentStats.miss(); + } + return entry; + } + + /** + * put an entry into the segment + * + * @param key the key of the entry to add to the cache + * @param value the value of the entry to add to the cache + * @param now the access time of this entry + * @return a tuple of the new entry and the existing entry, if there was one otherwise null + */ + Tuple, Entry> put(K key, V value, long now) { + Entry entry = new Entry<>(key, value, now); + Entry existing; + try (ReleasableLock ignored = writeLock.acquire()) { + existing = map.put(key, entry); + } + return Tuple.tuple(entry, existing); + } + + /** + * remove an entry from the segment + * + * @param key the key of the entry to remove from the cache + * @return the removed entry if there was one, otherwise null + */ + Entry remove(K key) { + Entry entry; + try (ReleasableLock ignored = writeLock.acquire()) { + entry = map.remove(key); + } + if (entry != null) { + segmentStats.eviction(); + } + return entry; + } + + private static class SegmentStats { + private final LongAdder hits = new LongAdder(); + private final LongAdder misses = new LongAdder(); + private final LongAdder evictions = new LongAdder(); + + void hit() { + hits.increment(); + } + + void miss() { + misses.increment(); + } + + void eviction() { + evictions.increment(); + } + } + } + + public static final int NUMBER_OF_SEGMENTS = 256; + private final CacheSegment[] segments = new CacheSegment[NUMBER_OF_SEGMENTS]; + + { + for (int i = 0; i < segments.length; i++) { + segments[i] = new CacheSegment<>(); + } + } + + Entry head; + Entry tail; + + // lock protecting mutations to the LRU list + private ReleasableLock lruLock = new ReleasableLock(new ReentrantLock()); + + /** + * Returns the value to which the specified key is mapped, or null if this map contains no mapping for the key. + * + * @param key the key whose associated value is to be returned + * @return the value to which the specified key is mapped, or null if this map contains no mapping for the key + */ + public V get(K key) { + return get(key, now()); + } + + private V get(K key, long now) { + CacheSegment segment = getCacheSegment(key); + Entry entry = segment.get(key, now); + if (entry == null || isExpired(entry, now)) { + return null; + } else { + promote(entry, now); + return entry.value; + } + } + + /** + * If the specified key is not already associated with a value (or is mapped to null), attempts to compute its + * value using the given mapping function and enters it into this map unless null. 
+ * + * @param key the key whose associated value is to be returned or computed for if non-existent + * @param loader the function to compute a value given a key + * @return the current (existing or computed) value associated with the specified key, or null if the computed + * value is null + * @throws ExecutionException thrown if loader throws an exception + */ + public V computeIfAbsent(K key, CacheLoader loader) throws ExecutionException { + long now = now(); + V value = get(key, now); + if (value == null) { + CacheSegment segment = getCacheSegment(key); + // we synchronize against the segment lock; this is to avoid a scenario where another thread is inserting + // a value for the same key via put which would not be observed on this thread without a mechanism + // synchronizing the two threads; it is possible that the segment lock will be too expensive here (it blocks + // readers too!) so consider this as a possible place to optimize should contention be observed + try (ReleasableLock ignored = segment.writeLock.acquire()) { + value = get(key, now); + if (value == null) { + try { + value = loader.load(key); + } catch (Exception e) { + throw new ExecutionException(e); + } + if (value == null) { + throw new ExecutionException(new NullPointerException("loader returned a null value")); + } + put(key, value, now); + } + } + } + return value; + } + + /** + * Associates the specified value with the specified key in this map. If the map previously contained a mapping for + * the key, the old value is replaced. + * + * @param key key with which the specified value is to be associated + * @param value value to be associated with the specified key + */ + public void put(K key, V value) { + long now = now(); + put(key, value, now); + } + + private void put(K key, V value, long now) { + CacheSegment segment = getCacheSegment(key); + Tuple, Entry> tuple = segment.put(key, value, now); + boolean replaced = false; + try (ReleasableLock ignored = lruLock.acquire()) { + if (tuple.v2() != null && tuple.v2().state == State.EXISTING) { + if (unlink(tuple.v2())) { + replaced = true; + } + } + promote(tuple.v1(), now); + } + if (replaced) { + removalListener.onRemoval(new RemovalNotification(tuple.v2().key, tuple.v2().value, RemovalNotification.RemovalReason.REPLACED)); + } + } + + /** + * Invalidate the association for the specified key. A removal notification will be issued for invalidated + * entries with {@link org.elasticsearch.common.cache.RemovalNotification.RemovalReason} INVALIDATED. + * + * @param key the key whose mapping is to be invalidated from the cache + */ + public void invalidate(K key) { + CacheSegment segment = getCacheSegment(key); + Entry entry = segment.remove(key); + if (entry != null) { + try (ReleasableLock ignored = lruLock.acquire()) { + delete(entry, RemovalNotification.RemovalReason.INVALIDATED); + } + } + } + + /** + * Invalidate all cache entries. A removal notification will be issued for invalidated entries with + * {@link org.elasticsearch.common.cache.RemovalNotification.RemovalReason} INVALIDATED. 
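A usage sketch for the computeIfAbsent contract above; the cache construction, key, and loader are assumed example values, and generic parameters are written out for clarity:

import org.elasticsearch.common.cache.Cache;
import org.elasticsearch.common.cache.CacheBuilder;
import java.util.concurrent.ExecutionException;

// Illustrative sketch: load a missing value at most once per key.
Cache<String, String> cache = CacheBuilder.<String, String>builder().build();
try {
    // the loader runs under the segment write lock, and only when the key is absent or expired
    String value = cache.computeIfAbsent("key", k -> "value-for-" + k);
} catch (ExecutionException e) {
    // raised when the loader threw, or when it returned null
}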
+ */ + public void invalidateAll() { + Entry h; + + boolean[] haveSegmentLock = new boolean[NUMBER_OF_SEGMENTS]; + try { + for (int i = 0; i < NUMBER_OF_SEGMENTS; i++) { + segments[i].segmentLock.writeLock().lock(); + haveSegmentLock[i] = true; + } + try (ReleasableLock ignored = lruLock.acquire()) { + h = head; + Arrays.stream(segments).forEach(segment -> segment.map = new HashMap<>()); + Entry current = head; + while (current != null) { + current.state = State.DELETED; + current = current.after; + } + head = tail = null; + count = 0; + weight = 0; + } + } finally { + for (int i = NUMBER_OF_SEGMENTS - 1; i >= 0; i--) { + if (haveSegmentLock[i]) { + segments[i].segmentLock.writeLock().unlock(); + } + } + } + while (h != null) { + removalListener.onRemoval(new RemovalNotification<>(h.key, h.value, RemovalNotification.RemovalReason.INVALIDATED)); + h = h.after; + } + } + + /** + * Force any outstanding size-based and time-based evictions to occur + */ + public void refresh() { + long now = now(); + try (ReleasableLock ignored = lruLock.acquire()) { + evict(now); + } + } + + /** + * The number of entries in the cache. + * + * @return the number of entries in the cache + */ + public int count() { + return count; + } + + /** + * The weight of the entries in the cache. + * + * @return the weight of the entries in the cache + */ + public long weight() { + return weight; + } + + /** + * An LRU sequencing of the keys in the cache that supports removal. This sequence is not protected from mutations + * to the cache (except for {@link Iterator#remove()}). The result of iteration under any other mutation is + * undefined. + * + * @return an LRU-ordered {@link Iterable} over the keys in the cache + */ + public Iterable keys() { + return () -> new Iterator() { + private CacheIterator iterator = new CacheIterator(head); + + @Override + public boolean hasNext() { + return iterator.hasNext(); + } + + @Override + public K next() { + return iterator.next().key; + } + + @Override + public void remove() { + iterator.remove(); + } + }; + } + + /** + * An LRU sequencing of the values in the cache. This sequence is not protected from mutations + * to the cache. The result of iteration under mutation is undefined. + * + * @return an LRU-ordered {@link Iterable} over the values in the cache + */ + public Iterable values() { + return () -> new Iterator() { + private CacheIterator iterator = new CacheIterator(head); + + @Override + public boolean hasNext() { + return iterator.hasNext(); + } + + @Override + public V next() { + return iterator.next().value; + } + }; + } + + private class CacheIterator implements Iterator> { + private Entry current; + private Entry next; + + CacheIterator(Entry head) { + current = null; + next = head; + } + + @Override + public boolean hasNext() { + return next != null; + } + + @Override + public Entry next() { + current = next; + next = next.after; + return current; + } + + @Override + public void remove() { + Entry entry = current; + if (entry != null) { + CacheSegment segment = getCacheSegment(entry.key); + segment.remove(entry.key); + try (ReleasableLock ignored = lruLock.acquire()) { + current = null; + delete(entry, RemovalNotification.RemovalReason.INVALIDATED); + } + } + } + } + + /** + * The cache statistics tracking hits, misses and evictions. These are taken on a best-effort basis meaning that + * they could be out-of-date mid-flight. 
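Reading those best-effort statistics can look like the following sketch (the cache instance is an assumed example):

// Illustrative sketch: totals are summed across the 256 segment counters without locking,
// so concurrent mutations may not be reflected yet.
Cache.CacheStats stats = cache.stats();
long total = stats.getHits() + stats.getMisses();
long evictions = stats.getEvictions();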
+ * + * @return the current cache statistics + */ + public CacheStats stats() { + long hits = 0; + long misses = 0; + long evictions = 0; + for (int i = 0; i < segments.length; i++) { + hits += segments[i].segmentStats.hits.longValue(); + misses += segments[i].segmentStats.misses.longValue(); + evictions += segments[i].segmentStats.evictions.longValue(); + } + return new CacheStats(hits, misses, evictions); + } + + public static class CacheStats { + private long hits; + private long misses; + private long evictions; + + public CacheStats(long hits, long misses, long evictions) { + this.hits = hits; + this.misses = misses; + this.evictions = evictions; + } + + public long getHits() { + return hits; + } + + public long getMisses() { + return misses; + } + + public long getEvictions() { + return evictions; + } + } + + private boolean promote(Entry entry, long now) { + boolean promoted = true; + try (ReleasableLock ignored = lruLock.acquire()) { + switch (entry.state) { + case DELETED: + promoted = false; + break; + case EXISTING: + relinkAtHead(entry); + break; + case NEW: + linkAtHead(entry); + break; + } + if (promoted) { + evict(now); + } + } + return promoted; + } + + private void evict(long now) { + assert lruLock.isHeldByCurrentThread(); + + while (tail != null && shouldPrune(tail, now)) { + CacheSegment segment = getCacheSegment(tail.key); + Entry entry = tail; + if (segment != null) { + segment.remove(tail.key); + } + delete(entry, RemovalNotification.RemovalReason.EVICTED); + } + } + + private void delete(Entry entry, RemovalNotification.RemovalReason removalReason) { + assert lruLock.isHeldByCurrentThread(); + + if (unlink(entry)) { + removalListener.onRemoval(new RemovalNotification<>(entry.key, entry.value, removalReason)); + } + } + + private boolean shouldPrune(Entry entry, long now) { + return exceedsWeight() || isExpired(entry, now); + } + + private boolean exceedsWeight() { + return maximumWeight != -1 && weight > maximumWeight; + } + + private boolean isExpired(Entry entry, long now) { + return (entriesExpireAfterAccess && now - entry.accessTime > expireAfterAccess) || + (entriesExpireAfterWrite && now - entry.writeTime > expireAfterWrite); + } + + private boolean unlink(Entry entry) { + assert lruLock.isHeldByCurrentThread(); + + if (entry.state == State.EXISTING) { + final Entry before = entry.before; + final Entry after = entry.after; + + if (before == null) { + // removing the head + assert head == entry; + head = after; + if (head != null) { + head.before = null; + } + } else { + // removing inner element + before.after = after; + entry.before = null; + } + + if (after == null) { + // removing tail + assert tail == entry; + tail = before; + if (tail != null) { + tail.after = null; + } + } else { + // removing inner element + after.before = before; + entry.after = null; + } + + count--; + weight -= weigher.applyAsLong(entry.key, entry.value); + entry.state = State.DELETED; + return true; + } else { + return false; + } + } + + private void linkAtHead(Entry entry) { + assert lruLock.isHeldByCurrentThread(); + + Entry h = head; + entry.before = null; + entry.after = head; + head = entry; + if (h == null) { + tail = entry; + } else { + h.before = entry; + } + + count++; + weight += weigher.applyAsLong(entry.key, entry.value); + entry.state = State.EXISTING; + } + + private void relinkAtHead(Entry entry) { + assert lruLock.isHeldByCurrentThread(); + + if (head != entry) { + unlink(entry); + linkAtHead(entry); + } + } + + private CacheSegment getCacheSegment(K key) { + return 
segments[key.hashCode() & 0xff]; + } +} diff --git a/core/src/main/java/org/elasticsearch/common/cache/CacheBuilder.java b/core/src/main/java/org/elasticsearch/common/cache/CacheBuilder.java new file mode 100644 index 00000000000..ffb0e591180 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/cache/CacheBuilder.java @@ -0,0 +1,94 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.cache; + +import java.util.Objects; +import java.util.function.ToLongBiFunction; + +public class CacheBuilder { + private long maximumWeight = -1; + private long expireAfterAccess = -1; + private long expireAfterWrite = -1; + private ToLongBiFunction weigher; + private RemovalListener removalListener; + + public static CacheBuilder builder() { + return new CacheBuilder<>(); + } + + private CacheBuilder() { + } + + public CacheBuilder setMaximumWeight(long maximumWeight) { + if (maximumWeight < 0) { + throw new IllegalArgumentException("maximumWeight < 0"); + } + this.maximumWeight = maximumWeight; + return this; + } + + public CacheBuilder setExpireAfterAccess(long expireAfterAccess) { + if (expireAfterAccess <= 0) { + throw new IllegalArgumentException("expireAfterAccess <= 0"); + } + this.expireAfterAccess = expireAfterAccess; + return this; + } + + public CacheBuilder setExpireAfterWrite(long expireAfterWrite) { + if (expireAfterWrite <= 0) { + throw new IllegalArgumentException("expireAfterWrite <= 0"); + } + this.expireAfterWrite = expireAfterWrite; + return this; + } + + public CacheBuilder weigher(ToLongBiFunction weigher) { + Objects.requireNonNull(weigher); + this.weigher = weigher; + return this; + } + + public CacheBuilder removalListener(RemovalListener removalListener) { + Objects.requireNonNull(removalListener); + this.removalListener = removalListener; + return this; + } + + public Cache build() { + Cache cache = new Cache(); + if (maximumWeight != -1) { + cache.setMaximumWeight(maximumWeight); + } + if (expireAfterAccess != -1) { + cache.setExpireAfterAccess(expireAfterAccess); + } + if (expireAfterWrite != -1) { + cache.setExpireAfterWrite(expireAfterWrite); + } + if (weigher != null) { + cache.setWeigher(weigher); + } + if (removalListener != null) { + cache.setRemovalListener(removalListener); + } + return cache; + } +} diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/SimpleHashFunction.java b/core/src/main/java/org/elasticsearch/common/cache/CacheLoader.java similarity index 65% rename from core/src/main/java/org/elasticsearch/cluster/routing/SimpleHashFunction.java rename to core/src/main/java/org/elasticsearch/common/cache/CacheLoader.java index bbb6a6174da..85636e1e186 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/SimpleHashFunction.java +++ 
b/core/src/main/java/org/elasticsearch/common/cache/CacheLoader.java @@ -17,20 +17,9 @@ * under the License. */ -package org.elasticsearch.cluster.routing; +package org.elasticsearch.common.cache; -/** - * This class implements a simple hash function based on Java Build-In {@link Object#hashCode()} - */ -public class SimpleHashFunction implements HashFunction { - - @Override - public int hash(String routing) { - return routing.hashCode(); - } - - @Override - public int hash(String type, String id) { - return type.hashCode() + 31 * id.hashCode(); - } +@FunctionalInterface +public interface CacheLoader { + V load(K key) throws Exception; } diff --git a/core/src/main/java/org/elasticsearch/index/LocalNodeIdModule.java b/core/src/main/java/org/elasticsearch/common/cache/RemovalListener.java similarity index 64% rename from core/src/main/java/org/elasticsearch/index/LocalNodeIdModule.java rename to core/src/main/java/org/elasticsearch/common/cache/RemovalListener.java index 82e36cd6efb..ae133000f76 100644 --- a/core/src/main/java/org/elasticsearch/index/LocalNodeIdModule.java +++ b/core/src/main/java/org/elasticsearch/common/cache/RemovalListener.java @@ -17,23 +17,9 @@ * under the License. */ -package org.elasticsearch.index; +package org.elasticsearch.common.cache; -import org.elasticsearch.common.inject.AbstractModule; - -/** - * - */ -public class LocalNodeIdModule extends AbstractModule { - - private final String localNodeId; - - public LocalNodeIdModule(String localNodeId) { - this.localNodeId = localNodeId; - } - - @Override - protected void configure() { - bind(String.class).annotatedWith(LocalNodeId.class).toInstance(localNodeId); - } -} +@FunctionalInterface +public interface RemovalListener { + void onRemoval(RemovalNotification notification); +} \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/index/LocalNodeId.java b/core/src/main/java/org/elasticsearch/common/cache/RemovalNotification.java similarity index 56% rename from core/src/main/java/org/elasticsearch/index/LocalNodeId.java rename to core/src/main/java/org/elasticsearch/common/cache/RemovalNotification.java index a045636a688..afea5a54480 100644 --- a/core/src/main/java/org/elasticsearch/index/LocalNodeId.java +++ b/core/src/main/java/org/elasticsearch/common/cache/RemovalNotification.java @@ -17,24 +17,30 @@ * under the License. 
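Putting the new cache pieces together, a hypothetical construction sketch; every size and expiry value below is a made-up example, and expiry is given in nanoseconds to match the System.nanoTime() clock used by now():

import org.elasticsearch.common.cache.Cache;
import org.elasticsearch.common.cache.CacheBuilder;
import java.util.concurrent.TimeUnit;

// Illustrative sketch of CacheBuilder wiring.
Cache<String, byte[]> cache = CacheBuilder.<String, byte[]>builder()
        .setMaximumWeight(1024 * 1024)                     // evict once the summed weights exceed this
        .setExpireAfterAccess(TimeUnit.MINUTES.toNanos(5)) // entries idle for five minutes expire
        .weigher((key, value) -> value.length)             // weigh each entry by its payload size
        .removalListener(notification -> {
            // notification.getRemovalReason() is REPLACED, INVALIDATED or EVICTED
        })
        .build();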
*/ -package org.elasticsearch.index; +package org.elasticsearch.common.cache; -import org.elasticsearch.common.inject.BindingAnnotation; +public class RemovalNotification { + public enum RemovalReason {REPLACED, INVALIDATED, EVICTED} -import java.lang.annotation.Documented; -import java.lang.annotation.Retention; -import java.lang.annotation.Target; + private final K key; + private final V value; + private final RemovalReason removalReason; -import static java.lang.annotation.ElementType.FIELD; -import static java.lang.annotation.ElementType.PARAMETER; -import static java.lang.annotation.RetentionPolicy.RUNTIME; + public RemovalNotification(K key, V value, RemovalReason removalReason) { + this.key = key; + this.value = value; + this.removalReason = removalReason; + } -/** - * - */ -@BindingAnnotation -@Target({FIELD, PARAMETER}) -@Retention(RUNTIME) -@Documented -public @interface LocalNodeId { + public K getKey() { + return key; + } + + public V getValue() { + return value; + } + + public RemovalReason getRemovalReason() { + return removalReason; + } } diff --git a/core/src/main/java/org/elasticsearch/common/collect/EvictingQueue.java b/core/src/main/java/org/elasticsearch/common/collect/EvictingQueue.java new file mode 100644 index 00000000000..51cc08d0209 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/collect/EvictingQueue.java @@ -0,0 +1,176 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.collect; + +import java.util.ArrayDeque; +import java.util.Collection; +import java.util.Iterator; +import java.util.Queue; + +/** + * An {@code EvictingQueue} is a non-blocking queue which is limited to a maximum size; when new elements are added to a + * full queue, elements are evicted from the head of the queue to accommodate the new elements. + * + * @param The type of elements in the queue. + */ +public class EvictingQueue implements Queue { + private final int maximumSize; + private final ArrayDeque queue; + + /** + * Construct a new {@code EvictingQueue} that holds {@code maximumSize} elements. + * + * @param maximumSize The maximum number of elements that the queue can hold + * @throws IllegalArgumentException if {@code maximumSize} is less than zero + */ + public EvictingQueue(int maximumSize) { + if (maximumSize < 0) { + throw new IllegalArgumentException("maximumSize < 0"); + } + this.maximumSize = maximumSize; + this.queue = new ArrayDeque<>(maximumSize); + } + + /** + * @return the number of additional elements that the queue can accommodate before evictions occur + */ + public int remainingCapacity() { + return this.maximumSize - this.size(); + } + + /** + * Add the given element to the queue, possibly forcing an eviction from the head if {@link #remainingCapacity()} is + * zero. 
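The head-eviction behaviour described above, as a short sketch with illustrative values:

import org.elasticsearch.common.collect.EvictingQueue;

// Illustrative sketch: a queue bounded to three elements.
EvictingQueue<Integer> queue = new EvictingQueue<>(3);
queue.add(1);
queue.add(2);
queue.add(3);
queue.add(4); // the queue is full, so the head (1) is evicted to make room
// the queue now holds 2, 3, 4 and remainingCapacity() == 0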
+ * + * @param t the element to add + * @return true if the element was added (always the case for {@code EvictingQueue}) + */ + @Override + public boolean add(T t) { + if (maximumSize == 0) { + return true; + } + if (queue.size() == maximumSize) { + queue.remove(); + } + queue.add(t); + return true; + } + + /** + * @see #add(Object) + */ + @Override + public boolean offer(T t) { + return add(t); + } + + @Override + public T remove() { + return queue.remove(); + } + + + @Override + public T poll() { + return queue.poll(); + } + + @Override + public T element() { + return queue.element(); + } + + @Override + public T peek() { + return queue.peek(); + } + + @Override + public int size() { + return queue.size(); + } + + @Override + public boolean isEmpty() { + return queue.isEmpty(); + } + + @Override + public boolean contains(Object o) { + return queue.contains(o); + } + + @Override + public Iterator iterator() { + return queue.iterator(); + } + + @Override + public Object[] toArray() { + return queue.toArray(); + } + + @Override + public T1[] toArray(T1[] a) { + return queue.toArray(a); + } + + @Override + public boolean remove(Object o) { + return queue.remove(o); + } + + @Override + public boolean containsAll(Collection c) { + return queue.containsAll(c); + } + + /** + * Add the given elements to the queue, possibly forcing evictions from the head if {@link #remainingCapacity()} is + * zero or becomes zero during the execution of this method. + * + * @param c the collection of elements to add + * @return true if any elements were added to the queue + */ + @Override + public boolean addAll(Collection c) { + boolean modified = false; + for (T e : c) + if (add(e)) + modified = true; + return modified; + } + + @Override + public boolean removeAll(Collection c) { + return queue.removeAll(c); + } + + @Override + public boolean retainAll(Collection c) { + return queue.retainAll(c); + } + + @Override + public void clear() { + queue.clear(); + } +} diff --git a/core/src/main/java/org/elasticsearch/common/collect/Iterators.java b/core/src/main/java/org/elasticsearch/common/collect/Iterators.java new file mode 100644 index 00000000000..34546120b0a --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/collect/Iterators.java @@ -0,0 +1,68 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.collect; + +import java.util.Iterator; +import java.util.NoSuchElementException; + +public class Iterators { + public static Iterator concat(Iterator... 
iterators) { + if (iterators == null) { + throw new NullPointerException("iterators"); + } + + return new ConcatenatedIterator<>(iterators); + } + + static class ConcatenatedIterator implements Iterator { + private final Iterator[] iterators; + private int index = 0; + + public ConcatenatedIterator(Iterator... iterators) { + if (iterators == null) { + throw new NullPointerException("iterators"); + } + for (int i = 0; i < iterators.length; i++) { + if (iterators[i] == null) { + throw new NullPointerException("iterators[" + i + "]"); + } + } + this.iterators = iterators; + } + + @Override + public boolean hasNext() { + boolean hasNext = false; + while (index < iterators.length && !(hasNext = iterators[index].hasNext())) { + index++; + } + + return hasNext; + } + + @Override + public T next() { + if (!hasNext()) { + throw new NoSuchElementException(); + } + return iterators[index].next(); + } + } +} diff --git a/core/src/main/java/org/elasticsearch/common/geo/GeoPoint.java b/core/src/main/java/org/elasticsearch/common/geo/GeoPoint.java index c50b85a835c..0bc9455bea3 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/GeoPoint.java +++ b/core/src/main/java/org/elasticsearch/common/geo/GeoPoint.java @@ -19,12 +19,6 @@ package org.elasticsearch.common.geo; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; - -import java.io.IOException; - import org.apache.lucene.util.BitUtil; import org.apache.lucene.util.XGeoHashUtils; import org.apache.lucene.util.XGeoUtils; @@ -32,15 +26,12 @@ import org.apache.lucene.util.XGeoUtils; /** * */ -public final class GeoPoint implements Writeable { +public final class GeoPoint { private double lat; private double lon; private final static double TOLERANCE = XGeoUtils.TOLERANCE; - // for serialization purposes - private static final GeoPoint PROTOTYPE = new GeoPoint(Double.NaN, Double.NaN); - public GeoPoint() { } @@ -179,21 +170,4 @@ public final class GeoPoint implements Writeable { public static GeoPoint fromIndexLong(long indexLong) { return new GeoPoint().resetFromIndexHash(indexLong); } - - @Override - public GeoPoint readFrom(StreamInput in) throws IOException { - double lat = in.readDouble(); - double lon = in.readDouble(); - return new GeoPoint(lat, lon); - } - - public static GeoPoint readGeoPointFrom(StreamInput in) throws IOException { - return PROTOTYPE.readFrom(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeDouble(lat); - out.writeDouble(lon); - } } diff --git a/core/src/main/java/org/elasticsearch/common/inject/internal/Nullability.java b/core/src/main/java/org/elasticsearch/common/inject/internal/Nullability.java index aad0c3a5ef3..bb057d6040d 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/internal/Nullability.java +++ b/core/src/main/java/org/elasticsearch/common/inject/internal/Nullability.java @@ -1,3 +1,19 @@ +/* + * Copyright (C) 2010 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
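A quick sketch of the new concat helper; the input lists are example data:

import org.elasticsearch.common.collect.Iterators;
import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;

// Illustrative sketch: exhausted iterators are skipped transparently.
Iterator<String> it = Iterators.concat(
        Arrays.asList("a", "b").iterator(),
        Collections.<String>emptyIterator(),
        Arrays.asList("c").iterator());
while (it.hasNext()) {
    System.out.print(it.next()); // prints: abc
}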
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.elasticsearch.common.inject.internal; import java.lang.annotation.Annotation; diff --git a/core/src/main/java/org/elasticsearch/common/io/FileSystemUtils.java b/core/src/main/java/org/elasticsearch/common/io/FileSystemUtils.java index cb5ca5fec64..e53e7a73eb7 100644 --- a/core/src/main/java/org/elasticsearch/common/io/FileSystemUtils.java +++ b/core/src/main/java/org/elasticsearch/common/io/FileSystemUtils.java @@ -19,9 +19,8 @@ package org.elasticsearch.common.io; -import com.google.common.collect.Iterators; - import org.apache.lucene.util.IOUtils; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.logging.ESLogger; import java.io.BufferedReader; @@ -35,6 +34,7 @@ import java.nio.file.*; import java.nio.file.attribute.BasicFileAttributes; import java.util.Arrays; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.StreamSupport; import static java.nio.file.FileVisitResult.CONTINUE; import static java.nio.file.FileVisitResult.SKIP_SUBTREE; @@ -328,7 +328,7 @@ public final class FileSystemUtils { */ public static Path[] files(Path from, DirectoryStream.Filter filter) throws IOException { try (DirectoryStream stream = Files.newDirectoryStream(from, filter)) { - return Iterators.toArray(stream.iterator(), Path.class); + return toArray(stream); } } @@ -337,7 +337,7 @@ public final class FileSystemUtils { */ public static Path[] files(Path directory) throws IOException { try (DirectoryStream stream = Files.newDirectoryStream(directory)) { - return Iterators.toArray(stream.iterator(), Path.class); + return toArray(stream); } } @@ -346,8 +346,12 @@ public final class FileSystemUtils { */ public static Path[] files(Path directory, String glob) throws IOException { try (DirectoryStream stream = Files.newDirectoryStream(directory, glob)) { - return Iterators.toArray(stream.iterator(), Path.class); + return toArray(stream); } } + private static Path[] toArray(DirectoryStream stream) { + return StreamSupport.stream(stream.spliterator(), false).toArray(length -> new Path[length]); + } + } diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java index 17d99951209..a6fc0914dbe 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.text.StringAndBytesText; import org.elasticsearch.common.text.Text; import org.elasticsearch.index.query.QueryBuilder; @@ -336,19 +337,6 @@ public abstract class StreamInput extends InputStream { @Override public abstract void close() throws IOException; -// // IS -// -// @Override public int read() throws IOException { -// return readByte(); -// } -// -// // Here, we assume that we always can read the full byte array -// -// @Override public int read(byte[] b, int off, int len) throws IOException { -// readBytes(b, off, len); -// return len; -// } - public String[] readStringArray() throws IOException { int size = readVInt(); if (size == 0) { @@ -449,11 +437,20 @@ public abstract class StreamInput extends InputStream 
{ return readDoubleArray(); case 21: return readBytesRef(); + case 22: + return readGeoPoint(); default: throw new IOException("Can't read unknown type [" + type + "]"); } } + /** + * Reads a {@link GeoPoint} from this stream input + */ + public GeoPoint readGeoPoint() throws IOException { + return new GeoPoint(readDouble(), readDouble()); + } + public int[] readIntArray() throws IOException { int length = readVInt(); int[] values = new int[length]; diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java index 16128e40c64..3e4aabb6284 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java @@ -30,6 +30,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.text.Text; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder; @@ -422,6 +423,9 @@ public abstract class StreamOutput extends OutputStream { } else if (value instanceof BytesRef) { writeByte((byte) 21); writeBytesRef((BytesRef) value); + } else if (type == GeoPoint.class) { + writeByte((byte) 22); + writeGeoPoint((GeoPoint) value); } else { throw new IOException("Can't write type [" + type + "]"); } @@ -467,14 +471,6 @@ public abstract class StreamOutput extends OutputStream { } } - private static int parseIntSafe(String val, int defaultVal) { - try { - return Integer.parseInt(val); - } catch (NumberFormatException ex) { - return defaultVal; - } - } - public void writeThrowable(Throwable throwable) throws IOException { if (throwable == null) { writeBoolean(false); @@ -596,4 +592,12 @@ public abstract class StreamOutput extends OutputStream { public void writeScoreFunction(ScoreFunctionBuilder scoreFunctionBuilder) throws IOException { writeNamedWriteable(scoreFunctionBuilder); } + + /** + * Writes the given {@link GeoPoint} to the stream + */ + public void writeGeoPoint(GeoPoint geoPoint) throws IOException { + writeDouble(geoPoint.lat()); + writeDouble(geoPoint.lon()); + } } diff --git a/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java index 060482e2e8a..3aaaf9677b8 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -46,14 +46,11 @@ import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.index.analysis.AnalyzerScope; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.fielddata.IndexFieldData; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.text.ParseException; import java.util.*; -import static org.elasticsearch.common.lucene.search.NoopCollector.NOOP_COLLECTOR; - /** * */ @@ -229,27 +226,6 @@ public class Lucene { }.run(); } - public static long count(IndexSearcher searcher, Query query) throws IOException { - return searcher.count(query); - } - - /** - * Performs a count on the searcher for query. 
Terminates - * early when the count has reached terminateAfter - */ - public static long count(IndexSearcher searcher, Query query, int terminateAfterCount) throws IOException { - EarlyTerminatingCollector countCollector = createCountBasedEarlyTerminatingCollector(terminateAfterCount); - countWithEarlyTermination(searcher, query, countCollector); - return countCollector.count(); - } - - /** - * Creates count based early termination collector with a threshold of maxCountHits - */ - public final static EarlyTerminatingCollector createCountBasedEarlyTerminatingCollector(int maxCountHits) { - return new EarlyTerminatingCollector(maxCountHits); - } - /** * Wraps delegate with count based early termination collector with a threshold of maxCountHits */ @@ -265,99 +241,27 @@ public class Lucene { } /** - * Performs an exists (count > 0) query on the searcher for query - * with filter using the given collector - * - * The collector can be instantiated using Lucene.createExistsCollector() + * Check whether there is one or more documents matching the provided query. */ - public static boolean exists(IndexSearcher searcher, Query query, Filter filter, - EarlyTerminatingCollector collector) throws IOException { - collector.reset(); - countWithEarlyTermination(searcher, filter, query, collector); - return collector.exists(); - } - - - /** - * Performs an exists (count > 0) query on the searcher for query - * using the given collector - * - * The collector can be instantiated using Lucene.createExistsCollector() - */ - public static boolean exists(IndexSearcher searcher, Query query, EarlyTerminatingCollector collector) throws IOException { - collector.reset(); - countWithEarlyTermination(searcher, query, collector); - return collector.exists(); - } - - /** - * Calls countWithEarlyTermination(searcher, null, query, collector) - */ - public static boolean countWithEarlyTermination(IndexSearcher searcher, Query query, - EarlyTerminatingCollector collector) throws IOException { - return countWithEarlyTermination(searcher, null, query, collector); - } - - /** - * Performs a count on query and filter with early termination using searcher. 
- * The early termination threshold is specified by the provided collector - */ - public static boolean countWithEarlyTermination(IndexSearcher searcher, Filter filter, Query query, - EarlyTerminatingCollector collector) throws IOException { - try { - if (filter == null) { - searcher.search(query, collector); - } else { - searcher.search(query, filter, collector); + public static boolean exists(IndexSearcher searcher, Query query) throws IOException { + final Weight weight = searcher.createNormalizedWeight(query, false); + // the scorer API should be more efficient at stopping after the first + // match than the bulk scorer API + for (LeafReaderContext context : searcher.getIndexReader().leaves()) { + final Scorer scorer = weight.scorer(context); + if (scorer == null) { + continue; + } + final Bits liveDocs = context.reader().getLiveDocs(); + for (int doc = scorer.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = scorer.nextDoc()) { + if (liveDocs == null || liveDocs.get(doc)) { + return true; + } } - } catch (EarlyTerminationException e) { - // early termination - return true; } return false; } - /** - * Performs an exists (count > 0) query on the searcher from the searchContext for query - * using the given collector - * - * The collector can be instantiated using Lucene.createExistsCollector() - */ - public static boolean exists(SearchContext searchContext, Query query, EarlyTerminatingCollector collector) throws IOException { - collector.reset(); - try { - searchContext.searcher().search(query, collector); - } catch (EarlyTerminationException e) { - // ignore, just early termination... - } finally { - searchContext.clearReleasables(SearchContext.Lifetime.COLLECTION); - } - return collector.exists(); - } - - /** - * Creates an {@link org.elasticsearch.common.lucene.Lucene.EarlyTerminatingCollector} - * with a threshold of 1 - */ - public final static EarlyTerminatingCollector createExistsCollector() { - return createCountBasedEarlyTerminatingCollector(1); - } - - /** - * Closes the index writer, returning false if it failed to close. - */ - public static boolean safeClose(IndexWriter writer) { - if (writer == null) { - return true; - } - try { - writer.close(); - return true; - } catch (Throwable e) { - return false; - } - } - public static TopDocs readTopDocs(StreamInput in) throws IOException { if (in.readBoolean()) { int totalHits = in.readVInt(); @@ -612,19 +516,11 @@ public class Lucene { private int count = 0; private LeafCollector leafCollector; - EarlyTerminatingCollector(int maxCountHits) { - this.maxCountHits = maxCountHits; - this.delegate = NOOP_COLLECTOR; - } - EarlyTerminatingCollector(final Collector delegate, int maxCountHits) { this.maxCountHits = maxCountHits; - this.delegate = (delegate == null) ? NOOP_COLLECTOR : delegate; + this.delegate = Objects.requireNonNull(delegate); } - public void reset() { - count = 0; - } public int count() { return count; } diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/NoopCollector.java b/core/src/main/java/org/elasticsearch/common/lucene/search/NoopCollector.java deleted file mode 100644 index 99845ea18d6..00000000000 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/NoopCollector.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.lucene.search; - -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.Scorer; -import org.apache.lucene.search.SimpleCollector; - -import java.io.IOException; - -/** - * - */ -public class NoopCollector extends SimpleCollector { - - public static final NoopCollector NOOP_COLLECTOR = new NoopCollector(); - - @Override - public void setScorer(Scorer scorer) throws IOException { - } - - @Override - public void collect(int doc) throws IOException { - } - - @Override - protected void doSetNextReader(LeafReaderContext context) throws IOException { - } - - @Override - public boolean needsScores() { - return false; - } -} diff --git a/core/src/main/java/org/elasticsearch/common/lucene/uid/Versions.java b/core/src/main/java/org/elasticsearch/common/lucene/uid/Versions.java index 77eb218a0b6..55586d8fbc2 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/uid/Versions.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/uid/Versions.java @@ -33,10 +33,24 @@ import java.util.concurrent.ConcurrentMap; /** Utility class to resolve the Lucene doc ID and version for a given uid. */ public class Versions { - public static final long MATCH_ANY = -3L; // Version was not specified by the user + /** used to indicate the write operation should succeed regardless of current version **/ + public static final long MATCH_ANY = -3L; + + /** indicates that the current document was not found in lucene and in the version map */ public static final long NOT_FOUND = -1L; + + /** + * used when the document is old and doesn't contain any version information in the index + * see {@link PerThreadIDAndVersionLookup#lookup(org.apache.lucene.util.BytesRef)} + */ public static final long NOT_SET = -2L; + /** + * used to indicate that the write operation should be executed if the document is currently deleted + * i.e., not found in the index and/or found as deleted (with version) in the version map + */ + public static final long MATCH_DELETED = -4L; + // TODO: is there somewhere else we can store these? private static final ConcurrentMap> lookupStates = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency(); diff --git a/core/src/main/java/org/elasticsearch/common/network/InetAddresses.java b/core/src/main/java/org/elasticsearch/common/network/InetAddresses.java new file mode 100644 index 00000000000..4d3d140ae63 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/network/InetAddresses.java @@ -0,0 +1,357 @@ +/* + * Copyright (C) 2008 The Guava Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.elasticsearch.common.network; + +import java.net.Inet4Address; +import java.net.Inet6Address; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Locale; + +public class InetAddresses { + private static int IPV4_PART_COUNT = 4; + private static int IPV6_PART_COUNT = 8; + + public static boolean isInetAddress(String ipString) { + return ipStringToBytes(ipString) != null; + } + + private static byte[] ipStringToBytes(String ipString) { + // Make a first pass to categorize the characters in this string. + boolean hasColon = false; + boolean hasDot = false; + for (int i = 0; i < ipString.length(); i++) { + char c = ipString.charAt(i); + if (c == '.') { + hasDot = true; + } else if (c == ':') { + if (hasDot) { + return null; // Colons must not appear after dots. + } + hasColon = true; + } else if (Character.digit(c, 16) == -1) { + return null; // Everything else must be a decimal or hex digit. + } + } + + // Now decide which address family to parse. + if (hasColon) { + if (hasDot) { + ipString = convertDottedQuadToHex(ipString); + if (ipString == null) { + return null; + } + } + return textToNumericFormatV6(ipString); + } else if (hasDot) { + return textToNumericFormatV4(ipString); + } + return null; + } + + private static String convertDottedQuadToHex(String ipString) { + int lastColon = ipString.lastIndexOf(':'); + String initialPart = ipString.substring(0, lastColon + 1); + String dottedQuad = ipString.substring(lastColon + 1); + byte[] quad = textToNumericFormatV4(dottedQuad); + if (quad == null) { + return null; + } + String penultimate = Integer.toHexString(((quad[0] & 0xff) << 8) | (quad[1] & 0xff)); + String ultimate = Integer.toHexString(((quad[2] & 0xff) << 8) | (quad[3] & 0xff)); + return initialPart + penultimate + ":" + ultimate; + } + + private static byte[] textToNumericFormatV4(String ipString) { + String[] address = ipString.split("\\.", IPV4_PART_COUNT + 1); + if (address.length != IPV4_PART_COUNT) { + return null; + } + + byte[] bytes = new byte[IPV4_PART_COUNT]; + try { + for (int i = 0; i < bytes.length; i++) { + bytes[i] = parseOctet(address[i]); + } + } catch (NumberFormatException ex) { + return null; + } + + return bytes; + } + + private static byte parseOctet(String ipPart) { + // Note: we already verified that this string contains only hex digits. + int octet = Integer.parseInt(ipPart); + // Disallow leading zeroes, because no clear standard exists on + // whether these should be interpreted as decimal or octal. + if (octet > 255 || (ipPart.startsWith("0") && ipPart.length() > 1)) { + throw new NumberFormatException(); + } + return (byte) octet; + } + + private static byte[] textToNumericFormatV6(String ipString) { + // An address can have [2..8] colons, and N colons make N+1 parts. + String[] parts = ipString.split(":", IPV6_PART_COUNT + 2); + if (parts.length < 3 || parts.length > IPV6_PART_COUNT + 1) { + return null; + } + + // Disregarding the endpoints, find "::" with nothing in between. 
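// (For example, "2001:db8::1" splits into ["2001", "db8", "", "1"]:
// the empty part at index 2 marks the "::", so skipIndex becomes 2 and
// partsSkipped = 8 - (2 + 1) = 5 zero hextets are restored between "db8" and "1".)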
+ // This indicates that a run of zeroes has been skipped. + int skipIndex = -1; + for (int i = 1; i < parts.length - 1; i++) { + if (parts[i].length() == 0) { + if (skipIndex >= 0) { + return null; // Can't have more than one :: + } + skipIndex = i; + } + } + + int partsHi; // Number of parts to copy from above/before the "::" + int partsLo; // Number of parts to copy from below/after the "::" + if (skipIndex >= 0) { + // If we found a "::", then check if it also covers the endpoints. + partsHi = skipIndex; + partsLo = parts.length - skipIndex - 1; + if (parts[0].length() == 0 && --partsHi != 0) { + return null; // ^: requires ^:: + } + if (parts[parts.length - 1].length() == 0 && --partsLo != 0) { + return null; // :$ requires ::$ + } + } else { + // Otherwise, allocate the entire address to partsHi. The endpoints + // could still be empty, but parseHextet() will check for that. + partsHi = parts.length; + partsLo = 0; + } + + // If we found a ::, then we must have skipped at least one part. + // Otherwise, we must have exactly the right number of parts. + int partsSkipped = IPV6_PART_COUNT - (partsHi + partsLo); + if (!(skipIndex >= 0 ? partsSkipped >= 1 : partsSkipped == 0)) { + return null; + } + + // Now parse the hextets into a byte array. + ByteBuffer rawBytes = ByteBuffer.allocate(2 * IPV6_PART_COUNT); + try { + for (int i = 0; i < partsHi; i++) { + rawBytes.putShort(parseHextet(parts[i])); + } + for (int i = 0; i < partsSkipped; i++) { + rawBytes.putShort((short) 0); + } + for (int i = partsLo; i > 0; i--) { + rawBytes.putShort(parseHextet(parts[parts.length - i])); + } + } catch (NumberFormatException ex) { + return null; + } + return rawBytes.array(); + } + + private static short parseHextet(String ipPart) { + // Note: we already verified that this string contains only hex digits. + int hextet = Integer.parseInt(ipPart, 16); + if (hextet > 0xffff) { + throw new NumberFormatException(); + } + return (short) hextet; + } + + /** + * Returns the string representation of an {@link InetAddress} suitable + * for inclusion in a URI. + * + *
<p>
    For IPv4 addresses, this is identical to + * {@link InetAddress#getHostAddress()}, but for IPv6 addresses it + * compresses zeroes and surrounds the text with square brackets; for example + * {@code "[2001:db8::1]"}. + * + *
<p>
    Per section 3.2.2 of + * http://tools.ietf.org/html/rfc3986, + * a URI containing an IPv6 string literal is of the form + * {@code "http://[2001:db8::1]:8888/index.html"}. + * + *
<p>
    Use of either {@link InetAddresses#toAddrString}, + * {@link InetAddress#getHostAddress()}, or this method is recommended over + * {@link InetAddress#toString()} when an IP address string literal is + * desired. This is because {@link InetAddress#toString()} prints the + * hostname and the IP address string joined by a "/". + * + * @param ip {@link InetAddress} to be converted to URI string literal + * @return {@code String} containing URI-safe string literal + */ + public static String toUriString(InetAddress ip) { + if (ip instanceof Inet6Address) { + return "[" + toAddrString(ip) + "]"; + } + return toAddrString(ip); + } + + /** + * Returns the string representation of an {@link InetAddress}. + * + *
<p>
    For IPv4 addresses, this is identical to + * {@link InetAddress#getHostAddress()}, but for IPv6 addresses, the output + * follows RFC 5952 + * section 4. The main difference is that this method uses "::" for zero + * compression, while Java's version uses the uncompressed form. + * + *
<p>
    This method uses hexadecimal for all IPv6 addresses, including + * IPv4-mapped IPv6 addresses such as "::c000:201". The output does not + * include a Scope ID. + * + * @param ip {@link InetAddress} to be converted to an address string + * @return {@code String} containing the text-formatted IP address + * @since 10.0 + */ + public static String toAddrString(InetAddress ip) { + if (ip == null) { + throw new NullPointerException("ip"); + } + if (ip instanceof Inet4Address) { + // For IPv4, Java's formatting is good enough. + byte[] bytes = ip.getAddress(); + return (bytes[0] & 0xff) + "." + (bytes[1] & 0xff) + "." + (bytes[2] & 0xff) + "." + (bytes[3] & 0xff); + } + if (!(ip instanceof Inet6Address)) { + throw new IllegalArgumentException("ip"); + } + byte[] bytes = ip.getAddress(); + int[] hextets = new int[IPV6_PART_COUNT]; + for (int i = 0; i < hextets.length; i++) { + hextets[i] = (bytes[2 * i] & 255) << 8 | bytes[2 * i + 1] & 255; + } + compressLongestRunOfZeroes(hextets); + return hextetsToIPv6String(hextets); + } + + /** + * Identify and mark the longest run of zeroes in an IPv6 address. + * + *
<p>
    Only runs of two or more hextets are considered. In case of a tie, the + * leftmost run wins. If a qualifying run is found, its hextets are replaced + * by the sentinel value -1. + * + * @param hextets {@code int[]} mutable array of eight 16-bit hextets + */ + private static void compressLongestRunOfZeroes(int[] hextets) { + int bestRunStart = -1; + int bestRunLength = -1; + int runStart = -1; + for (int i = 0; i < hextets.length + 1; i++) { + if (i < hextets.length && hextets[i] == 0) { + if (runStart < 0) { + runStart = i; + } + } else if (runStart >= 0) { + int runLength = i - runStart; + if (runLength > bestRunLength) { + bestRunStart = runStart; + bestRunLength = runLength; + } + runStart = -1; + } + } + if (bestRunLength >= 2) { + Arrays.fill(hextets, bestRunStart, bestRunStart + bestRunLength, -1); + } + } + + /** + * Convert a list of hextets into a human-readable IPv6 address. + * + *
<p>
    In order for "::" compression to work, the input should contain negative + * sentinel values in place of the elided zeroes. + * + * @param hextets {@code int[]} array of eight 16-bit hextets, or -1s + */ + private static String hextetsToIPv6String(int[] hextets) { + /* + * While scanning the array, handle these state transitions: + * start->num => "num" start->gap => "::" + * num->num => ":num" num->gap => "::" + * gap->num => "num" gap->gap => "" + */ + StringBuilder buf = new StringBuilder(39); + boolean lastWasNumber = false; + for (int i = 0; i < hextets.length; i++) { + boolean thisIsNumber = hextets[i] >= 0; + if (thisIsNumber) { + if (lastWasNumber) { + buf.append(':'); + } + buf.append(Integer.toHexString(hextets[i])); + } else { + if (i == 0 || lastWasNumber) { + buf.append("::"); + } + } + lastWasNumber = thisIsNumber; + } + return buf.toString(); + } + + /** + * Returns the {@link InetAddress} having the given string representation. + * + *
<p>
    This deliberately avoids all nameservice lookups (e.g. no DNS). + * + * @param ipString {@code String} containing an IPv4 or IPv6 string literal, e.g. + * {@code "192.168.0.1"} or {@code "2001:db8::1"} + * @return {@link InetAddress} representing the argument + * @throws IllegalArgumentException if the argument is not a valid IP string literal + */ + public static InetAddress forString(String ipString) { + byte[] addr = ipStringToBytes(ipString); + + // The argument was malformed, i.e. not an IP string literal. + if (addr == null) { + throw new IllegalArgumentException(String.format(Locale.ROOT, "'%s' is not an IP string literal.", ipString)); + } + + return bytesToInetAddress(addr); + } + + /** + * Convert a byte array into an InetAddress. + * + * {@link InetAddress#getByAddress} is documented as throwing a checked + * exception "if IP address is of illegal length." We replace it with + * an unchecked exception, for use by callers who already know that addr + * is an array of length 4 or 16. + * + * @param addr the raw 4-byte or 16-byte IP address in big-endian order + * @return an InetAddress object created from the raw IP address + */ + private static InetAddress bytesToInetAddress(byte[] addr) { + try { + return InetAddress.getByAddress(addr); + } catch (UnknownHostException e) { + throw new AssertionError(e); + } + } +} diff --git a/core/src/main/java/org/elasticsearch/common/network/NetworkAddress.java b/core/src/main/java/org/elasticsearch/common/network/NetworkAddress.java index 91eda6bb624..3dcaeeb1f0e 100644 --- a/core/src/main/java/org/elasticsearch/common/network/NetworkAddress.java +++ b/core/src/main/java/org/elasticsearch/common/network/NetworkAddress.java @@ -19,8 +19,6 @@ package org.elasticsearch.common.network; -import com.google.common.net.InetAddresses; - import org.elasticsearch.common.SuppressForbidden; import java.net.Inet6Address; diff --git a/core/src/main/java/org/elasticsearch/common/network/NetworkService.java b/core/src/main/java/org/elasticsearch/common/network/NetworkService.java index 8eff70e7bd8..cd46d1416f4 100644 --- a/core/src/main/java/org/elasticsearch/common/network/NetworkService.java +++ b/core/src/main/java/org/elasticsearch/common/network/NetworkService.java @@ -27,7 +27,6 @@ import org.elasticsearch.common.unit.TimeValue; import java.io.IOException; import java.net.InetAddress; -import java.net.UnknownHostException; import java.util.List; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.TimeUnit; @@ -73,7 +72,7 @@ public class NetworkService extends AbstractComponent { /** * Resolves a custom value handling, return null if can't handle it. 
*/ - InetAddress[] resolveIfPossible(String value); + InetAddress[] resolveIfPossible(String value) throws IOException; } private final List customNameResolvers = new CopyOnWriteArrayList<>(); @@ -162,7 +161,7 @@ public class NetworkService extends AbstractComponent { return address; } - private InetAddress[] resolveInetAddress(String host) throws UnknownHostException, IOException { + private InetAddress[] resolveInetAddress(String host) throws IOException { if ((host.startsWith("#") && host.endsWith("#")) || (host.startsWith("_") && host.endsWith("_"))) { host = host.substring(1, host.length() - 1); // allow custom resolvers to have special names diff --git a/core/src/main/java/org/elasticsearch/common/util/CollectionUtils.java b/core/src/main/java/org/elasticsearch/common/util/CollectionUtils.java index 243c9443f08..a36c37b22e9 100644 --- a/core/src/main/java/org/elasticsearch/common/util/CollectionUtils.java +++ b/core/src/main/java/org/elasticsearch/common/util/CollectionUtils.java @@ -23,16 +23,12 @@ import com.carrotsearch.hppc.DoubleArrayList; import com.carrotsearch.hppc.FloatArrayList; import com.carrotsearch.hppc.LongArrayList; import com.carrotsearch.hppc.ObjectArrayList; -import com.google.common.collect.Iterators; - import org.apache.lucene.util.*; import java.util.*; /** Collections-related utility methods. */ -public enum CollectionUtils { - CollectionUtils; - +public class CollectionUtils { public static void sort(LongArrayList list) { sort(list.buffer, list.size()); } @@ -367,13 +363,6 @@ public enum CollectionUtils { } - /** - * Combines multiple iterators into a single iterator. - */ - public static Iterator concat(Iterator... iterators) { - return Iterators.concat(iterators); - } - public static ArrayList iterableAsArrayList(Iterable elements) { if (elements == null) { throw new NullPointerException("elements"); diff --git a/core/src/main/java/org/elasticsearch/index/IndexServicesProvider.java b/core/src/main/java/org/elasticsearch/index/IndexServicesProvider.java index fe8428425e2..0d5c3cb12c1 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexServicesProvider.java +++ b/core/src/main/java/org/elasticsearch/index/IndexServicesProvider.java @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ + package org.elasticsearch.index; import org.elasticsearch.common.Nullable; @@ -34,6 +35,7 @@ import org.elasticsearch.index.termvectors.TermVectorsService; import org.elasticsearch.indices.IndicesLifecycle; import org.elasticsearch.indices.IndicesWarmer; import org.elasticsearch.indices.cache.query.IndicesQueryCache; +import org.elasticsearch.indices.memory.IndexingMemoryController; import org.elasticsearch.threadpool.ThreadPool; /** @@ -58,9 +60,10 @@ public final class IndexServicesProvider { private final EngineFactory factory; private final BigArrays bigArrays; private final IndexSearcherWrapper indexSearcherWrapper; + private final IndexingMemoryController indexingMemoryController; @Inject - public IndexServicesProvider(IndicesLifecycle indicesLifecycle, ThreadPool threadPool, MapperService mapperService, IndexQueryParserService queryParserService, IndexCache indexCache, IndexAliasesService indexAliasesService, IndicesQueryCache indicesQueryCache, CodecService codecService, TermVectorsService termVectorsService, IndexFieldDataService indexFieldDataService, @Nullable IndicesWarmer warmer, SimilarityService similarityService, EngineFactory factory, BigArrays bigArrays, @Nullable IndexSearcherWrapper indexSearcherWrapper) { + public IndexServicesProvider(IndicesLifecycle indicesLifecycle, ThreadPool threadPool, MapperService mapperService, IndexQueryParserService queryParserService, IndexCache indexCache, IndexAliasesService indexAliasesService, IndicesQueryCache indicesQueryCache, CodecService codecService, TermVectorsService termVectorsService, IndexFieldDataService indexFieldDataService, @Nullable IndicesWarmer warmer, SimilarityService similarityService, EngineFactory factory, BigArrays bigArrays, @Nullable IndexSearcherWrapper indexSearcherWrapper, IndexingMemoryController indexingMemoryController) { this.indicesLifecycle = indicesLifecycle; this.threadPool = threadPool; this.mapperService = mapperService; @@ -76,6 +79,7 @@ public final class IndexServicesProvider { this.factory = factory; this.bigArrays = bigArrays; this.indexSearcherWrapper = indexSearcherWrapper; + this.indexingMemoryController = indexingMemoryController; } public IndicesLifecycle getIndicesLifecycle() { @@ -134,5 +138,11 @@ public final class IndexServicesProvider { return bigArrays; } - public IndexSearcherWrapper getIndexSearcherWrapper() { return indexSearcherWrapper; } + public IndexSearcherWrapper getIndexSearcherWrapper() { + return indexSearcherWrapper; + } + + public IndexingMemoryController getIndexingMemoryController() { + return indexingMemoryController; + } } diff --git a/core/src/main/java/org/elasticsearch/index/VersionType.java b/core/src/main/java/org/elasticsearch/index/VersionType.java index a5d8cae2453..b8f998b9704 100644 --- a/core/src/main/java/org/elasticsearch/index/VersionType.java +++ b/core/src/main/java/org/elasticsearch/index/VersionType.java @@ -31,24 +31,37 @@ import java.io.IOException; public enum VersionType implements Writeable { INTERNAL((byte) 0) { @Override - public boolean isVersionConflictForWrites(long currentVersion, long expectedVersion) { - return isVersionConflict(currentVersion, expectedVersion); + public boolean isVersionConflictForWrites(long currentVersion, long expectedVersion, boolean deleted) { + return isVersionConflict(currentVersion, expectedVersion, deleted); + } + + @Override + public String explainConflictForWrites(long currentVersion, long expectedVersion, boolean deleted) { + if (expectedVersion == Versions.MATCH_DELETED) { + return "document 
already exists (current version [" + currentVersion + "])"; + } + return "current version [" + currentVersion + "] is different than the one provided [" + expectedVersion + "]"; } @Override public boolean isVersionConflictForReads(long currentVersion, long expectedVersion) { - return isVersionConflict(currentVersion, expectedVersion); + return isVersionConflict(currentVersion, expectedVersion, false); } - private boolean isVersionConflict(long currentVersion, long expectedVersion) { + @Override + public String explainConflictForReads(long currentVersion, long expectedVersion) { + return "current version [" + currentVersion + "] is different than the one provided [" + expectedVersion + "]"; + } + + private boolean isVersionConflict(long currentVersion, long expectedVersion, boolean deleted) { if (currentVersion == Versions.NOT_SET) { return false; } if (expectedVersion == Versions.MATCH_ANY) { return false; } - if (currentVersion == Versions.NOT_FOUND) { - return true; + if (expectedVersion == Versions.MATCH_DELETED) { + return deleted == false; } if (currentVersion != expectedVersion) { return true; @@ -63,8 +76,7 @@ public enum VersionType implements Writeable { @Override public boolean validateVersionForWrites(long version) { - // not allowing Versions.NOT_FOUND as it is not a valid input value. - return version > 0L || version == Versions.MATCH_ANY; + return version > 0L || version == Versions.MATCH_ANY || version == Versions.MATCH_DELETED; } @Override @@ -82,7 +94,7 @@ public enum VersionType implements Writeable { }, EXTERNAL((byte) 1) { @Override - public boolean isVersionConflictForWrites(long currentVersion, long expectedVersion) { + public boolean isVersionConflictForWrites(long currentVersion, long expectedVersion, boolean deleted) { if (currentVersion == Versions.NOT_SET) { return false; } @@ -98,6 +110,11 @@ public enum VersionType implements Writeable { return false; } + @Override + public String explainConflictForWrites(long currentVersion, long expectedVersion, boolean deleted) { + return "current version [" + currentVersion + "] is higher or equal to the one provided [" + expectedVersion + "]"; + } + @Override public boolean isVersionConflictForReads(long currentVersion, long expectedVersion) { if (currentVersion == Versions.NOT_SET) { @@ -115,6 +132,11 @@ public enum VersionType implements Writeable { return false; } + @Override + public String explainConflictForReads(long currentVersion, long expectedVersion) { + return "current version [" + currentVersion + "] is different than the one provided [" + expectedVersion + "]"; + } + @Override public long updateVersion(long currentVersion, long expectedVersion) { return expectedVersion; @@ -133,7 +155,7 @@ public enum VersionType implements Writeable { }, EXTERNAL_GTE((byte) 2) { @Override - public boolean isVersionConflictForWrites(long currentVersion, long expectedVersion) { + public boolean isVersionConflictForWrites(long currentVersion, long expectedVersion, boolean deleted) { if (currentVersion == Versions.NOT_SET) { return false; } @@ -149,6 +171,11 @@ public enum VersionType implements Writeable { return false; } + @Override + public String explainConflictForWrites(long currentVersion, long expectedVersion, boolean deleted) { + return "current version [" + currentVersion + "] is higher than the one provided [" + expectedVersion + "]"; + } + @Override public boolean isVersionConflictForReads(long currentVersion, long expectedVersion) { if (currentVersion == Versions.NOT_SET) { @@ -166,6 +193,11 @@ public enum VersionType 
implements Writeable { return false; } + @Override + public String explainConflictForReads(long currentVersion, long expectedVersion) { + return "current version [" + currentVersion + "] is different than the one provided [" + expectedVersion + "]"; + } + @Override public long updateVersion(long currentVersion, long expectedVersion) { return expectedVersion; @@ -187,7 +219,7 @@ public enum VersionType implements Writeable { */ FORCE((byte) 3) { @Override - public boolean isVersionConflictForWrites(long currentVersion, long expectedVersion) { + public boolean isVersionConflictForWrites(long currentVersion, long expectedVersion, boolean deleted) { if (currentVersion == Versions.NOT_SET) { return false; } @@ -195,16 +227,26 @@ public enum VersionType implements Writeable { return false; } if (expectedVersion == Versions.MATCH_ANY) { - return true; + throw new IllegalStateException("you must specify a version when use VersionType.FORCE"); } return false; } + @Override + public String explainConflictForWrites(long currentVersion, long expectedVersion, boolean deleted) { + throw new AssertionError("VersionType.FORCE should never result in a write conflict"); + } + @Override public boolean isVersionConflictForReads(long currentVersion, long expectedVersion) { return false; } + @Override + public String explainConflictForReads(long currentVersion, long expectedVersion) { + throw new AssertionError("VersionType.FORCE should never result in a read conflict"); + } + @Override public long updateVersion(long currentVersion, long expectedVersion) { return expectedVersion; @@ -237,17 +279,46 @@ public enum VersionType implements Writeable { /** * Checks whether the current version conflicts with the expected version, based on the current version type. * + * @param currentVersion the current version for the document + * @param expectedVersion the version specified for the write operation + * @param deleted true if the document is currently deleted (note that #currentVersion will typically be + * {@link Versions#NOT_FOUND}, but may be something else if the document was recently deleted * @return true if versions conflict false o.w. */ - public abstract boolean isVersionConflictForWrites(long currentVersion, long expectedVersion); + public abstract boolean isVersionConflictForWrites(long currentVersion, long expectedVersion, boolean deleted); + + + /** + * Returns a human readable explanation for a version conflict on write. + * + * Note that this method is only called if {@link #isVersionConflictForWrites(long, long, boolean)} returns true; + * + * @param currentVersion the current version for the document + * @param expectedVersion the version specified for the write operation + * @param deleted true if the document is currently deleted (note that #currentVersion will typically be + * {@link Versions#NOT_FOUND}, but may be something else if the document was recently deleted + */ + public abstract String explainConflictForWrites(long currentVersion, long expectedVersion, boolean deleted); /** * Checks whether the current version conflicts with the expected version, based on the current version type. * + * @param currentVersion the current version for the document + * @param expectedVersion the version specified for the read operation * @return true if versions conflict false o.w. */ public abstract boolean isVersionConflictForReads(long currentVersion, long expectedVersion); + /** + * Returns a human readable explanation for a version conflict on read. 
+ * + * Note that this method is only called if {@link #isVersionConflictForReads(long, long)} returns true; + * + * @param currentVersion the current version for the document + * @param expectedVersion the version specified for the read operation + */ + public abstract String explainConflictForReads(long currentVersion, long expectedVersion); + /** * Returns the new version for a document, based on its current one and the specified in the request * diff --git a/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java b/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java index 30c0905dc0f..f2b7ba8e131 100644 --- a/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java +++ b/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java @@ -19,11 +19,6 @@ package org.elasticsearch.index.cache.bitset; -import com.google.common.cache.Cache; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.RemovalListener; -import com.google.common.cache.RemovalNotification; - import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; @@ -38,6 +33,10 @@ import org.apache.lucene.util.BitDocIdSet; import org.apache.lucene.util.BitSet; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.cache.Cache; +import org.elasticsearch.common.cache.CacheBuilder; +import org.elasticsearch.common.cache.RemovalListener; +import org.elasticsearch.common.cache.RemovalNotification; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; @@ -58,10 +57,11 @@ import org.elasticsearch.threadpool.ThreadPool; import java.io.Closeable; import java.io.IOException; import java.util.HashSet; -import java.util.Map; import java.util.Objects; import java.util.Set; -import java.util.concurrent.*; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; /** * This is a cache for {@link BitDocIdSet} based filters and is unbounded by size or time. 
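The hunks below swap Guava's Cache/CacheBuilder for the in-house org.elasticsearch.common.cache API. A minimal sketch of the computeIfAbsent idiom they adopt, hedged: the String/Long types and the computeSizeFor loader are illustrative only; builder(), computeIfAbsent, and the checked ExecutionException follow the usage visible in this diff.

import org.elasticsearch.common.cache.Cache;
import org.elasticsearch.common.cache.CacheBuilder;

import java.util.concurrent.ExecutionException;

class BitsetCacheSketch {
    private final Cache<String, Long> sizes = CacheBuilder.<String, Long>builder().build();

    long sizeOf(String segmentName) throws ExecutionException {
        // Loads the value via the lambda on a cache miss; a failed load
        // surfaces as ExecutionException, as in getAndLoadIfNotPresent below.
        return sizes.computeIfAbsent(segmentName, key -> computeSizeFor(key));
    }

    private long computeSizeFor(String segmentName) { // hypothetical loader
        return (long) segmentName.length();
    }
}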
@@ -94,10 +94,11 @@ public class BitsetFilterCache extends AbstractIndexComponent implements LeafRea public BitsetFilterCache(Index index, @IndexSettings Settings indexSettings) { super(index, indexSettings); this.loadRandomAccessFiltersEagerly = indexSettings.getAsBoolean(LOAD_RANDOM_ACCESS_FILTERS_EAGERLY, true); - this.loadedFilters = CacheBuilder.newBuilder().removalListener(this).build(); + this.loadedFilters = CacheBuilder.>builder().removalListener(this).build(); this.warmer = new BitSetProducerWarmer(); } + @Inject(optional = true) public void setIndicesWarmer(IndicesWarmer indicesWarmer) { this.indicesWarmer = indicesWarmer; @@ -144,14 +145,12 @@ public class BitsetFilterCache extends AbstractIndexComponent implements LeafRea private BitSet getAndLoadIfNotPresent(final Query query, final LeafReaderContext context) throws IOException, ExecutionException { final Object coreCacheReader = context.reader().getCoreCacheKey(); final ShardId shardId = ShardUtils.extractShardId(context.reader()); - Cache filterToFbs = loadedFilters.get(coreCacheReader, new Callable>() { - @Override - public Cache call() throws Exception { - context.reader().addCoreClosedListener(BitsetFilterCache.this); - return CacheBuilder.newBuilder().build(); - } + Cache filterToFbs = loadedFilters.computeIfAbsent(coreCacheReader, key -> { + context.reader().addCoreClosedListener(BitsetFilterCache.this); + return CacheBuilder.builder().build(); }); - return filterToFbs.get(query, () -> { + + return filterToFbs.computeIfAbsent(query, key -> { final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context); final IndexSearcher searcher = new IndexSearcher(topLevelContext); searcher.setQueryCache(null); @@ -172,8 +171,7 @@ public class BitsetFilterCache extends AbstractIndexComponent implements LeafRea @Override public void onRemoval(RemovalNotification> notification) { - Object key = notification.getKey(); - if (key == null) { + if (notification.getKey() == null) { return; } @@ -182,7 +180,7 @@ public class BitsetFilterCache extends AbstractIndexComponent implements LeafRea return; } - for (Value value : valueCache.asMap().values()) { + for (Value value : valueCache.values()) { listener.onRemoval(value.shardId, value.bitset); // if null then this means the shard has already been removed and the stats are 0 anyway for the shard this key belongs to } diff --git a/core/src/main/java/org/elasticsearch/index/cache/request/ShardRequestCache.java b/core/src/main/java/org/elasticsearch/index/cache/request/ShardRequestCache.java index ef82e73dc4d..0f594d2faca 100644 --- a/core/src/main/java/org/elasticsearch/index/cache/request/ShardRequestCache.java +++ b/core/src/main/java/org/elasticsearch/index/cache/request/ShardRequestCache.java @@ -19,10 +19,8 @@ package org.elasticsearch.index.cache.request; -import com.google.common.cache.RemovalListener; -import com.google.common.cache.RemovalNotification; - -import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.cache.RemovalListener; +import org.elasticsearch.common.cache.RemovalNotification; import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.settings.IndexSettings; @@ -61,7 +59,7 @@ public class ShardRequestCache extends AbstractIndexShardComponent implements Re @Override public void onRemoval(RemovalNotification removalNotification) { - if (removalNotification.wasEvicted()) { + if (removalNotification.getRemovalReason() == 
RemovalNotification.RemovalReason.EVICTED) { evictionsMetric.inc(); } long dec = 0; diff --git a/core/src/main/java/org/elasticsearch/index/codec/postingsformat/BloomFilterPostingsFormat.java b/core/src/main/java/org/elasticsearch/index/codec/postingsformat/BloomFilterPostingsFormat.java deleted file mode 100644 index 71a52a7fbd1..00000000000 --- a/core/src/main/java/org/elasticsearch/index/codec/postingsformat/BloomFilterPostingsFormat.java +++ /dev/null @@ -1,440 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.codec.postingsformat; - -import org.apache.lucene.codecs.*; -import org.apache.lucene.index.*; -import org.apache.lucene.search.DocIdSetIterator; -import org.apache.lucene.store.*; -import org.apache.lucene.util.*; -import org.elasticsearch.common.util.BloomFilter; - -import java.io.IOException; -import java.util.*; -import java.util.Map.Entry; - -/** - *
<p>
    - * A {@link PostingsFormat} useful for low doc-frequency fields such as primary - * keys. Bloom filters are maintained in a ".blm" file which offers "fast-fail" - * for reads in segments known to have no record of the key. A choice of - * delegate PostingsFormat is used to record all other Postings data. - *
</p>
    - *
<p>
    - * This is a special bloom filter version, based on {@link org.elasticsearch.common.util.BloomFilter} and inspired - * by Lucene {@code org.apache.lucene.codecs.bloom.BloomFilteringPostingsFormat}. - * @deprecated only for reading old segments - */ -@Deprecated -public class BloomFilterPostingsFormat extends PostingsFormat { - - public static final String BLOOM_CODEC_NAME = "XBloomFilter"; // the Lucene one is named BloomFilter - public static final int BLOOM_CODEC_VERSION = 1; - public static final int BLOOM_CODEC_VERSION_CHECKSUM = 2; - public static final int BLOOM_CODEC_VERSION_CURRENT = BLOOM_CODEC_VERSION_CHECKSUM; - - /** - * Extension of Bloom Filters file - */ - static final String BLOOM_EXTENSION = "blm"; - - private BloomFilter.Factory bloomFilterFactory = BloomFilter.Factory.DEFAULT; - private PostingsFormat delegatePostingsFormat; - - /** - * Creates Bloom filters for a selection of fields created in the index. This - * is recorded as a set of Bitsets held as a segment summary in an additional - * "blm" file. This PostingsFormat delegates to a choice of delegate - * PostingsFormat for encoding all other postings data. - * - * @param delegatePostingsFormat The PostingsFormat that records all the non-bloom filter data i.e. - * postings info. - * @param bloomFilterFactory The {@link org.elasticsearch.common.util.BloomFilter.Factory} responsible for sizing BloomFilters - * appropriately - */ - public BloomFilterPostingsFormat(PostingsFormat delegatePostingsFormat, - BloomFilter.Factory bloomFilterFactory) { - super(BLOOM_CODEC_NAME); - this.delegatePostingsFormat = delegatePostingsFormat; - this.bloomFilterFactory = bloomFilterFactory; - } - - // Used only by core Lucene at read-time via Service Provider instantiation - - // do not use at Write-time in application code. - public BloomFilterPostingsFormat() { - super(BLOOM_CODEC_NAME); - } - - @Override - public BloomFilteredFieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException { - throw new UnsupportedOperationException("this codec can only be used for reading"); - } - - @Override - public BloomFilteredFieldsProducer fieldsProducer(SegmentReadState state) - throws IOException { - return new BloomFilteredFieldsProducer(state); - } - - public PostingsFormat getDelegate() { - return delegatePostingsFormat; - } - - private final class LazyBloomLoader implements Accountable { - private final long offset; - private final IndexInput indexInput; - private BloomFilter filter; - - private LazyBloomLoader(long offset, IndexInput origial) { - this.offset = offset; - this.indexInput = origial.clone(); - } - - synchronized BloomFilter get() throws IOException { - if (filter == null) { - try (final IndexInput input = indexInput) { - input.seek(offset); - this.filter = BloomFilter.deserialize(input); - } - } - return filter; - } - - @Override - public long ramBytesUsed() { - return filter == null ? 
0l : filter.getSizeInBytes(); - } - - @Override - public Collection getChildResources() { - return Collections.singleton(Accountables.namedAccountable("bloom", ramBytesUsed())); - } - } - - public final class BloomFilteredFieldsProducer extends FieldsProducer { - private FieldsProducer delegateFieldsProducer; - HashMap bloomsByFieldName = new HashMap<>(); - private final int version; - private final IndexInput data; - - // for internal use only - FieldsProducer getDelegate() { - return delegateFieldsProducer; - } - - public BloomFilteredFieldsProducer(SegmentReadState state) - throws IOException { - - final String bloomFileName = IndexFileNames.segmentFileName( - state.segmentInfo.name, state.segmentSuffix, BLOOM_EXTENSION); - final Directory directory = state.directory; - IndexInput dataInput = directory.openInput(bloomFileName, state.context); - try { - ChecksumIndexInput bloomIn = new BufferedChecksumIndexInput(dataInput.clone()); - version = CodecUtil.checkHeader(bloomIn, BLOOM_CODEC_NAME, BLOOM_CODEC_VERSION, - BLOOM_CODEC_VERSION_CURRENT); - // // Load the hash function used in the BloomFilter - // hashFunction = HashFunction.forName(bloomIn.readString()); - // Load the delegate postings format - final String delegatePostings = bloomIn.readString(); - this.delegateFieldsProducer = PostingsFormat.forName(delegatePostings) - .fieldsProducer(state); - this.data = dataInput; - dataInput = null; // null it out such that we don't close it - } finally { - IOUtils.closeWhileHandlingException(dataInput); - } - } - - @Override - public Iterator iterator() { - return delegateFieldsProducer.iterator(); - } - - @Override - public void close() throws IOException { - IOUtils.close(data, delegateFieldsProducer); - } - - @Override - public Terms terms(String field) throws IOException { - LazyBloomLoader filter = bloomsByFieldName.get(field); - if (filter == null) { - return delegateFieldsProducer.terms(field); - } else { - Terms result = delegateFieldsProducer.terms(field); - if (result == null) { - return null; - } - return new BloomFilteredTerms(result, filter.get()); - } - } - - @Override - public int size() { - return delegateFieldsProducer.size(); - } - - @Override - public long ramBytesUsed() { - long size = delegateFieldsProducer.ramBytesUsed(); - for (LazyBloomLoader bloomFilter : bloomsByFieldName.values()) { - size += bloomFilter.ramBytesUsed(); - } - return size; - } - - @Override - public Collection getChildResources() { - List resources = new ArrayList<>(); - resources.addAll(Accountables.namedAccountables("field", bloomsByFieldName)); - if (delegateFieldsProducer != null) { - resources.add(Accountables.namedAccountable("delegate", delegateFieldsProducer)); - } - return Collections.unmodifiableList(resources); - } - - @Override - public void checkIntegrity() throws IOException { - delegateFieldsProducer.checkIntegrity(); - if (version >= BLOOM_CODEC_VERSION_CHECKSUM) { - CodecUtil.checksumEntireFile(data); - } - } - - @Override - public FieldsProducer getMergeInstance() throws IOException { - return delegateFieldsProducer.getMergeInstance(); - } - } - - public static final class BloomFilteredTerms extends FilterLeafReader.FilterTerms { - private BloomFilter filter; - - public BloomFilteredTerms(Terms terms, BloomFilter filter) { - super(terms); - this.filter = filter; - } - - public BloomFilter getFilter() { - return filter; - } - - @Override - public TermsEnum iterator() throws IOException { - return new BloomFilteredTermsEnum(this.in, filter); - } - } - - static final class 
BloomFilteredTermsEnum extends TermsEnum { - - private Terms delegateTerms; - private TermsEnum delegateTermsEnum; - private BloomFilter filter; - - public BloomFilteredTermsEnum(Terms other, BloomFilter filter) { - this.delegateTerms = other; - this.filter = filter; - } - - void reset(Terms others) { - this.delegateTermsEnum = null; - this.delegateTerms = others; - } - - private TermsEnum getDelegate() throws IOException { - if (delegateTermsEnum == null) { - /* pull the iterator only if we really need it - - * this can be a relatively heavy operation depending on the - * delegate postings format and they underlying directory - * (clone IndexInput) */ - delegateTermsEnum = delegateTerms.iterator(); - } - return delegateTermsEnum; - } - - @Override - public final BytesRef next() throws IOException { - return getDelegate().next(); - } - - @Override - public final boolean seekExact(BytesRef text) - throws IOException { - // The magical fail-fast speed up that is the entire point of all of - // this code - save a disk seek if there is a match on an in-memory - // structure - // that may occasionally give a false positive but guaranteed no false - // negatives - if (!filter.mightContain(text)) { - return false; - } - return getDelegate().seekExact(text); - } - - @Override - public final SeekStatus seekCeil(BytesRef text) - throws IOException { - return getDelegate().seekCeil(text); - } - - @Override - public final void seekExact(long ord) throws IOException { - getDelegate().seekExact(ord); - } - - @Override - public final BytesRef term() throws IOException { - return getDelegate().term(); - } - - @Override - public final long ord() throws IOException { - return getDelegate().ord(); - } - - @Override - public final int docFreq() throws IOException { - return getDelegate().docFreq(); - } - - @Override - public final long totalTermFreq() throws IOException { - return getDelegate().totalTermFreq(); - } - - - @Override - public PostingsEnum postings(PostingsEnum reuse, int flags) throws IOException { - return getDelegate().postings(reuse, flags); - } - } - - // TODO: would be great to move this out to test code, but the interaction between es090 and bloom is complex - // at least it is not accessible via SPI - public final class BloomFilteredFieldsConsumer extends FieldsConsumer { - private final FieldsConsumer delegateFieldsConsumer; - private final Map bloomFilters = new HashMap<>(); - private final SegmentWriteState state; - private boolean closed = false; - - // private PostingsFormat delegatePostingsFormat; - - public BloomFilteredFieldsConsumer(FieldsConsumer fieldsConsumer, - SegmentWriteState state, PostingsFormat delegatePostingsFormat) { - this.delegateFieldsConsumer = fieldsConsumer; - // this.delegatePostingsFormat=delegatePostingsFormat; - this.state = state; - } - - // for internal use only - public FieldsConsumer getDelegate() { - return delegateFieldsConsumer; - } - - - @Override - public void write(Fields fields) throws IOException { - - // Delegate must write first: it may have opened files - // on creating the class - // (e.g. 
Lucene41PostingsConsumer), and write() will - // close them; alternatively, if we delayed pulling - // the fields consumer until here, we could do it - // afterwards: - delegateFieldsConsumer.write(fields); - - for(String field : fields) { - Terms terms = fields.terms(field); - if (terms == null) { - continue; - } - FieldInfo fieldInfo = state.fieldInfos.fieldInfo(field); - TermsEnum termsEnum = terms.iterator(); - - BloomFilter bloomFilter = null; - - PostingsEnum postings = null; - while (true) { - BytesRef term = termsEnum.next(); - if (term == null) { - break; - } - if (bloomFilter == null) { - bloomFilter = bloomFilterFactory.createFilter(state.segmentInfo.maxDoc()); - assert bloomFilters.containsKey(field) == false; - bloomFilters.put(fieldInfo, bloomFilter); - } - // Make sure there's at least one doc for this term: - postings = termsEnum.postings(postings, 0); - if (postings.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { - bloomFilter.put(term); - } - } - } - } - - @Override - public void close() throws IOException { - if (closed) { - return; - } - closed = true; - delegateFieldsConsumer.close(); - // Now we are done accumulating values for these fields - List> nonSaturatedBlooms = new ArrayList<>(); - - for (Entry entry : bloomFilters.entrySet()) { - nonSaturatedBlooms.add(entry); - } - String bloomFileName = IndexFileNames.segmentFileName( - state.segmentInfo.name, state.segmentSuffix, BLOOM_EXTENSION); - IndexOutput bloomOutput = null; - try { - bloomOutput = state.directory - .createOutput(bloomFileName, state.context); - CodecUtil.writeHeader(bloomOutput, BLOOM_CODEC_NAME, - BLOOM_CODEC_VERSION_CURRENT); - // remember the name of the postings format we will delegate to - bloomOutput.writeString(delegatePostingsFormat.getName()); - - // First field in the output file is the number of fields+blooms saved - bloomOutput.writeInt(nonSaturatedBlooms.size()); - for (Entry entry : nonSaturatedBlooms) { - FieldInfo fieldInfo = entry.getKey(); - BloomFilter bloomFilter = entry.getValue(); - bloomOutput.writeInt(fieldInfo.number); - saveAppropriatelySizedBloomFilter(bloomOutput, bloomFilter, fieldInfo); - } - CodecUtil.writeFooter(bloomOutput); - } finally { - IOUtils.close(bloomOutput); - } - //We are done with large bitsets so no need to keep them hanging around - bloomFilters.clear(); - } - - private void saveAppropriatelySizedBloomFilter(IndexOutput bloomOutput, - BloomFilter bloomFilter, FieldInfo fieldInfo) throws IOException { - BloomFilter.serilaize(bloomFilter, bloomOutput); - } - - } -} diff --git a/core/src/main/java/org/elasticsearch/index/codec/postingsformat/Elasticsearch090PostingsFormat.java b/core/src/main/java/org/elasticsearch/index/codec/postingsformat/Elasticsearch090PostingsFormat.java deleted file mode 100644 index b6ceba8fb8a..00000000000 --- a/core/src/main/java/org/elasticsearch/index/codec/postingsformat/Elasticsearch090PostingsFormat.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
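
The seekExact fast path deleted above is the heart of this format: an in-memory filter that never returns a false negative, so a miss skips the disk seek entirely. A minimal sketch of that check, assuming the TermsEnum/BytesRef types and the org.elasticsearch.common.util.BloomFilter used in the removed code (fastSeekExact and BloomSeekSketch are hypothetical names, not part of this patch):

    import java.io.IOException;
    import org.apache.lucene.index.TermsEnum;
    import org.apache.lucene.util.BytesRef;
    import org.elasticsearch.common.util.BloomFilter;

    final class BloomSeekSketch {
        // Consult the in-memory filter before paying for a disk-backed seek.
        static boolean fastSeekExact(TermsEnum delegate, BloomFilter filter, BytesRef term) throws IOException {
            if (filter.mightContain(term) == false) {
                return false; // no false negatives: the term is definitely absent
            }
            return delegate.seekExact(term); // rare false positive: verified on disk
        }
    }
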
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.index.codec.postingsformat; - -import com.google.common.collect.Iterators; -import org.apache.lucene.codecs.FieldsConsumer; -import org.apache.lucene.codecs.FieldsProducer; -import org.apache.lucene.codecs.PostingsFormat; -import org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat; -import org.apache.lucene.index.Fields; -import org.apache.lucene.index.FilterLeafReader; -import org.apache.lucene.index.SegmentReadState; -import org.apache.lucene.index.SegmentWriteState; -import org.elasticsearch.common.lucene.Lucene; -import org.elasticsearch.common.util.BloomFilter; -import org.elasticsearch.index.codec.postingsformat.BloomFilterPostingsFormat.BloomFilteredFieldsConsumer; -import org.elasticsearch.index.mapper.internal.UidFieldMapper; - -import java.io.IOException; -import java.util.Iterator; -import java.util.function.Predicate; - -/** - * This is the old default postings format for Elasticsearch that special cases - * the _uid field to use a bloom filter while all other fields - * will use a {@link Lucene50PostingsFormat}. This format will reuse the underlying - * {@link Lucene50PostingsFormat} and its files also for the _uid saving up to - * 5 files per segment in the default case. - *
    - * @deprecated only for reading old segments - */ -@Deprecated -public class Elasticsearch090PostingsFormat extends PostingsFormat { - protected final BloomFilterPostingsFormat bloomPostings; - - public Elasticsearch090PostingsFormat() { - super("es090"); - Lucene50PostingsFormat delegate = new Lucene50PostingsFormat(); - assert delegate.getName().equals(Lucene.LATEST_POSTINGS_FORMAT); - bloomPostings = new BloomFilterPostingsFormat(delegate, BloomFilter.Factory.DEFAULT); - } - - public PostingsFormat getDefaultWrapped() { - return bloomPostings.getDelegate(); - } - - protected static final Predicate UID_FIELD_FILTER = field -> UidFieldMapper.NAME.equals(field); - - @Override - public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException { - throw new UnsupportedOperationException("this codec can only be used for reading"); - } - - @Override - public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException { - // we can just return the delegate here since we didn't record bloom filters for - // the other fields. - return bloomPostings.fieldsProducer(state); - } - -} diff --git a/core/src/main/java/org/elasticsearch/index/engine/CreateFailedEngineException.java b/core/src/main/java/org/elasticsearch/index/engine/CreateFailedEngineException.java deleted file mode 100644 index 32d9ee620c8..00000000000 --- a/core/src/main/java/org/elasticsearch/index/engine/CreateFailedEngineException.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index.engine; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.index.shard.ShardId; - -import java.io.IOException; -import java.util.Objects; - -/** - * - */ -public class CreateFailedEngineException extends EngineException { - - private final String type; - - private final String id; - - public CreateFailedEngineException(ShardId shardId, String type, String id, Throwable cause) { - super(shardId, "Create failed for [" + type + "#" + id + "]", cause); - Objects.requireNonNull(type, "type must not be null"); - Objects.requireNonNull(id, "id must not be null"); - this.type = type; - this.id = id; - } - - public CreateFailedEngineException(StreamInput in) throws IOException{ - super(in); - type = in.readString(); - id = in.readString(); - } - - public String type() { - return this.type; - } - - public String id() { - return this.id; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeString(type); - out.writeString(id); - } -} diff --git a/core/src/main/java/org/elasticsearch/index/engine/DeleteByQueryFailedEngineException.java b/core/src/main/java/org/elasticsearch/index/engine/DeleteByQueryFailedEngineException.java deleted file mode 100644 index 95d57c53836..00000000000 --- a/core/src/main/java/org/elasticsearch/index/engine/DeleteByQueryFailedEngineException.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.engine; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.index.shard.ShardId; - -import java.io.IOException; - -/** @deprecated Delete-by-query is removed in 2.0, but we keep this so translog can replay on upgrade. */ -@Deprecated -public class DeleteByQueryFailedEngineException extends EngineException { - - public DeleteByQueryFailedEngineException(ShardId shardId, Engine.DeleteByQuery deleteByQuery, Throwable cause) { - super(shardId, "Delete by query failed for [" + deleteByQuery.query() + "]", cause); - } - - public DeleteByQueryFailedEngineException(StreamInput in) throws IOException{ - super(in); - } -} diff --git a/core/src/main/java/org/elasticsearch/index/engine/DocumentAlreadyExistsException.java b/core/src/main/java/org/elasticsearch/index/engine/DocumentAlreadyExistsException.java deleted file mode 100644 index 467dd8c14c7..00000000000 --- a/core/src/main/java/org/elasticsearch/index/engine/DocumentAlreadyExistsException.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.index.engine; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.rest.RestStatus; - -import java.io.IOException; - -/** - * - */ -public class DocumentAlreadyExistsException extends EngineException { - - public DocumentAlreadyExistsException(ShardId shardId, String type, String id) { - super(shardId, "[" + type + "][" + id + "]: document already exists"); - } - - public DocumentAlreadyExistsException(StreamInput in) throws IOException{ - super(in); - } - - @Override - public RestStatus status() { - return RestStatus.CONFLICT; - } -} diff --git a/core/src/main/java/org/elasticsearch/index/engine/Engine.java b/core/src/main/java/org/elasticsearch/index/engine/Engine.java index 1330ef05a7f..c07be064489 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -45,7 +45,6 @@ import org.elasticsearch.index.mapper.ParseContext.Document; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.merge.MergeStats; -import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.Translog; @@ -60,7 +59,6 @@ import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.Function; -import java.util.function.Supplier; /** * @@ -144,7 +142,8 @@ public abstract class Engine implements Closeable { return new MergeStats(); } - /** A throttling class that can be activated, causing the + /** + * A throttling class that can be activated, causing the * {@code acquireThrottle} method to block on a lock when throttling * is enabled */ @@ -203,20 +202,15 @@ public abstract class Engine implements Closeable { } } - public abstract void create(Create create) throws EngineException; - - public abstract boolean index(Index index) throws EngineException; + public abstract boolean index(Index operation) throws EngineException; public abstract void delete(Delete delete) throws EngineException; - /** @deprecated This was removed, but we keep this API so translog can replay any DBQs on upgrade. */ - @Deprecated - public abstract void delete(DeleteByQuery delete) throws EngineException; - /** * Attempts to do a special commit where the given syncID is put into the commit data. The attempt * succeeds if there are not pending writes in lucene and the current point is equal to the expected one. 
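
The synced-flush contract described in this javadoc is an optimistic check, not a lock. A caller-side sketch, assuming the CommitId returned by flush(), the SyncedFlushResult type that reappears in the ShadowEngine hunk below, and a SUCCESS constant on it (the sync id literal and the engine variable are placeholders):

    // Capture the current commit point, then stamp a sync id only if nothing
    // was written in between; on failure the caller retries or gives up.
    Engine.CommitId expected = engine.flush();
    Engine.SyncedFlushResult result = engine.syncFlush("placeholder-sync-id", expected);
    if (result != Engine.SyncedFlushResult.SUCCESS) {
        // pending writes, or another commit raced this one
    }
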
- * @param syncId id of this sync + * + * @param syncId id of this sync * @param expectedCommitId the expected value of * @return true if the sync commit was made, false o.w. */ @@ -243,7 +237,8 @@ public abstract class Engine implements Closeable { if (get.versionType().isVersionConflictForReads(docIdAndVersion.version, get.version())) { Releasables.close(searcher); Uid uid = Uid.createUid(get.uid().text()); - throw new VersionConflictEngineException(shardId, uid.type(), uid.id(), docIdAndVersion.version, get.version()); + throw new VersionConflictEngineException(shardId, uid.type(), uid.id(), + get.versionType().explainConflictForReads(docIdAndVersion.version, get.version())); } } @@ -328,7 +323,7 @@ public abstract class Engine implements Closeable { } catch (IOException e) { // Fall back to reading from the store if reading from the commit fails try { - return store. readLastCommittedSegmentsInfo(); + return store.readLastCommittedSegmentsInfo(); } catch (IOException e2) { e2.addSuppressed(e); throw e2; @@ -366,6 +361,9 @@ public abstract class Engine implements Closeable { stats.addIndexWriterMaxMemoryInBytes(0); } + /** How much heap Lucene's IndexWriter is using */ + abstract public long indexWriterRAMBytesUsed(); + protected Segment[] getSegmentInfo(SegmentInfos lastCommittedSegmentInfos, boolean verbose) { ensureOpen(); Map segments = new HashMap<>(); @@ -469,7 +467,8 @@ public abstract class Engine implements Closeable { /** * Flushes the state of the engine including the transaction log, clearing memory. - * @param force if true a lucene commit is executed even if no changes need to be committed. + * + * @param force if true a lucene commit is executed even if no changes need to be committed. * @param waitIfOngoing if true this call will block until all currently running flushes have finished. * Otherwise this call will return without blocking. 
* @return the commit Id for the resulting commit @@ -607,62 +606,97 @@ public abstract class Engine implements Closeable { } } - public static interface Operation { - static enum Type { - CREATE, - INDEX, - DELETE - } - - static enum Origin { - PRIMARY, - REPLICA, - RECOVERY - } - - Type opType(); - - Origin origin(); - } - - public static abstract class IndexingOperation implements Operation { - + public static abstract class Operation { private final Term uid; - private final ParsedDocument doc; private long version; private final VersionType versionType; private final Origin origin; private Translog.Location location; - private final long startTime; private long endTime; - public IndexingOperation(Term uid, ParsedDocument doc, long version, VersionType versionType, Origin origin, long startTime) { + public Operation(Term uid, long version, VersionType versionType, Origin origin, long startTime) { this.uid = uid; - this.doc = doc; this.version = version; this.versionType = versionType; this.origin = origin; this.startTime = startTime; } - public IndexingOperation(Term uid, ParsedDocument doc) { - this(uid, doc, Versions.MATCH_ANY, VersionType.INTERNAL, Origin.PRIMARY, System.nanoTime()); + public static enum Origin { + PRIMARY, + REPLICA, + RECOVERY } - @Override public Origin origin() { return this.origin; } - public ParsedDocument parsedDoc() { - return this.doc; - } - public Term uid() { return this.uid; } + public long version() { + return this.version; + } + + public void updateVersion(long version) { + this.version = version; + } + + public void setTranslogLocation(Translog.Location location) { + this.location = location; + } + + public Translog.Location getTranslogLocation() { + return this.location; + } + + public VersionType versionType() { + return this.versionType; + } + + /** + * Returns operation start time in nanoseconds. + */ + public long startTime() { + return this.startTime; + } + + public void endTime(long endTime) { + this.endTime = endTime; + } + + /** + * Returns operation end time in nanoseconds. 
+ */ + public long endTime() { + return this.endTime; + } + } + + public static class Index extends Operation { + + private final ParsedDocument doc; + + public Index(Term uid, ParsedDocument doc, long version, VersionType versionType, Origin origin, long startTime) { + super(uid, version, versionType, origin, startTime); + this.doc = doc; + } + + public Index(Term uid, ParsedDocument doc) { + this(uid, doc, Versions.MATCH_ANY); + } + + public Index(Term uid, ParsedDocument doc, long version) { + this(uid, doc, version, VersionType.INTERNAL, Origin.PRIMARY, System.nanoTime()); + } + + public ParsedDocument parsedDoc() { + return this.doc; + } + public String type() { return this.doc.type(); } @@ -683,27 +717,12 @@ public abstract class Engine implements Closeable { return this.doc.ttl(); } - public long version() { - return this.version; - } - + @Override public void updateVersion(long version) { - this.version = version; + super.updateVersion(version); this.doc.version().setLongValue(version); } - public void setTranslogLocation(Translog.Location location) { - this.location = location; - } - - public Translog.Location getTranslogLocation() { - return this.location; - } - - public VersionType versionType() { - return this.versionType; - } - public String parent() { return this.doc.parent(); } @@ -715,96 +734,17 @@ public abstract class Engine implements Closeable { public BytesReference source() { return this.doc.source(); } - - /** - * Returns operation start time in nanoseconds. - */ - public long startTime() { - return this.startTime; - } - - public void endTime(long endTime) { - this.endTime = endTime; - } - - /** - * Returns operation end time in nanoseconds. - */ - public long endTime() { - return this.endTime; - } - - /** - * Execute this operation against the provided {@link IndexShard} and - * return whether the document was created. 
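
With Engine.Create gone, a create is expressed through the constructors just shown: an Engine.Index whose version defaults to Versions.MATCH_ANY, with creation reported through index()'s boolean return. A short sketch of the new calling convention (uidTerm and parsedDoc are placeholders):

    // One write path for create and update alike:
    Engine.Index op = new Engine.Index(uidTerm, parsedDoc); // MATCH_ANY / INTERNAL / PRIMARY defaults
    boolean created = engine.index(op); // true if no live document existed for this uid
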
- */ - public abstract boolean execute(IndexShard shard); } - public static final class Create extends IndexingOperation { - - public Create(Term uid, ParsedDocument doc, long version, VersionType versionType, Origin origin, long startTime) { - super(uid, doc, version, versionType, origin, startTime); - } - - public Create(Term uid, ParsedDocument doc) { - super(uid, doc); - } - - @Override - public Type opType() { - return Type.CREATE; - } - - @Override - public boolean execute(IndexShard shard) { - shard.create(this); - return true; - } - } - - public static final class Index extends IndexingOperation { - - public Index(Term uid, ParsedDocument doc, long version, VersionType versionType, Origin origin, long startTime) { - super(uid, doc, version, versionType, origin, startTime); - } - - public Index(Term uid, ParsedDocument doc) { - super(uid, doc); - } - - @Override - public Type opType() { - return Type.INDEX; - } - - @Override - public boolean execute(IndexShard shard) { - return shard.index(this); - } - } - - public static class Delete implements Operation { + public static class Delete extends Operation { private final String type; private final String id; - private final Term uid; - private long version; - private final VersionType versionType; - private final Origin origin; private boolean found; - private final long startTime; - private long endTime; - private Translog.Location location; - public Delete(String type, String id, Term uid, long version, VersionType versionType, Origin origin, long startTime, boolean found) { + super(uid, version, versionType, origin, startTime); this.type = type; this.id = id; - this.uid = uid; - this.version = version; - this.versionType = versionType; - this.origin = origin; - this.startTime = startTime; this.found = found; } @@ -816,16 +756,6 @@ public abstract class Engine implements Closeable { this(template.type(), template.id(), template.uid(), template.version(), versionType, template.origin(), template.startTime(), template.found()); } - @Override - public Type opType() { - return Type.DELETE; - } - - @Override - public Origin origin() { - return this.origin; - } - public String type() { return this.type; } @@ -834,55 +764,14 @@ public abstract class Engine implements Closeable { return this.id; } - public Term uid() { - return this.uid; - } - public void updateVersion(long version, boolean found) { - this.version = version; + updateVersion(version); this.found = found; } - /** - * before delete execution this is the version to be deleted. After this is the version of the "delete" transaction record. - */ - public long version() { - return this.version; - } - - public VersionType versionType() { - return this.versionType; - } - public boolean found() { return this.found; } - - /** - * Returns operation start time in nanoseconds. - */ - public long startTime() { - return this.startTime; - } - - public void endTime(long endTime) { - this.endTime = endTime; - } - - /** - * Returns operation end time in nanoseconds. 
- */ - public long endTime() { - return this.endTime; - } - - public void setTranslogLocation(Translog.Location location) { - this.location = location; - } - - public Translog.Location getTranslogLocation() { - return this.location; - } } public static class DeleteByQuery { @@ -1135,12 +1024,18 @@ public abstract class Engine implements Closeable { @Override public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } CommitId commitId = (CommitId) o; - if (!Arrays.equals(id, commitId.id)) return false; + if (!Arrays.equals(id, commitId.id)) { + return false; + } return true; } @@ -1151,5 +1046,6 @@ public abstract class Engine implements Closeable { } } - public void onSettingsChanged() {} + public void onSettingsChanged() { + } } diff --git a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java index a79587e4347..fd4b5daf4ee 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java +++ b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java @@ -40,6 +40,7 @@ import org.elasticsearch.index.shard.TranslogRecoveryPerformer; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.TranslogConfig; import org.elasticsearch.indices.IndicesWarmer; +import org.elasticsearch.indices.memory.IndexingMemoryController; import org.elasticsearch.threadpool.ThreadPool; import java.util.concurrent.TimeUnit; @@ -107,8 +108,6 @@ public final class EngineConfig { public static final TimeValue DEFAULT_REFRESH_INTERVAL = new TimeValue(1, TimeUnit.SECONDS); public static final TimeValue DEFAULT_GC_DELETES = TimeValue.timeValueSeconds(60); - public static final ByteSizeValue DEFAULT_INDEX_BUFFER_SIZE = new ByteSizeValue(64, ByteSizeUnit.MB); - public static final ByteSizeValue INACTIVE_SHARD_INDEXING_BUFFER = ByteSizeValue.parseBytesSizeValue("500kb", "INACTIVE_SHARD_INDEXING_BUFFER"); public static final String DEFAULT_VERSION_MAP_SIZE = "25%"; @@ -139,7 +138,8 @@ public final class EngineConfig { this.failedEngineListener = failedEngineListener; this.compoundOnFlush = indexSettings.getAsBoolean(EngineConfig.INDEX_COMPOUND_ON_FLUSH, compoundOnFlush); codecName = indexSettings.get(EngineConfig.INDEX_CODEC_SETTING, EngineConfig.DEFAULT_CODEC_NAME); - indexingBufferSize = DEFAULT_INDEX_BUFFER_SIZE; + // We start up inactive and rely on IndexingMemoryController to give us our fair share once we start indexing: + indexingBufferSize = IndexingMemoryController.INACTIVE_SHARD_INDEXING_BUFFER; gcDeletesInMillis = indexSettings.getAsTime(INDEX_GC_DELETES_SETTING, EngineConfig.DEFAULT_GC_DELETES).millis(); versionMapSizeSetting = indexSettings.get(INDEX_VERSION_MAP_SIZE, DEFAULT_VERSION_MAP_SIZE); updateVersionMapSize(); @@ -258,10 +258,10 @@ public final class EngineConfig { /** * Returns a {@link org.elasticsearch.index.indexing.ShardIndexingService} used inside the engine to inform about - * pre and post index and create operations. The operations are used for statistic purposes etc. + * pre and post index. The operations are used for statistic purposes etc. 
* - * @see org.elasticsearch.index.indexing.ShardIndexingService#postCreate(org.elasticsearch.index.engine.Engine.Create) - * @see org.elasticsearch.index.indexing.ShardIndexingService#preCreate(org.elasticsearch.index.engine.Engine.Create) + * @see org.elasticsearch.index.indexing.ShardIndexingService#postIndex(Engine.Index) + * @see org.elasticsearch.index.indexing.ShardIndexingService#preIndex(Engine.Index) * */ public ShardIndexingService getIndexingService() { diff --git a/core/src/main/java/org/elasticsearch/index/engine/EngineException.java b/core/src/main/java/org/elasticsearch/index/engine/EngineException.java index d7487ef66f1..23f6be7ffd2 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/EngineException.java +++ b/core/src/main/java/org/elasticsearch/index/engine/EngineException.java @@ -30,16 +30,16 @@ import java.io.IOException; */ public class EngineException extends ElasticsearchException { - public EngineException(ShardId shardId, String msg) { - this(shardId, msg, null); + public EngineException(ShardId shardId, String msg, Object... params) { + this(shardId, msg, null, params); } - public EngineException(ShardId shardId, String msg, Throwable cause) { - super(msg, cause); + public EngineException(ShardId shardId, String msg, Throwable cause, Object... params) { + super(msg, cause, params); setShard(shardId); } - public EngineException(StreamInput in) throws IOException{ + public EngineException(StreamInput in) throws IOException { super(in); } } \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 227212dd86e..3973b47f3ac 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -21,8 +21,9 @@ package org.elasticsearch.index.engine; import org.apache.lucene.index.*; import org.apache.lucene.index.IndexWriter.IndexReaderWarmer; -import org.apache.lucene.search.BooleanClause.Occur; -import org.apache.lucene.search.*; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.SearcherFactory; +import org.apache.lucene.search.SearcherManager; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; import org.apache.lucene.store.LockObtainFailedException; @@ -31,7 +32,7 @@ import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.InfoStream; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.cluster.routing.DjbHashFunction; +import org.elasticsearch.cluster.routing.Murmur3HashFunction; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.logging.ESLogger; @@ -48,7 +49,6 @@ import org.elasticsearch.index.indexing.ShardIndexingService; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.merge.OnGoingMerge; -import org.elasticsearch.index.search.nested.IncludeNestedDocsQuery; import org.elasticsearch.index.shard.ElasticsearchMergePolicy; import org.elasticsearch.index.shard.MergeSchedulerConfig; import org.elasticsearch.index.shard.ShardId; @@ -67,7 +67,6 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; import java.util.function.Function; -import 
java.util.function.Supplier; /** * @@ -182,8 +181,7 @@ public class InternalEngine extends Engine { } translogConfig.setTranslogGeneration(generation); if (generation != null && generation.translogUUID == null) { - // only upgrade on pre-2.0 indices... - Translog.upgradeLegacyTranslog(logger, translogConfig); + throw new IndexFormatTooOldException("translog", "translog has neither a generation nor a UUID - this might be an index from a previous version; consider upgrading to N-1 first"); } } final Translog translog = new Translog(translogConfig); @@ -316,7 +314,8 @@ public class InternalEngine extends Engine { } if (get.versionType().isVersionConflictForReads(versionValue.version(), get.version())) { Uid uid = Uid.createUid(get.uid().text()); - throw new VersionConflictEngineException(shardId, uid.type(), uid.id(), versionValue.version(), get.version()); + throw new VersionConflictEngineException(shardId, uid.type(), uid.id(), + get.versionType().explainConflictForReads(versionValue.version(), get.version())); } Translog.Operation op = translog.read(versionValue.translogLocation()); if (op != null) { @@ -331,96 +330,7 @@ } @Override - public void create(Create create) throws EngineException { - try (ReleasableLock lock = readLock.acquire()) { - ensureOpen(); - if (create.origin() == Operation.Origin.RECOVERY) { - // Don't throttle recovery operations - innerCreate(create); - } else { - try (Releasable r = throttle.acquireThrottle()) { - innerCreate(create); - } - } - } catch (OutOfMemoryError | IllegalStateException | IOException t) { - maybeFailEngine("create", t); - throw new CreateFailedEngineException(shardId, create.type(), create.id(), t); - } - checkVersionMapRefresh(); - } - - private void innerCreate(Create create) throws IOException { - synchronized (dirtyLock(create.uid())) { - final long currentVersion; - final VersionValue versionValue; - versionValue = versionMap.getUnderLock(create.uid().bytes()); - if (versionValue == null) { - currentVersion = loadCurrentVersionFromIndex(create.uid()); - } else { - if (engineConfig.isEnableGcDeletes() && versionValue.delete() && (engineConfig.getThreadPool().estimatedTimeInMillis() - versionValue.time()) > engineConfig.getGcDeletesInMillis()) { - currentVersion = Versions.NOT_FOUND; // deleted, and GC - } else { - currentVersion = versionValue.version(); - } - } - innerCreateUnderLock(create, currentVersion, versionValue); - } - } - - private void innerCreateUnderLock(Create create, long currentVersion, VersionValue versionValue) throws IOException { - - // same logic as index - long updatedVersion; - long expectedVersion = create.version(); - if (create.versionType().isVersionConflictForWrites(currentVersion, expectedVersion)) { - if (create.origin() == Operation.Origin.RECOVERY) { - return; - } else { - throw new VersionConflictEngineException(shardId, create.type(), create.id(), currentVersion, expectedVersion); - } - } - updatedVersion = create.versionType().updateVersion(currentVersion, expectedVersion); - - // if the doc exists - boolean doUpdate = false; - if ((versionValue != null && versionValue.delete() == false) || (versionValue == null && currentVersion != Versions.NOT_FOUND)) { - if (create.origin() == Operation.Origin.RECOVERY) { - return; - } else if (create.origin() == Operation.Origin.REPLICA) { - // #7142: the primary already determined it's OK to index this document, and we confirmed above that the version doesn't - // conflict, so we must also update here on the replica to remain
consistent: - doUpdate = true; - } else { - // On primary, we throw DAEE if the _uid is already in the index with an older version: - assert create.origin() == Operation.Origin.PRIMARY; - throw new DocumentAlreadyExistsException(shardId, create.type(), create.id()); - } - } - - create.updateVersion(updatedVersion); - - if (doUpdate) { - if (create.docs().size() > 1) { - indexWriter.updateDocuments(create.uid(), create.docs()); - } else { - indexWriter.updateDocument(create.uid(), create.docs().get(0)); - } - } else { - if (create.docs().size() > 1) { - indexWriter.addDocuments(create.docs()); - } else { - indexWriter.addDocument(create.docs().get(0)); - } - } - Translog.Location translogLocation = translog.add(new Translog.Create(create)); - - versionMap.putUnderLock(create.uid().bytes(), new VersionValue(updatedVersion, translogLocation)); - create.setTranslogLocation(translogLocation); - indexingService.postCreateUnderLock(create); - } - - @Override - public boolean index(Index index) throws EngineException { + public boolean index(Index index) { final boolean created; try (ReleasableLock lock = readLock.acquire()) { ensureOpen(); @@ -440,6 +350,67 @@ public class InternalEngine extends Engine { return created; } + private boolean innerIndex(Index index) throws IOException { + synchronized (dirtyLock(index.uid())) { + final long currentVersion; + final boolean deleted; + VersionValue versionValue = versionMap.getUnderLock(index.uid().bytes()); + if (versionValue == null) { + currentVersion = loadCurrentVersionFromIndex(index.uid()); + deleted = currentVersion == Versions.NOT_FOUND; + } else { + deleted = versionValue.delete(); + if (engineConfig.isEnableGcDeletes() && versionValue.delete() && (engineConfig.getThreadPool().estimatedTimeInMillis() - versionValue.time()) > engineConfig.getGcDeletesInMillis()) { + currentVersion = Versions.NOT_FOUND; // deleted, and GC + } else { + currentVersion = versionValue.version(); + } + } + + long expectedVersion = index.version(); + if (index.versionType().isVersionConflictForWrites(currentVersion, expectedVersion, deleted)) { + if (index.origin() == Operation.Origin.RECOVERY) { + return false; + } else { + throw new VersionConflictEngineException(shardId, index.type(), index.id(), + index.versionType().explainConflictForWrites(currentVersion, expectedVersion, deleted)); + } + } + long updatedVersion = index.versionType().updateVersion(currentVersion, expectedVersion); + + final boolean created; + index.updateVersion(updatedVersion); + + if (currentVersion == Versions.NOT_FOUND) { + // document does not exist, we can optimize for create + created = true; + if (index.docs().size() > 1) { + indexWriter.addDocuments(index.docs()); + } else { + indexWriter.addDocument(index.docs().get(0)); + } + } else { + if (versionValue != null) { + created = versionValue.delete(); // we have a delete which is not GC'ed... 
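+ // a delete still tracked in the version map means readers cannot see the
+ // document, so replacing it here counts as a create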
+ } else { + created = false; + } + if (index.docs().size() > 1) { + indexWriter.updateDocuments(index.uid(), index.docs()); + } else { + indexWriter.updateDocument(index.uid(), index.docs().get(0)); + } + } + Translog.Location translogLocation = translog.add(new Translog.Index(index)); + + versionMap.putUnderLock(index.uid().bytes(), new VersionValue(updatedVersion, translogLocation)); + index.setTranslogLocation(translogLocation); + + indexingService.postIndexUnderLock(index); + return created; + } + } + /** * Forces a refresh if the versionMap is using too much RAM */ @@ -467,62 +438,6 @@ public class InternalEngine extends Engine { } } - private boolean innerIndex(Index index) throws IOException { - synchronized (dirtyLock(index.uid())) { - final long currentVersion; - VersionValue versionValue = versionMap.getUnderLock(index.uid().bytes()); - if (versionValue == null) { - currentVersion = loadCurrentVersionFromIndex(index.uid()); - } else { - if (engineConfig.isEnableGcDeletes() && versionValue.delete() && (engineConfig.getThreadPool().estimatedTimeInMillis() - versionValue.time()) > engineConfig.getGcDeletesInMillis()) { - currentVersion = Versions.NOT_FOUND; // deleted, and GC - } else { - currentVersion = versionValue.version(); - } - } - - long updatedVersion; - long expectedVersion = index.version(); - if (index.versionType().isVersionConflictForWrites(currentVersion, expectedVersion)) { - if (index.origin() == Operation.Origin.RECOVERY) { - return false; - } else { - throw new VersionConflictEngineException(shardId, index.type(), index.id(), currentVersion, expectedVersion); - } - } - updatedVersion = index.versionType().updateVersion(currentVersion, expectedVersion); - - final boolean created; - index.updateVersion(updatedVersion); - if (currentVersion == Versions.NOT_FOUND) { - // document does not exists, we can optimize for create - created = true; - if (index.docs().size() > 1) { - indexWriter.addDocuments(index.docs()); - } else { - indexWriter.addDocument(index.docs().get(0)); - } - } else { - if (versionValue != null) { - created = versionValue.delete(); // we have a delete which is not GC'ed... 
- } else { - created = false; - } - if (index.docs().size() > 1) { - indexWriter.updateDocuments(index.uid(), index.docs()); - } else { - indexWriter.updateDocument(index.uid(), index.docs().get(0)); - } - } - Translog.Location translogLocation = translog.add(new Translog.Index(index)); - - versionMap.putUnderLock(index.uid().bytes(), new VersionValue(updatedVersion, translogLocation)); - index.setTranslogLocation(translogLocation); - indexingService.postIndexUnderLock(index); - return created; - } - } - @Override public void delete(Delete delete) throws EngineException { try (ReleasableLock lock = readLock.acquire()) { @@ -549,10 +464,13 @@ public class InternalEngine extends Engine { private void innerDelete(Delete delete) throws IOException { synchronized (dirtyLock(delete.uid())) { final long currentVersion; + final boolean deleted; VersionValue versionValue = versionMap.getUnderLock(delete.uid().bytes()); if (versionValue == null) { currentVersion = loadCurrentVersionFromIndex(delete.uid()); + deleted = currentVersion == Versions.NOT_FOUND; } else { + deleted = versionValue.delete(); if (engineConfig.isEnableGcDeletes() && versionValue.delete() && (engineConfig.getThreadPool().estimatedTimeInMillis() - versionValue.time()) > engineConfig.getGcDeletesInMillis()) { currentVersion = Versions.NOT_FOUND; // deleted, and GC } else { @@ -562,11 +480,12 @@ public class InternalEngine extends Engine { long updatedVersion; long expectedVersion = delete.version(); - if (delete.versionType().isVersionConflictForWrites(currentVersion, expectedVersion)) { + if (delete.versionType().isVersionConflictForWrites(currentVersion, expectedVersion, deleted)) { if (delete.origin() == Operation.Origin.RECOVERY) { return; } else { - throw new VersionConflictEngineException(shardId, delete.type(), delete.id(), currentVersion, expectedVersion); + throw new VersionConflictEngineException(shardId, delete.type(), delete.id(), + delete.versionType().explainConflictForWrites(currentVersion, expectedVersion, deleted)); } } updatedVersion = delete.versionType().updateVersion(currentVersion, expectedVersion); @@ -591,48 +510,6 @@ public class InternalEngine extends Engine { } } - /** @deprecated This was removed, but we keep this API so translog can replay any DBQs on upgrade. 
*/ - @Deprecated - @Override - public void delete(DeleteByQuery delete) throws EngineException { - try (ReleasableLock lock = readLock.acquire()) { - ensureOpen(); - if (delete.origin() == Operation.Origin.RECOVERY) { - // Don't throttle recovery operations - innerDelete(delete); - } else { - try (Releasable r = throttle.acquireThrottle()) { - innerDelete(delete); - } - } - } - } - - private void innerDelete(DeleteByQuery delete) throws EngineException { - try { - Query query = delete.query(); - if (delete.aliasFilter() != null) { - query = new BooleanQuery.Builder() - .add(query, Occur.MUST) - .add(delete.aliasFilter(), Occur.FILTER) - .build(); - } - if (delete.nested()) { - query = new IncludeNestedDocsQuery(query, delete.parentFilter()); - } - - indexWriter.deleteDocuments(query); - translog.add(new Translog.DeleteByQuery(delete)); - } catch (Throwable t) { - maybeFailEngine("delete_by_query", t); - throw new DeleteByQueryFailedEngineException(shardId, delete, t); - } - - // TODO: This is heavy, since we refresh, but we must do this because we don't know which documents were in fact deleted (i.e., our - // versionMap isn't updated), so we must force a cutover to a new reader to "see" the deletions: - refresh("delete_by_query"); - } - @Override public void refresh(String source) throws EngineException { // we obtain a read lock here, since we don't want a flush to happen while we are refreshing @@ -904,6 +781,11 @@ public class InternalEngine extends Engine { stats.addIndexWriterMaxMemoryInBytes((long) (indexWriter.getConfig().getRAMBufferSizeMB() * 1024 * 1024)); } + @Override + public long indexWriterRAMBytesUsed() { + return indexWriter.ramBytesUsed(); + } + @Override public List segments(boolean verbose) { try (ReleasableLock lock = readLock.acquire()) { @@ -974,7 +856,7 @@ public class InternalEngine extends Engine { } private Object dirtyLock(BytesRef uid) { - int hash = DjbHashFunction.DJB_HASH(uid.bytes, uid.offset, uid.length); + int hash = Murmur3HashFunction.hash(uid.bytes, uid.offset, uid.length); return dirtyLocks[MathUtils.mod(hash, dirtyLocks.length)]; } diff --git a/core/src/main/java/org/elasticsearch/index/engine/Segment.java b/core/src/main/java/org/elasticsearch/index/engine/Segment.java index cbccaa1959a..7d3882fd9b6 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/Segment.java +++ b/core/src/main/java/org/elasticsearch/index/engine/Segment.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.engine; -import com.google.common.collect.Iterators; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.Accountables; import org.elasticsearch.common.Nullable; @@ -32,7 +31,6 @@ import org.elasticsearch.common.unit.ByteSizeValue; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; -import java.util.Iterator; import java.util.List; public class Segment implements Streamable { diff --git a/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java b/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java index 7588ffae355..921f1167f43 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java @@ -102,11 +102,6 @@ public class ShadowEngine extends Engine { } - @Override - public void create(Create create) throws EngineException { - throw new UnsupportedOperationException(shardId + " create operation not allowed on shadow engine"); - } - @Override public boolean index(Index index) throws 
EngineException { throw new UnsupportedOperationException(shardId + " index operation not allowed on shadow engine"); @@ -117,13 +112,6 @@ public class ShadowEngine extends Engine { throw new UnsupportedOperationException(shardId + " delete operation not allowed on shadow engine"); } - /** @deprecated This was removed, but we keep this API so translog can replay any DBQs on upgrade. */ - @Deprecated - @Override - public void delete(DeleteByQuery delete) throws EngineException { - throw new UnsupportedOperationException(shardId + " delete-by-query operation not allowed on shadow engine"); - } - @Override public SyncedFlushResult syncFlush(String syncId, CommitId expectedCommitId) { throw new UnsupportedOperationException(shardId + " sync commit operation not allowed on shadow engine"); @@ -245,4 +233,9 @@ public class ShadowEngine extends Engine { return lastCommittedSegmentInfos; } + @Override + public long indexWriterRAMBytesUsed() { + // No IndexWriter + throw new UnsupportedOperationException("ShadowEngine has no IndexWriter"); + } } diff --git a/core/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java b/core/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java index 8c2d35297e7..9b038c6e77c 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java +++ b/core/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java @@ -29,8 +29,16 @@ import java.io.IOException; */ public class VersionConflictEngineException extends EngineException { - public VersionConflictEngineException(ShardId shardId, String type, String id, long current, long provided) { - super(shardId, "[" + type + "][" + id + "]: version conflict, current [" + current + "], provided [" + provided + "]"); + public VersionConflictEngineException(ShardId shardId, String type, String id, String explanation) { + this(shardId, null, type, id, explanation); + } + + public VersionConflictEngineException(ShardId shardId, Throwable cause, String type, String id, String explanation) { + this(shardId, "[{}][{}]: version conflict, {}", cause, type, id, explanation); + } + + public VersionConflictEngineException(ShardId shardId, String msg, Throwable cause, Object... params) { + super(shardId, msg, cause, params); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/indexing/IndexingOperationListener.java b/core/src/main/java/org/elasticsearch/index/indexing/IndexingOperationListener.java index 858453fcba4..651bc405a84 100644 --- a/core/src/main/java/org/elasticsearch/index/indexing/IndexingOperationListener.java +++ b/core/src/main/java/org/elasticsearch/index/indexing/IndexingOperationListener.java @@ -28,39 +28,8 @@ public abstract class IndexingOperationListener { /** * Called before the indexing occurs. */ - public Engine.Create preCreate(Engine.Create create) { - return create; - } - - /** - * Called after the indexing occurs, under a locking scheme to maintain - * concurrent updates to the same doc. - *
    - * Note, long operations should not occur under this callback. - */ - public void postCreateUnderLock(Engine.Create create) { - - } - - /** - * Called after create index operation occurred. - */ - public void postCreate(Engine.Create create) { - - } - - /** - * Called after create index operation occurred with exception. - */ - public void postCreate(Engine.Create create, Throwable ex) { - - } - - /** - * Called before the indexing occurs. - */ - public Engine.Index preIndex(Engine.Index index) { - return index; + public Engine.Index preIndex(Engine.Index operation) { + return operation; } /** diff --git a/core/src/main/java/org/elasticsearch/index/indexing/IndexingSlowLog.java b/core/src/main/java/org/elasticsearch/index/indexing/IndexingSlowLog.java index ea45db2e912..292c2a16e91 100644 --- a/core/src/main/java/org/elasticsearch/index/indexing/IndexingSlowLog.java +++ b/core/src/main/java/org/elasticsearch/index/indexing/IndexingSlowLog.java @@ -128,10 +128,6 @@ public final class IndexingSlowLog { postIndexing(index.parsedDoc(), tookInNanos); } - void postCreate(Engine.Create create, long tookInNanos) { - postIndexing(create.parsedDoc(), tookInNanos); - } - /** * Reads how much of the source to log. The user can specify any value they * like and numbers are interpreted the maximum number of characters to log diff --git a/core/src/main/java/org/elasticsearch/index/indexing/ShardIndexingService.java b/core/src/main/java/org/elasticsearch/index/indexing/ShardIndexingService.java index b61ba7fe04d..a3a1fa5b4a7 100644 --- a/core/src/main/java/org/elasticsearch/index/indexing/ShardIndexingService.java +++ b/core/src/main/java/org/elasticsearch/index/indexing/ShardIndexingService.java @@ -86,25 +86,6 @@ public class ShardIndexingService extends AbstractIndexShardComponent { listeners.remove(listener); } - public Engine.Create preCreate(Engine.Create create) { - totalStats.indexCurrent.inc(); - typeStats(create.type()).indexCurrent.inc(); - for (IndexingOperationListener listener : listeners) { - create = listener.preCreate(create); - } - return create; - } - - public void postCreateUnderLock(Engine.Create create) { - for (IndexingOperationListener listener : listeners) { - try { - listener.postCreateUnderLock(create); - } catch (Exception e) { - logger.warn("postCreateUnderLock listener [{}] failed", e, listener); - } - } - } - public void throttlingActivated() { totalStats.setThrottled(true); } @@ -113,40 +94,13 @@ public class ShardIndexingService extends AbstractIndexShardComponent { totalStats.setThrottled(false); } - public void postCreate(Engine.Create create) { - long took = create.endTime() - create.startTime(); - totalStats.indexMetric.inc(took); - totalStats.indexCurrent.dec(); - StatsHolder typeStats = typeStats(create.type()); - typeStats.indexMetric.inc(took); - typeStats.indexCurrent.dec(); - slowLog.postCreate(create, took); - for (IndexingOperationListener listener : listeners) { - try { - listener.postCreate(create); - } catch (Exception e) { - logger.warn("postCreate listener [{}] failed", e, listener); - } - } - } - - public void postCreate(Engine.Create create, Throwable ex) { - for (IndexingOperationListener listener : listeners) { - try { - listener.postCreate(create, ex); - } catch (Throwable t) { - logger.warn("postCreate listener [{}] failed", t, listener); - } - } - } - - public Engine.Index preIndex(Engine.Index index) { + public Engine.Index preIndex(Engine.Index operation) { totalStats.indexCurrent.inc(); - typeStats(index.type()).indexCurrent.inc(); + 
typeStats(operation.type()).indexCurrent.inc(); for (IndexingOperationListener listener : listeners) { - index = listener.preIndex(index); + operation = listener.preIndex(operation); } - return index; + return operation; } public void postIndexUnderLock(Engine.Index index) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java index 65a6d3ae91d..91371a2663a 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java @@ -64,7 +64,7 @@ import org.elasticsearch.index.mapper.ip.IpFieldMapper; import org.elasticsearch.index.mapper.object.ObjectMapper; import org.elasticsearch.index.mapper.object.RootObjectMapper; import org.elasticsearch.index.settings.IndexSettings; -import org.elasticsearch.index.similarity.SimilarityLookupService; +import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; @@ -86,7 +86,7 @@ public class DocumentMapperParser { final MapperService mapperService; final AnalysisService analysisService; private static final ESLogger logger = Loggers.getLogger(DocumentMapperParser.class); - private final SimilarityLookupService similarityLookupService; + private final SimilarityService similarityService; private final ScriptService scriptService; private final RootObjectMapper.TypeParser rootObjectTypeParser = new RootObjectMapper.TypeParser(); @@ -100,12 +100,12 @@ public class DocumentMapperParser { private volatile SortedMap additionalRootMappers; public DocumentMapperParser(@IndexSettings Settings indexSettings, MapperService mapperService, AnalysisService analysisService, - SimilarityLookupService similarityLookupService, ScriptService scriptService) { + SimilarityService similarityService, ScriptService scriptService) { this.indexSettings = indexSettings; this.parseFieldMatcher = new ParseFieldMatcher(indexSettings); this.mapperService = mapperService; this.analysisService = analysisService; - this.similarityLookupService = similarityLookupService; + this.similarityService = similarityService; this.scriptService = scriptService; Map typeParsers = new HashMap<>(); typeParsers.put(ByteFieldMapper.CONTENT_TYPE, new ByteFieldMapper.TypeParser()); @@ -170,7 +170,7 @@ public class DocumentMapperParser { } public Mapper.TypeParser.ParserContext parserContext(String type) { - return new Mapper.TypeParser.ParserContext(type, analysisService, similarityLookupService, mapperService, typeParsers, indexVersionCreated, parseFieldMatcher); + return new Mapper.TypeParser.ParserContext(type, analysisService, similarityService::getSimilarity, mapperService, typeParsers::get, indexVersionCreated, parseFieldMatcher); } public DocumentMapper parse(String source) throws MapperParsingException { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index db2919e0217..97435e039e1 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -122,7 +122,7 @@ class DocumentParser implements Closeable { // entire type is disabled parser.skipChildren(); } else if (emptyDoc == false) { - Mapper update = parseObject(context, mapping.root); + Mapper update = parseObject(context, mapping.root, true); 
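
The true passed to parseObject above marks the root object, feeding the metadata-field guard added in the hunk just below: a source document that smuggles a metadata field now fails fast on 2.0+ indices. A hypothetical request and outcome:

    // PUT /test/doc/1   with source: { "_uid": "1", "user": "kimchy" }
    // -> MapperParsingException: Field [_uid] is a metadata field and cannot be
    //    added inside a document. Use the index API request parameters.
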
if (update != null) { context.addDynamicMappingsUpdate(update); } @@ -194,7 +194,7 @@ class DocumentParser implements Closeable { return doc; } - static ObjectMapper parseObject(ParseContext context, ObjectMapper mapper) throws IOException { + static ObjectMapper parseObject(ParseContext context, ObjectMapper mapper, boolean atRoot) throws IOException { if (mapper.isEnabled() == false) { context.parser().skipChildren(); return null; @@ -202,6 +202,10 @@ class DocumentParser implements Closeable { XContentParser parser = context.parser(); String currentFieldName = parser.currentName(); + if (atRoot && MapperService.isMetadataField(currentFieldName) && + Version.indexCreated(context.indexSettings()).onOrAfter(Version.V_2_0_0_beta1)) { + throw new MapperParsingException("Field [" + currentFieldName + "] is a metadata field and cannot be added inside a document. Use the index API request parameters."); + } XContentParser.Token token = parser.currentToken(); if (token == XContentParser.Token.VALUE_NULL) { // the object is null ("obj1" : null), simply bail @@ -302,7 +306,7 @@ class DocumentParser implements Closeable { private static Mapper parseObjectOrField(ParseContext context, Mapper mapper) throws IOException { if (mapper instanceof ObjectMapper) { - return parseObject(context, (ObjectMapper) mapper); + return parseObject(context, (ObjectMapper) mapper, false); } else { FieldMapper fieldMapper = (FieldMapper)mapper; Mapper update = fieldMapper.parse(context); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java index ec53fbaa0b4..45bef68ee00 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java @@ -34,8 +34,8 @@ import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.core.TypeParsers; import org.elasticsearch.index.mapper.internal.AllFieldMapper; -import org.elasticsearch.index.similarity.SimilarityLookupService; import org.elasticsearch.index.similarity.SimilarityProvider; +import org.elasticsearch.index.similarity.SimilarityService; import java.io.IOException; import java.util.ArrayList; @@ -447,7 +447,7 @@ public abstract class FieldMapper extends Mapper { if (fieldType().similarity() != null) { builder.field("similarity", fieldType().similarity().name()); } else if (includeDefaults) { - builder.field("similarity", SimilarityLookupService.DEFAULT_SIMILARITY); + builder.field("similarity", SimilarityService.DEFAULT_SIMILARITY); } if (includeDefaults || hasCustomFieldDataSettings()) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java index f55ca93c276..9ca34e1c573 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java @@ -26,9 +26,10 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.index.analysis.AnalysisService; -import org.elasticsearch.index.similarity.SimilarityLookupService; +import org.elasticsearch.index.similarity.SimilarityProvider; import java.util.Map; +import java.util.function.Function; public abstract class Mapper implements ToXContent, Iterable { @@ -84,18 +85,18 @@ public abstract class Mapper 
implements ToXContent, Iterable { private final AnalysisService analysisService; - private final SimilarityLookupService similarityLookupService; + private final Function similarityLookupService; private final MapperService mapperService; - private final Map typeParsers; + private final Function typeParsers; private final Version indexVersionCreated; private final ParseFieldMatcher parseFieldMatcher; - public ParserContext(String type, AnalysisService analysisService, SimilarityLookupService similarityLookupService, - MapperService mapperService, Map typeParsers, + public ParserContext(String type, AnalysisService analysisService, Function similarityLookupService, + MapperService mapperService, Function typeParsers, Version indexVersionCreated, ParseFieldMatcher parseFieldMatcher) { this.type = type; this.analysisService = analysisService; @@ -114,8 +115,8 @@ public abstract class Mapper implements ToXContent, Iterable { return analysisService; } - public SimilarityLookupService similarityLookupService() { - return similarityLookupService; + public SimilarityProvider getSimilarity(String name) { + return similarityLookupService.apply(name); } public MapperService mapperService() { @@ -123,7 +124,7 @@ public abstract class Mapper implements ToXContent, Iterable { } public TypeParser typeParser(String type) { - return typeParsers.get(Strings.toUnderscoreCase(type)); + return typeParsers.apply(Strings.toUnderscoreCase(type)); } public Version indexVersionCreated() { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 0357ef24060..bbd96f7d930 100755 --- a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.mapper; import com.carrotsearch.hppc.ObjectHashSet; -import com.google.common.collect.Iterators; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.DelegatingAnalyzerWrapper; @@ -51,7 +50,7 @@ import org.elasticsearch.index.mapper.Mapper.BuilderContext; import org.elasticsearch.index.mapper.internal.TypeFieldMapper; import org.elasticsearch.index.mapper.object.ObjectMapper; import org.elasticsearch.index.settings.IndexSettings; -import org.elasticsearch.index.similarity.SimilarityLookupService; +import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.indices.InvalidTypeNameException; import org.elasticsearch.indices.TypeMissingException; import org.elasticsearch.percolator.PercolatorService; @@ -65,13 +64,13 @@ import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.Function; +import java.util.stream.Collectors; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; @@ -126,12 +125,12 @@ public class MapperService extends AbstractIndexComponent implements Closeable { @Inject public MapperService(Index index, @IndexSettings Settings indexSettings, AnalysisService analysisService, - SimilarityLookupService similarityLookupService, + SimilarityService similarityService, ScriptService scriptService) { super(index, indexSettings); this.analysisService = analysisService; 
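The `ParserContext` change above is a decoupling pattern worth calling out: instead of depending on a concrete lookup service (or a raw `Map`), the context now accepts `Function<String, T>` values, so call sites can pass method references such as `similarityService::getSimilarity` and `typeParsers::get`. A small self-contained sketch of the idea, with hypothetical names:

```java
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

final class LookupContext<T> {
    private final Function<String, T> lookup;

    LookupContext(Function<String, T> lookup) { this.lookup = lookup; }

    T resolve(String name) { return lookup.apply(name); }

    public static void main(String[] args) {
        Map<String, String> registry = new HashMap<>();
        registry.put("BM25", "bm25-provider");
        // Any String -> T source fits: a map, a service method, a lambda.
        LookupContext<String> ctx = new LookupContext<>(registry::get);
        System.out.println(ctx.resolve("BM25")); // prints bm25-provider
    }
}
```

The consumer no longer needs the lookup service's type at all, which is what lets `SimilarityLookupService` disappear in this patch.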
this.fieldTypes = new FieldTypeLookup(); - this.documentParser = new DocumentMapperParser(indexSettings, this, analysisService, similarityLookupService, scriptService); + this.documentParser = new DocumentMapperParser(indexSettings, this, analysisService, similarityService, scriptService); this.indexAnalyzer = new MapperAnalyzerWrapper(analysisService.defaultIndexAnalyzer(), p -> p.indexAnalyzer()); this.searchAnalyzer = new MapperAnalyzerWrapper(analysisService.defaultSearchAnalyzer(), p -> p.searchAnalyzer()); this.searchQuoteAnalyzer = new MapperAnalyzerWrapper(analysisService.defaultSearchQuoteAnalyzer(), p -> p.searchQuoteAnalyzer()); @@ -186,13 +185,13 @@ public class MapperService extends AbstractIndexComponent implements Closeable { */ public Iterable docMappers(final boolean includingDefaultMapping) { return () -> { - final Iterator iterator; + final Collection documentMappers; if (includingDefaultMapping) { - iterator = mappers.values().iterator(); + documentMappers = mappers.values(); } else { - iterator = mappers.values().stream().filter(mapper -> !DEFAULT_MAPPING.equals(mapper.type())).iterator(); + documentMappers = mappers.values().stream().filter(mapper -> !DEFAULT_MAPPING.equals(mapper.type())).collect(Collectors.toList()); } - return Iterators.unmodifiableIterator(iterator); + return Collections.unmodifiableCollection(documentMappers).iterator(); }; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java index 78d038526b3..7468f4fb2f6 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java @@ -79,7 +79,6 @@ public class BinaryFieldMapper extends FieldMapper { @Override public BinaryFieldMapper build(BuilderContext context) { setupFieldType(context); - ((BinaryFieldType)fieldType).setTryUncompressing(context.indexCreatedVersion().before(Version.V_2_0_0_beta1)); return new BinaryFieldMapper(name, fieldType, defaultFieldType, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); } @@ -103,13 +102,11 @@ public class BinaryFieldMapper extends FieldMapper { } static final class BinaryFieldType extends MappedFieldType { - private boolean tryUncompressing = false; public BinaryFieldType() {} protected BinaryFieldType(BinaryFieldType ref) { super(ref); - this.tryUncompressing = ref.tryUncompressing; } @Override @@ -117,40 +114,12 @@ public class BinaryFieldMapper extends FieldMapper { return new BinaryFieldType(this); } - @Override - public boolean equals(Object o) { - if (!super.equals(o)) return false; - BinaryFieldType that = (BinaryFieldType) o; - return Objects.equals(tryUncompressing, that.tryUncompressing); - } - - @Override - public int hashCode() { - return Objects.hash(super.hashCode(), tryUncompressing); - } @Override public String typeName() { return CONTENT_TYPE; } - @Override - public void checkCompatibility(MappedFieldType fieldType, List conflicts, boolean strict) { - super.checkCompatibility(fieldType, conflicts, strict); - BinaryFieldType other = (BinaryFieldType)fieldType; - if (tryUncompressing() != other.tryUncompressing()) { - conflicts.add("mapper [" + names().fullName() + "] has different [try_uncompressing] (IMPOSSIBLE)"); - } - } - - public boolean tryUncompressing() { - return tryUncompressing; - } - - public void setTryUncompressing(boolean tryUncompressing) { - checkIfFrozen(); - 
this.tryUncompressing = tryUncompressing; - } @Override public BytesReference value(Object value) { @@ -172,15 +141,7 @@ public class BinaryFieldMapper extends FieldMapper { throw new ElasticsearchParseException("failed to convert bytes", e); } } - try { - if (tryUncompressing) { // backcompat behavior - return CompressorFactory.uncompressIfNeeded(bytes); - } else { - return bytes; - } - } catch (IOException e) { - throw new ElasticsearchParseException("failed to decompress source", e); - } + return bytes; } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java index 7f06c223e62..0e512bf4281 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java @@ -19,8 +19,6 @@ package org.elasticsearch.index.mapper.core; -import com.carrotsearch.hppc.DoubleArrayList; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.document.Field; @@ -36,8 +34,6 @@ import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Numbers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; -import org.elasticsearch.common.util.ByteUtils; -import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.analysis.NamedAnalyzer; @@ -286,17 +282,7 @@ public class DoubleFieldMapper extends NumberFieldMapper { fields.add(field); } if (fieldType().hasDocValues()) { - if (useSortedNumericDocValues) { - addDocValue(context, fields, doubleToSortableLong(value)); - } else { - CustomDoubleNumericDocValuesField field = (CustomDoubleNumericDocValuesField) context.doc().getByKey(fieldType().names().indexName()); - if (field != null) { - field.add(value); - } else { - field = new CustomDoubleNumericDocValuesField(fieldType().names().indexName(), value); - context.doc().addWithKey(fieldType().names().indexName(), field); - } - } + addDocValue(context, fields, doubleToSortableLong(value)); } } @@ -346,30 +332,4 @@ public class DoubleFieldMapper extends NumberFieldMapper { } } - public static class CustomDoubleNumericDocValuesField extends CustomNumericDocValuesField { - - private final DoubleArrayList values; - - public CustomDoubleNumericDocValuesField(String name, double value) { - super(name); - values = new DoubleArrayList(); - add(value); - } - - public void add(double value) { - values.add(value); - } - - @Override - public BytesRef binaryValue() { - CollectionUtils.sortAndDedup(values); - - final byte[] bytes = new byte[values.size() * 8]; - for (int i = 0; i < values.size(); ++i) { - ByteUtils.writeDoubleLE(values.get(i), bytes, i * 8); - } - return new BytesRef(bytes); - } - - } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java index caeb2d7a188..9a607ffd415 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java @@ -19,8 +19,6 @@ package org.elasticsearch.index.mapper.core; -import com.carrotsearch.hppc.FloatArrayList; - import org.apache.lucene.analysis.Analyzer; import 
org.apache.lucene.analysis.TokenStream; import org.apache.lucene.document.Field; @@ -37,8 +35,6 @@ import org.elasticsearch.common.Numbers; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; -import org.elasticsearch.common.util.ByteUtils; -import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.analysis.NamedAnalyzer; @@ -298,17 +294,7 @@ public class FloatFieldMapper extends NumberFieldMapper { fields.add(field); } if (fieldType().hasDocValues()) { - if (useSortedNumericDocValues) { - addDocValue(context, fields, floatToSortableInt(value)); - } else { - CustomFloatNumericDocValuesField field = (CustomFloatNumericDocValuesField) context.doc().getByKey(fieldType().names().indexName()); - if (field != null) { - field.add(value); - } else { - field = new CustomFloatNumericDocValuesField(fieldType().names().indexName(), value); - context.doc().addWithKey(fieldType().names().indexName(), field); - } - } + addDocValue(context, fields, floatToSortableInt(value)); } } @@ -357,31 +343,4 @@ public class FloatFieldMapper extends NumberFieldMapper { return Float.toString(number); } } - - public static class CustomFloatNumericDocValuesField extends CustomNumericDocValuesField { - - private final FloatArrayList values; - - public CustomFloatNumericDocValuesField(String name, float value) { - super(name); - values = new FloatArrayList(); - add(value); - } - - public void add(float value) { - values.add(value); - } - - @Override - public BytesRef binaryValue() { - CollectionUtils.sortAndDedup(values); - - final byte[] bytes = new byte[values.size() * 4]; - for (int i = 0; i < values.size(); ++i) { - ByteUtils.writeFloatLE(values.get(i), bytes, i * 4); - } - return new BytesRef(bytes); - } - - } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java index 78406c2afbc..3fba511fb52 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.mapper.core; -import com.carrotsearch.hppc.LongArrayList; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.NumericTokenStream; import org.apache.lucene.analysis.TokenStream; @@ -31,14 +30,10 @@ import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.IndexableFieldType; import org.apache.lucene.search.Query; -import org.apache.lucene.store.ByteArrayDataOutput; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; -import org.elasticsearch.common.util.ByteUtils; -import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.mapper.*; @@ -170,21 +165,12 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM protected Explicit coerce; - /** - * True if index version is 1.4+ - *
<p>
    - * In this case numerics are encoded with SORTED_NUMERIC docvalues, - * otherwise for older indexes we must continue to write BINARY (for now) - */ - protected final boolean useSortedNumericDocValues; - protected NumberFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Explicit ignoreMalformed, Explicit coerce, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) { super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo); this.ignoreMalformed = ignoreMalformed; this.coerce = coerce; - this.useSortedNumericDocValues = Version.indexCreated(indexSettings).onOrAfter(Version.V_1_4_0_Beta1); } @Override @@ -225,17 +211,7 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM protected abstract void innerParseCreateField(ParseContext context, List fields) throws IOException; protected final void addDocValue(ParseContext context, List fields, long value) { - if (useSortedNumericDocValues) { - fields.add(new SortedNumericDocValuesField(fieldType().names().indexName(), value)); - } else { - CustomLongNumericDocValuesField field = (CustomLongNumericDocValuesField) context.doc().getByKey(fieldType().names().indexName()); - if (field != null) { - field.add(value); - } else { - field = new CustomLongNumericDocValuesField(fieldType().names().indexName(), value); - context.doc().addWithKey(fieldType().names().indexName(), field); - } - } + fields.add(new SortedNumericDocValuesField(fieldType().names().indexName(), value)); } /** @@ -414,40 +390,6 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM } - - public static class CustomLongNumericDocValuesField extends CustomNumericDocValuesField { - - private final LongArrayList values; - - public CustomLongNumericDocValuesField(String name, long value) { - super(name); - values = new LongArrayList(); - add(value); - } - - public void add(long value) { - values.add(value); - } - - @Override - public BytesRef binaryValue() { - CollectionUtils.sortAndDedup(values); - - // here is the trick: - // - the first value is zig-zag encoded so that eg. 
-5 would become positive and would be better compressed by vLong - // - for other values, we only encode deltas using vLong - final byte[] bytes = new byte[values.size() * ByteUtils.MAX_BYTES_VLONG]; - final ByteArrayDataOutput out = new ByteArrayDataOutput(bytes); - ByteUtils.writeVLong(out, ByteUtils.zigZagEncode(values.get(0))); - for (int i = 1; i < values.size(); ++i) { - final long delta = values.get(i) - values.get(i - 1); - ByteUtils.writeVLong(out, delta); - } - return new BytesRef(bytes, 0, out.getPosition()); - } - - } - @Override protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException { super.doXContentBody(builder, includeDefaults, params); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java b/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java index 0588bd1e044..3f142cc2f9c 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java @@ -173,7 +173,7 @@ public class TypeParsers { builder.omitNorms(nodeBooleanValue(propNode)); iterator.remove(); } else if (propName.equals("similarity")) { - builder.similarity(parserContext.similarityLookupService().similarity(propNode.toString())); + builder.similarity(parserContext.getSimilarity(propNode.toString())); iterator.remove(); } else if (parseMultiField(builder, name, parserContext, propName, propNode)) { iterator.remove(); @@ -277,7 +277,7 @@ public class TypeParsers { // ignore for old indexes iterator.remove(); } else if (propName.equals("similarity")) { - builder.similarity(parserContext.similarityLookupService().similarity(propNode.toString())); + builder.similarity(parserContext.getSimilarity(propNode.toString())); iterator.remove(); } else if (propName.equals("fielddata")) { final Settings settings = Settings.builder().put(SettingsLoader.Helper.loadNestedFromMap(nodeMapValue(propNode, "fielddata"))).build(); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java index 4111786009e..b264bfa4bc3 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.mapper.geo; import com.carrotsearch.hppc.ObjectHashSet; import com.carrotsearch.hppc.cursors.ObjectCursor; -import com.google.common.collect.Iterators; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.util.BytesRef; @@ -30,6 +29,7 @@ import org.apache.lucene.util.XGeoHashUtils; import org.elasticsearch.Version; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.geo.GeoDistance; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; @@ -39,14 +39,7 @@ import org.elasticsearch.common.util.ByteUtils; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.support.XContentMapValues; -import org.elasticsearch.index.mapper.ContentPath; -import org.elasticsearch.index.mapper.FieldMapper; -import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.Mapper; 
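The comment in the deleted `CustomLongNumericDocValuesField.binaryValue()` above documents the legacy BINARY doc-values encoding: sort and deduplicate the values, zig-zag encode the first one (so e.g. -5 becomes a small positive number that a vLong compresses well), then write each subsequent value as a delta. Its replacement in `addDocValue` is simply one `SortedNumericDocValuesField` per value. For reference, a self-contained sketch of the retired scheme, with hypothetical helpers rather than the real `ByteUtils`:

```java
import java.io.ByteArrayOutputStream;

final class ZigZagDeltaEncoder {

    /** Moves the sign bit to bit 0: -5 -> 9, 3 -> 6. */
    static long zigZag(long l) {
        return (l << 1) ^ (l >> 63);
    }

    /** Variable-length encoding: 7 data bits per byte, high bit = "more follows". */
    static void writeVLong(ByteArrayOutputStream out, long v) {
        while ((v & ~0x7FL) != 0L) {
            out.write((int) ((v & 0x7FL) | 0x80L));
            v >>>= 7;
        }
        out.write((int) v);
    }

    /** values must already be sorted and deduplicated. */
    static byte[] encode(long[] values) {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        writeVLong(out, zigZag(values[0]));             // only the head needs zig-zag
        for (int i = 1; i < values.length; i++) {
            writeVLong(out, values[i] - values[i - 1]); // sorted, so deltas are >= 0
        }
        return out.toByteArray();
    }
}
```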
-import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeMappingException; -import org.elasticsearch.index.mapper.MergeResult; -import org.elasticsearch.index.mapper.ParseContext; +import org.elasticsearch.index.mapper.*; import org.elasticsearch.index.mapper.core.DoubleFieldMapper; import org.elasticsearch.index.mapper.core.NumberFieldMapper; import org.elasticsearch.index.mapper.core.NumberFieldMapper.CustomNumericDocValuesField; @@ -54,18 +47,10 @@ import org.elasticsearch.index.mapper.core.StringFieldMapper; import org.elasticsearch.index.mapper.object.ArrayValueMapperParser; import java.io.IOException; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.Locale; -import java.util.Map; +import java.util.*; -import static org.elasticsearch.index.mapper.MapperBuilders.doubleField; -import static org.elasticsearch.index.mapper.MapperBuilders.geoPointField; -import static org.elasticsearch.index.mapper.MapperBuilders.stringField; -import static org.elasticsearch.index.mapper.core.TypeParsers.parseField; -import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField; -import static org.elasticsearch.index.mapper.core.TypeParsers.parsePathType; +import static org.elasticsearch.index.mapper.MapperBuilders.*; +import static org.elasticsearch.index.mapper.core.TypeParsers.*; /** * Parsing: We handle: diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java index e538a00da16..59b664dbd65 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java @@ -41,7 +41,7 @@ import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.query.QueryShardContext; -import org.elasticsearch.index.similarity.SimilarityLookupService; +import org.elasticsearch.index.similarity.SimilarityService; import java.io.IOException; import java.util.Iterator; @@ -300,7 +300,7 @@ public class AllFieldMapper extends MetadataFieldMapper { if (fieldType().similarity() != null) { builder.field("similarity", fieldType().similarity().name()); } else if (includeDefaults) { - builder.field("similarity", SimilarityLookupService.DEFAULT_SIMILARITY); + builder.field("similarity", SimilarityService.DEFAULT_SIMILARITY); } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java index 1ac34df5063..1d73139fb25 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.mapper.ip; -import com.google.common.net.InetAddresses; import org.apache.lucene.analysis.NumericTokenStream; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexOptions; @@ -29,6 +28,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; import org.elasticsearch.common.Explicit; +import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.Numbers; import org.elasticsearch.common.Strings; import 
org.elasticsearch.common.settings.Settings; diff --git a/core/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java b/core/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java index d811f1f6e71..1f8a4c61f9a 100644 --- a/core/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java +++ b/core/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java @@ -242,29 +242,12 @@ public final class PercolatorQueriesRegistry extends AbstractIndexShardComponent private class RealTimePercolatorOperationListener extends IndexingOperationListener { @Override - public Engine.Create preCreate(Engine.Create create) { + public Engine.Index preIndex(Engine.Index operation) { // validate the query here, before we index - if (PercolatorService.TYPE_NAME.equals(create.type())) { - parsePercolatorDocument(create.id(), create.source()); + if (PercolatorService.TYPE_NAME.equals(operation.type())) { + parsePercolatorDocument(operation.id(), operation.source()); } - return create; - } - - @Override - public void postCreateUnderLock(Engine.Create create) { - // add the query under a doc lock - if (PercolatorService.TYPE_NAME.equals(create.type())) { - addPercolateQuery(create.id(), create.source()); - } - } - - @Override - public Engine.Index preIndex(Engine.Index index) { - // validate the query here, before we index - if (PercolatorService.TYPE_NAME.equals(index.type())) { - parsePercolatorDocument(index.id(), index.source()); - } - return index; + return operation; } @Override diff --git a/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java index 4ad63ebc1fc..59d20cef611 100644 --- a/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java @@ -314,8 +314,8 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder shell = new ArrayList<>(); int size = in.readVInt(); for (int i = 0; i < size; i++) { - shell.add(GeoPoint.readGeoPointFrom(in)); + shell.add(in.readGeoPoint()); } GeoPolygonQueryBuilder builder = new GeoPolygonQueryBuilder(fieldName, shell); builder.validationMethod = GeoValidationMethod.readGeoValidationMethodFrom(in); @@ -176,7 +176,7 @@ public class GeoPolygonQueryBuilder extends AbstractQueryBuilder { - private static final DecayFunction EXP_DECAY_FUNCTION = new ExponentialDecayScoreFunction(); + public static final DecayFunction EXP_DECAY_FUNCTION = new ExponentialDecayScoreFunction(); public ExponentialDecayFunctionBuilder(String fieldName, Object origin, Object scale, Object offset) { super(fieldName, origin, scale, offset); diff --git a/core/src/main/java/org/elasticsearch/index/query/functionscore/gauss/GaussDecayFunctionBuilder.java b/core/src/main/java/org/elasticsearch/index/query/functionscore/gauss/GaussDecayFunctionBuilder.java index 621b22a583d..618503a9b3c 100644 --- a/core/src/main/java/org/elasticsearch/index/query/functionscore/gauss/GaussDecayFunctionBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/functionscore/gauss/GaussDecayFunctionBuilder.java @@ -27,7 +27,7 @@ import org.elasticsearch.index.query.functionscore.DecayFunctionBuilder; public class GaussDecayFunctionBuilder extends DecayFunctionBuilder { - private static final DecayFunction GAUSS_DECAY_FUNCTION = new GaussScoreFunction(); + public static final DecayFunction 
GAUSS_DECAY_FUNCTION = new GaussScoreFunction(); public GaussDecayFunctionBuilder(String fieldName, Object origin, Object scale, Object offset) { super(fieldName, origin, scale, offset); diff --git a/core/src/main/java/org/elasticsearch/index/query/functionscore/lin/LinearDecayFunctionBuilder.java b/core/src/main/java/org/elasticsearch/index/query/functionscore/lin/LinearDecayFunctionBuilder.java index 2e63aedd84b..f321ee166f1 100644 --- a/core/src/main/java/org/elasticsearch/index/query/functionscore/lin/LinearDecayFunctionBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/functionscore/lin/LinearDecayFunctionBuilder.java @@ -26,7 +26,7 @@ import org.elasticsearch.index.query.functionscore.DecayFunctionBuilder; public class LinearDecayFunctionBuilder extends DecayFunctionBuilder { - private static final DecayFunction LINEAR_DECAY_FUNCTION = new LinearDecayScoreFunction(); + public static final DecayFunction LINEAR_DECAY_FUNCTION = new LinearDecayScoreFunction(); public LinearDecayFunctionBuilder(String fieldName, Object origin, Object scale, Object offset) { super(fieldName, origin, scale, offset); diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index ea2d555ae0d..86e53b41c9b 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -33,7 +33,6 @@ import org.elasticsearch.action.admin.indices.optimize.OptimizeRequest; import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest; import org.elasticsearch.action.termvectors.TermVectorsRequest; import org.elasticsearch.action.termvectors.TermVectorsResponse; -import org.elasticsearch.bootstrap.Elasticsearch; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; @@ -43,6 +42,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.support.LoggerMessageFormat; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.settings.Settings; @@ -84,8 +84,8 @@ import org.elasticsearch.index.settings.IndexSettings; import org.elasticsearch.index.settings.IndexSettingsService; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.index.snapshots.IndexShardRepository; -import org.elasticsearch.index.store.Store; import org.elasticsearch.index.store.Store.MetadataSnapshot; +import org.elasticsearch.index.store.Store; import org.elasticsearch.index.store.StoreFileMetaData; import org.elasticsearch.index.store.StoreStats; import org.elasticsearch.index.suggest.stats.ShardSuggestMetric; @@ -100,6 +100,7 @@ import org.elasticsearch.index.warmer.WarmerStats; import org.elasticsearch.indices.IndicesWarmer; import org.elasticsearch.indices.InternalIndicesLifecycle; import org.elasticsearch.indices.cache.query.IndicesQueryCache; +import org.elasticsearch.indices.memory.IndexingMemoryController; import org.elasticsearch.indices.recovery.RecoveryFailedException; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.percolator.PercolatorService; @@ -118,16 +119,15 @@ import java.util.concurrent.TimeUnit; import 
java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; + public class IndexShard extends AbstractIndexShardComponent implements IndexSettingsService.Listener { private final ThreadPool threadPool; private final MapperService mapperService; - private final IndexQueryParserService queryParserService; private final IndexCache indexCache; private final InternalIndicesLifecycle indicesLifecycle; private final Store store; private final MergeSchedulerConfig mergeSchedulerConfig; - private final IndexAliasesService indexAliasesService; private final ShardIndexingService indexingService; private final ShardSearchStats searchService; private final ShardGetService getService; @@ -190,6 +190,13 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett private final IndexSearcherWrapper searcherWrapper; + /** True if this shard is still indexing (recently) and false if we've been idle for long enough (as periodically checked by {@link + * IndexingMemoryController}). */ + private final AtomicBoolean active = new AtomicBoolean(); + + private volatile long lastWriteNS; + private final IndexingMemoryController indexingMemoryController; + @Inject public IndexShard(ShardId shardId, @IndexSettings Settings indexSettings, ShardPath path, Store store, IndexServicesProvider provider) { super(shardId, indexSettings); @@ -202,11 +209,9 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett this.indicesLifecycle = (InternalIndicesLifecycle) provider.getIndicesLifecycle(); this.store = store; this.mergeSchedulerConfig = new MergeSchedulerConfig(indexSettings); - this.threadPool = provider.getThreadPool(); - this.mapperService = provider.getMapperService(); - this.queryParserService = provider.getQueryParserService(); - this.indexCache = provider.getIndexCache(); - this.indexAliasesService = provider.getIndexAliasesService(); + this.threadPool = provider.getThreadPool(); + this.mapperService = provider.getMapperService(); + this.indexCache = provider.getIndexCache(); this.indexingService = new ShardIndexingService(shardId, indexSettings); this.getService = new ShardGetService(this, mapperService); this.termVectorsService = provider.getTermVectorsService(); @@ -242,11 +247,16 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett this.flushThresholdSize = indexSettings.getAsBytesSize(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(512, ByteSizeUnit.MB)); this.disableFlush = indexSettings.getAsBoolean(INDEX_TRANSLOG_DISABLE_FLUSH, false); this.indexShardOperationCounter = new IndexShardOperationCounter(logger, shardId); + this.indexingMemoryController = provider.getIndexingMemoryController(); + this.searcherWrapper = provider.getIndexSearcherWrapper(); - this.percolatorQueriesRegistry = new PercolatorQueriesRegistry(shardId, indexSettings, queryParserService, indexingService, mapperService, indexFieldDataService); + this.percolatorQueriesRegistry = new PercolatorQueriesRegistry(shardId, indexSettings, provider.getQueryParserService(), indexingService, mapperService, indexFieldDataService); if (mapperService.hasMapping(PercolatorService.TYPE_NAME)) { percolatorQueriesRegistry.enableRealTimePercolator(); } + + // We start up inactive + active.set(false); } public Store store() { @@ -278,7 +288,9 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett return indexFieldDataService; } - public MapperService mapperService() { return mapperService;} + public 
MapperService mapperService() { + return mapperService; + } public ShardSearchStats searchService() { return this.searchService; @@ -423,40 +435,6 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett return previousState; } - public Engine.Create prepareCreate(SourceToParse source, long version, VersionType versionType, Engine.Operation.Origin origin) { - try { - return prepareCreate(docMapper(source.type()), source, version, versionType, origin); - } catch (Throwable t) { - verifyNotClosed(t); - throw t; - } - } - - static Engine.Create prepareCreate(DocumentMapperForType docMapper, SourceToParse source, long version, VersionType versionType, Engine.Operation.Origin origin) { - long startTime = System.nanoTime(); - ParsedDocument doc = docMapper.getDocumentMapper().parse(source); - if (docMapper.getMapping() != null) { - doc.addDynamicMappingsUpdate(docMapper.getMapping()); - } - return new Engine.Create(docMapper.getDocumentMapper().uidMapper().term(doc.uid().stringValue()), doc, version, versionType, origin, startTime); - } - - public void create(Engine.Create create) { - writeAllowed(create.origin()); - create = indexingService.preCreate(create); - try { - if (logger.isTraceEnabled()) { - logger.trace("index [{}][{}]{}", create.type(), create.id(), create.docs()); - } - getEngine().create(create); - create.endTime(System.nanoTime()); - } catch (Throwable ex) { - indexingService.postCreate(create, ex); - throw ex; - } - indexingService.postCreate(create); - } - public Engine.Index prepareIndex(SourceToParse source, long version, VersionType versionType, Engine.Operation.Origin origin) { try { return prepareIndex(docMapper(source.type()), source, version, versionType, origin); @@ -480,7 +458,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett * updated. */ public boolean index(Engine.Index index) { - writeAllowed(index.origin()); + ensureWriteAllowed(index); + markLastWrite(index); index = indexingService.preIndex(index); final boolean created; try { @@ -504,7 +483,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett } public void delete(Engine.Delete delete) { - writeAllowed(delete.origin()); + ensureWriteAllowed(delete); + markLastWrite(delete); delete = indexingService.preDelete(delete); try { if (logger.isTraceEnabled()) { @@ -914,7 +894,24 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett } } - private void writeAllowed(Engine.Operation.Origin origin) throws IllegalIndexShardStateException { + /** Returns timestamp of last indexing operation */ + public long getLastWriteNS() { + return lastWriteNS; + } + + /** Records timestamp of the last write operation, possibly switching {@code active} to true if we were inactive. */ + private void markLastWrite(Engine.Operation op) { + lastWriteNS = op.startTime(); + if (active.getAndSet(true) == false) { + // We are currently inactive, but a new write operation just showed up, so we now notify IMC + // to wake up and fix our indexing buffer. We could do this async instead, but cost should + // be low, and it's rare this happens. 
+ indexingMemoryController.forceCheck(); + } + } + + private void ensureWriteAllowed(Engine.Operation op) throws IllegalIndexShardStateException { + Engine.Operation.Origin origin = op.origin(); IndexShardState state = this.state; // one time volatile read if (origin == Engine.Operation.Origin.PRIMARY) { @@ -976,6 +973,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett this.failedEngineListener.delegates.add(failedEngineListener); } + /** Change the indexing and translog buffer sizes. If {@code IndexWriter} is currently using more than + * the new buffering indexing size then we do a refresh to free up the heap. */ public void updateBufferSize(ByteSizeValue shardIndexingBufferSize, ByteSizeValue shardTranslogBufferSize) { final EngineConfig config = engineConfig; @@ -994,27 +993,50 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett // so we push changes these changes down to IndexWriter: engine.onSettingsChanged(); - if (shardIndexingBufferSize == EngineConfig.INACTIVE_SHARD_INDEXING_BUFFER) { - // it's inactive: make sure we do a refresh / full IW flush in this case, since the memory - // changes only after a "data" change has happened to the writer - // the index writer lazily allocates memory and a refresh will clean it all up. - logger.debug("updating index_buffer_size from [{}] to (inactive) [{}]", preValue, shardIndexingBufferSize); + long iwBytesUsed = engine.indexWriterRAMBytesUsed(); + + String message = LoggerMessageFormat.format("updating index_buffer_size from [{}] to [{}]; IndexWriter now using [{}] bytes", + preValue, shardIndexingBufferSize, iwBytesUsed); + + if (iwBytesUsed > shardIndexingBufferSize.bytes()) { + // our allowed buffer was changed to less than we are currently using; we ask IW to refresh + // so it clears its buffers (otherwise it won't clear until the next indexing/delete op) + logger.debug(message + "; now refresh to clear IndexWriter memory"); + + // TODO: should IW have an API to move segments to disk, but not refresh? Its flush method is protected... try { refresh("update index buffer"); } catch (Throwable e) { - logger.warn("failed to refresh after setting shard to inactive", e); + logger.warn("failed to refresh after decreasing index buffer", e); } } else { - logger.debug("updating index_buffer_size from [{}] to [{}]", preValue, shardIndexingBufferSize); + logger.debug(message); } } engine.getTranslog().updateBuffer(shardTranslogBufferSize); } - public void markAsInactive() { - updateBufferSize(EngineConfig.INACTIVE_SHARD_INDEXING_BUFFER, TranslogConfig.INACTIVE_SHARD_TRANSLOG_BUFFER); - indicesLifecycle.onShardInactive(this); + /** Called by {@link IndexingMemoryController} to check whether more than {@code inactiveTimeNS} has passed since the last + * indexing operation, and become inactive (reducing indexing and translog buffers to tiny values) if so. This returns true + * if the shard is inactive. 
*/ + public boolean checkIdle(long inactiveTimeNS) { + if (System.nanoTime() - lastWriteNS >= inactiveTimeNS) { + boolean wasActive = active.getAndSet(false); + if (wasActive) { + updateBufferSize(IndexingMemoryController.INACTIVE_SHARD_INDEXING_BUFFER, IndexingMemoryController.INACTIVE_SHARD_TRANSLOG_BUFFER); + logger.debug("shard is now inactive"); + indicesLifecycle.onShardInactive(this); + } + } + + return active.get() == false; + } + + /** Returns {@code true} if this shard is active (has seen indexing ops in the last {@link + * IndexingMemoryController#SHARD_INACTIVE_TIME_SETTING} (default 5 minutes), else {@code false}. */ + public boolean getActive() { + return active.get(); } public final boolean isFlushOnClose() { @@ -1416,8 +1438,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett } private final EngineConfig newEngineConfig(TranslogConfig translogConfig, QueryCachingPolicy cachingPolicy) { - final TranslogRecoveryPerformer translogRecoveryPerformer = new TranslogRecoveryPerformer(shardId, mapperService, queryParserService, - indexAliasesService, indexCache, logger) { + final TranslogRecoveryPerformer translogRecoveryPerformer = new TranslogRecoveryPerformer(shardId, mapperService, logger) { @Override protected void operationProcessed() { assert recoveryState != null; @@ -1426,7 +1447,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett }; return new EngineConfig(shardId, threadPool, indexingService, indexSettings, warmer, store, deletionPolicy, mergePolicyConfig.getMergePolicy(), mergeSchedulerConfig, - mapperService.indexAnalyzer(), similarityService.similarity(), codecService, failedEngineListener, translogRecoveryPerformer, indexCache.query(), cachingPolicy, translogConfig); + mapperService.indexAnalyzer(), similarityService.similarity(mapperService), codecService, failedEngineListener, translogRecoveryPerformer, indexCache.query(), cachingPolicy, translogConfig); } private static class IndexShardOperationCounter extends AbstractRefCounted { @@ -1499,6 +1520,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett /** * Schedules a flush if needed but won't schedule more than one flush concurrently. The flush will be executed on the * Flush thread-pool asynchronously. + * * @return true if a new flush is scheduled otherwise false. 
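The `markLastWrite`/`checkIdle` pair added above is a small concurrency pattern: a volatile nanotime watermark plus an `AtomicBoolean` whose `getAndSet` guarantees exactly-once transitions in each direction (inactive-to-active pings the memory controller once; active-to-inactive shrinks the buffers once). A standalone sketch with illustrative names:

```java
import java.util.concurrent.atomic.AtomicBoolean;

final class ShardActivityTracker {
    private final AtomicBoolean active = new AtomicBoolean(); // shards start inactive
    private volatile long lastWriteNS;

    /** Called on every indexing or delete operation. */
    void markWrite(long opStartTimeNS) {
        lastWriteNS = opStartTimeNS;
        if (active.getAndSet(true) == false) {
            onBecameActive();       // runs at most once per idle period
        }
    }

    /** Called periodically by a controller; returns true if the shard is idle. */
    boolean checkIdle(long inactiveTimeNS) {
        if (System.nanoTime() - lastWriteNS >= inactiveTimeNS) {
            if (active.getAndSet(false)) {
                onBecameInactive(); // e.g. drop to tiny indexing/translog buffers
            }
        }
        return active.get() == false;
    }

    void onBecameActive()   { /* e.g. IndexingMemoryController.forceCheck() */ }
    void onBecameInactive() { /* e.g. updateBufferSize(tiny, tiny) */ }
}
```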
*/ public boolean maybeFlush() { diff --git a/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java index c81b9e5c541..8bdf1fb5382 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.index.shard; +import java.io.IOException; + import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexServicesProvider; @@ -26,8 +28,7 @@ import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.settings.IndexSettings; import org.elasticsearch.index.store.Store; - -import java.io.IOException; +import org.elasticsearch.index.translog.TranslogStats; /** * ShadowIndexShard extends {@link IndexShard} to add file synchronization @@ -82,4 +83,9 @@ public final class ShadowIndexShard extends IndexShard { public boolean allowsPrimaryPromotion() { return false; } + + @Override + public TranslogStats translogStats() { + return null; // shadow engine has no translog + } } diff --git a/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java b/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java index f893ec4d89d..68c552d4419 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java +++ b/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java @@ -18,27 +18,13 @@ */ package org.elasticsearch.index.shard; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.join.BitSetProducer; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.lucene.search.Queries; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.aliases.IndexAliasesService; -import org.elasticsearch.index.cache.IndexCache; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.IgnoreOnRecoveryEngineException; import org.elasticsearch.index.mapper.*; -import org.elasticsearch.index.query.IndexQueryParserService; -import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.translog.Translog; import java.io.IOException; @@ -53,20 +39,13 @@ import static org.elasticsearch.index.mapper.SourceToParse.source; */ public class TranslogRecoveryPerformer { private final MapperService mapperService; - private final IndexQueryParserService queryParserService; - private final IndexAliasesService indexAliasesService; - private final IndexCache indexCache; private final ESLogger logger; private final Map recoveredTypes = new HashMap<>(); private final ShardId shardId; - protected TranslogRecoveryPerformer(ShardId shardId, MapperService mapperService, IndexQueryParserService queryParserService, - IndexAliasesService indexAliasesService, IndexCache indexCache, ESLogger logger) { + protected 
TranslogRecoveryPerformer(ShardId shardId, MapperService mapperService, ESLogger logger) { this.shardId = shardId; this.mapperService = mapperService; - this.queryParserService = queryParserService; - this.indexAliasesService = indexAliasesService; - this.indexCache = indexCache; this.logger = logger; } @@ -145,19 +124,7 @@ public class TranslogRecoveryPerformer { public void performRecoveryOperation(Engine engine, Translog.Operation operation, boolean allowMappingUpdates) { try { switch (operation.opType()) { - case CREATE: - Translog.Create create = (Translog.Create) operation; - Engine.Create engineCreate = IndexShard.prepareCreate(docMapper(create.type()), - source(create.source()).index(shardId.getIndex()).type(create.type()).id(create.id()) - .routing(create.routing()).parent(create.parent()).timestamp(create.timestamp()).ttl(create.ttl()), - create.version(), create.versionType().versionTypeForReplicationAndRecovery(), Engine.Operation.Origin.RECOVERY); - maybeAddMappingUpdate(engineCreate.type(), engineCreate.parsedDoc().dynamicMappingsUpdate(), engineCreate.id(), allowMappingUpdates); - if (logger.isTraceEnabled()) { - logger.trace("[translog] recover [create] op of [{}][{}]", create.type(), create.id()); - } - engine.create(engineCreate); - break; - case SAVE: + case INDEX: Translog.Index index = (Translog.Index) operation; Engine.Index engineIndex = IndexShard.prepareIndex(docMapper(index.type()), source(index.source()).type(index.type()).id(index.id()) .routing(index.routing()).parent(index.parent()).timestamp(index.timestamp()).ttl(index.ttl()), @@ -177,11 +144,6 @@ public class TranslogRecoveryPerformer { engine.delete(new Engine.Delete(uid.type(), uid.id(), delete.uid(), delete.version(), delete.versionType().versionTypeForReplicationAndRecovery(), Engine.Operation.Origin.RECOVERY, System.nanoTime(), false)); break; - case DELETE_BY_QUERY: - Translog.DeleteByQuery deleteByQuery = (Translog.DeleteByQuery) operation; - engine.delete(prepareDeleteByQuery(queryParserService, mapperService, indexAliasesService, indexCache, - deleteByQuery.source(), deleteByQuery.filteringAliases(), Engine.Operation.Origin.RECOVERY, deleteByQuery.types())); - break; default: throw new IllegalStateException("No operation defined for [" + operation + "]"); } @@ -206,38 +168,6 @@ public class TranslogRecoveryPerformer { operationProcessed(); } - private static Engine.DeleteByQuery prepareDeleteByQuery(IndexQueryParserService queryParserService, MapperService mapperService, IndexAliasesService indexAliasesService, IndexCache indexCache, BytesReference source, @Nullable String[] filteringAliases, Engine.Operation.Origin origin, String... 
types) { - long startTime = System.nanoTime(); - if (types == null) { - types = Strings.EMPTY_ARRAY; - } - Query query; - try { - query = queryParserService.parseQuery(source).query(); - } catch (ParsingException ex) { - // for BWC we try to parse directly the query since pre 1.0.0.Beta2 we didn't require a top level query field - if (queryParserService.getIndexCreatedVersion().onOrBefore(Version.V_1_0_0_Beta2)) { - try { - XContentParser parser = XContentHelper.createParser(source); - ParsedQuery parse = queryParserService.parse(parser); - query = parse.query(); - } catch (Throwable t) { - ex.addSuppressed(t); - throw ex; - } - } else { - throw ex; - } - } - Query searchFilter = mapperService.searchFilter(types); - if (searchFilter != null) { - query = Queries.filtered(query, searchFilter); - } - - Query aliasFilter = indexAliasesService.aliasFilter(filteringAliases); - BitSetProducer parentFilter = mapperService.hasNested() ? indexCache.bitsetFilterCache().getBitSetProducer(Queries.newNonNestedFilter()) : null; - return new Engine.DeleteByQuery(query, source, filteringAliases, aliasFilter, parentFilter, origin, startTime, types); - } /** * Called once for every processed operation by this recovery performer. diff --git a/core/src/main/java/org/elasticsearch/index/similarity/BM25SimilarityProvider.java b/core/src/main/java/org/elasticsearch/index/similarity/BM25SimilarityProvider.java index 1983c4e8ecf..68e50da1348 100644 --- a/core/src/main/java/org/elasticsearch/index/similarity/BM25SimilarityProvider.java +++ b/core/src/main/java/org/elasticsearch/index/similarity/BM25SimilarityProvider.java @@ -21,8 +21,6 @@ package org.elasticsearch.index.similarity; import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.search.similarities.Similarity; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.settings.Settings; /** @@ -40,8 +38,7 @@ public class BM25SimilarityProvider extends AbstractSimilarityProvider { private final BM25Similarity similarity; - @Inject - public BM25SimilarityProvider(@Assisted String name, @Assisted Settings settings) { + public BM25SimilarityProvider(String name, Settings settings) { super(name); float k1 = settings.getAsFloat("k1", 1.2f); float b = settings.getAsFloat("b", 0.75f); diff --git a/core/src/main/java/org/elasticsearch/index/similarity/DFRSimilarityProvider.java b/core/src/main/java/org/elasticsearch/index/similarity/DFRSimilarityProvider.java index 7858cb132d6..d5caa4aab98 100644 --- a/core/src/main/java/org/elasticsearch/index/similarity/DFRSimilarityProvider.java +++ b/core/src/main/java/org/elasticsearch/index/similarity/DFRSimilarityProvider.java @@ -77,8 +77,7 @@ public class DFRSimilarityProvider extends AbstractSimilarityProvider { private final DFRSimilarity similarity; - @Inject - public DFRSimilarityProvider(@Assisted String name, @Assisted Settings settings) { + public DFRSimilarityProvider(String name, Settings settings) { super(name); BasicModel basicModel = parseBasicModel(settings); AfterEffect afterEffect = parseAfterEffect(settings); diff --git a/core/src/main/java/org/elasticsearch/index/similarity/DefaultSimilarityProvider.java b/core/src/main/java/org/elasticsearch/index/similarity/DefaultSimilarityProvider.java index 0f9feba952b..3acbd9821af 100644 --- a/core/src/main/java/org/elasticsearch/index/similarity/DefaultSimilarityProvider.java +++ 
b/core/src/main/java/org/elasticsearch/index/similarity/DefaultSimilarityProvider.java @@ -20,8 +20,6 @@ package org.elasticsearch.index.similarity; import org.apache.lucene.search.similarities.DefaultSimilarity; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.settings.Settings; /** @@ -37,8 +35,7 @@ public class DefaultSimilarityProvider extends AbstractSimilarityProvider { private final DefaultSimilarity similarity = new DefaultSimilarity(); - @Inject - public DefaultSimilarityProvider(@Assisted String name, @Assisted Settings settings) { + public DefaultSimilarityProvider(String name, Settings settings) { super(name); boolean discountOverlaps = settings.getAsBoolean("discount_overlaps", true); this.similarity.setDiscountOverlaps(discountOverlaps); diff --git a/core/src/main/java/org/elasticsearch/index/similarity/IBSimilarityProvider.java b/core/src/main/java/org/elasticsearch/index/similarity/IBSimilarityProvider.java index 2f619c5615e..4b83bc838f2 100644 --- a/core/src/main/java/org/elasticsearch/index/similarity/IBSimilarityProvider.java +++ b/core/src/main/java/org/elasticsearch/index/similarity/IBSimilarityProvider.java @@ -28,8 +28,6 @@ import org.apache.lucene.search.similarities.LambdaDF; import org.apache.lucene.search.similarities.LambdaTTF; import org.apache.lucene.search.similarities.Normalization; import org.apache.lucene.search.similarities.Similarity; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.settings.Settings; import java.util.HashMap; @@ -67,8 +65,7 @@ public class IBSimilarityProvider extends AbstractSimilarityProvider { private final IBSimilarity similarity; - @Inject - public IBSimilarityProvider(@Assisted String name, @Assisted Settings settings) { + public IBSimilarityProvider(String name, Settings settings) { super(name); Distribution distribution = parseDistribution(settings); Lambda lambda = parseLambda(settings); diff --git a/core/src/main/java/org/elasticsearch/index/similarity/LMDirichletSimilarityProvider.java b/core/src/main/java/org/elasticsearch/index/similarity/LMDirichletSimilarityProvider.java index efea285639b..24494dc0b75 100644 --- a/core/src/main/java/org/elasticsearch/index/similarity/LMDirichletSimilarityProvider.java +++ b/core/src/main/java/org/elasticsearch/index/similarity/LMDirichletSimilarityProvider.java @@ -21,8 +21,6 @@ package org.elasticsearch.index.similarity; import org.apache.lucene.search.similarities.LMDirichletSimilarity; import org.apache.lucene.search.similarities.Similarity; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.settings.Settings; /** @@ -38,8 +36,7 @@ public class LMDirichletSimilarityProvider extends AbstractSimilarityProvider { private final LMDirichletSimilarity similarity; - @Inject - public LMDirichletSimilarityProvider(@Assisted String name, @Assisted Settings settings) { + public LMDirichletSimilarityProvider(String name, Settings settings) { super(name); float mu = settings.getAsFloat("mu", 2000f); this.similarity = new LMDirichletSimilarity(mu); diff --git a/core/src/main/java/org/elasticsearch/index/similarity/LMJelinekMercerSimilarityProvider.java b/core/src/main/java/org/elasticsearch/index/similarity/LMJelinekMercerSimilarityProvider.java index 5d30b300d5c..3d5a40fc153 100644 --- 
a/core/src/main/java/org/elasticsearch/index/similarity/LMJelinekMercerSimilarityProvider.java +++ b/core/src/main/java/org/elasticsearch/index/similarity/LMJelinekMercerSimilarityProvider.java @@ -38,8 +38,7 @@ public class LMJelinekMercerSimilarityProvider extends AbstractSimilarityProvide private final LMJelinekMercerSimilarity similarity; - @Inject - public LMJelinekMercerSimilarityProvider(@Assisted String name, @Assisted Settings settings) { + public LMJelinekMercerSimilarityProvider(String name, Settings settings) { super(name); float lambda = settings.getAsFloat("lambda", 0.1f); this.similarity = new LMJelinekMercerSimilarity(lambda); diff --git a/core/src/main/java/org/elasticsearch/index/similarity/PreBuiltSimilarityProvider.java b/core/src/main/java/org/elasticsearch/index/similarity/PreBuiltSimilarityProvider.java deleted file mode 100644 index 4b3f0ccf035..00000000000 --- a/core/src/main/java/org/elasticsearch/index/similarity/PreBuiltSimilarityProvider.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index.similarity; - -import org.apache.lucene.search.similarities.Similarity; -import org.elasticsearch.common.settings.Settings; - -/** - * {@link SimilarityProvider} for pre-built Similarities - */ -public class PreBuiltSimilarityProvider extends AbstractSimilarityProvider { - - public static class Factory implements SimilarityProvider.Factory { - - private final PreBuiltSimilarityProvider similarity; - - public Factory(String name, Similarity similarity) { - this.similarity = new PreBuiltSimilarityProvider(name, similarity); - } - - @Override - public SimilarityProvider create(String name, Settings settings) { - return similarity; - } - - public String name() { - return similarity.name(); - } - - public SimilarityProvider get() { - return similarity; - } - } - - private final Similarity similarity; - - /** - * Creates a new {@link PreBuiltSimilarityProvider} with the given name and given - * pre-built Similarity - * - * @param name Name of the Provider - * @param similarity Pre-built Similarity - */ - public PreBuiltSimilarityProvider(String name, Similarity similarity) { - super(name); - this.similarity = similarity; - } - - /** - * {@inheritDoc} - */ - @Override - public Similarity get() { - return similarity; - } -} diff --git a/core/src/main/java/org/elasticsearch/index/similarity/SimilarityModule.java b/core/src/main/java/org/elasticsearch/index/similarity/SimilarityModule.java index 6e03bcf0848..29312f2557b 100644 --- a/core/src/main/java/org/elasticsearch/index/similarity/SimilarityModule.java +++ b/core/src/main/java/org/elasticsearch/index/similarity/SimilarityModule.java @@ -20,19 +20,18 @@ package org.elasticsearch.index.similarity; import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.common.inject.Scopes; -import org.elasticsearch.common.inject.assistedinject.FactoryProvider; -import org.elasticsearch.common.inject.multibindings.MapBinder; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; import java.util.HashMap; import java.util.Map; +import java.util.function.BiFunction; /** * {@link SimilarityModule} is responsible gathering registered and configured {@link SimilarityProvider} - * implementations and making them available through the {@link SimilarityLookupService} and {@link SimilarityService}. + * implementations and making them available through the {@link SimilarityService}. * - * New {@link SimilarityProvider} implementations can be registered through {@link #addSimilarity(String, Class)} + * New {@link SimilarityProvider} implementations can be registered through {@link #addSimilarity(String, BiFunction)} * while existing Providers can be referenced through Settings under the {@link #SIMILARITY_SETTINGS_PREFIX} prefix * along with the "type" value. For example, to reference the {@link BM25SimilarityProvider}, the configuration * "index.similarity.my_similarity.type : "BM25" can be used. 
@@ -42,16 +41,12 @@ public class SimilarityModule extends AbstractModule { public static final String SIMILARITY_SETTINGS_PREFIX = "index.similarity"; private final Settings settings; - private final Map<String, Class<? extends SimilarityProvider>> similarities = new HashMap<>(); + private final Map<String, BiFunction<String, Settings, SimilarityProvider>> similarities = new HashMap<>(); + private final Index index; - public SimilarityModule(Settings settings) { + public SimilarityModule(Index index, Settings settings) { this.settings = settings; - addSimilarity("default", DefaultSimilarityProvider.class); - addSimilarity("BM25", BM25SimilarityProvider.class); - addSimilarity("DFR", DFRSimilarityProvider.class); - addSimilarity("IB", IBSimilarityProvider.class); - addSimilarity("LMDirichlet", LMDirichletSimilarityProvider.class); - addSimilarity("LMJelinekMercer", LMJelinekMercerSimilarityProvider.class); + this.index = index; } /** @@ -60,36 +55,16 @@ public class SimilarityModule extends AbstractModule { * @param name Name of the SimilarityProvider * @param similarity SimilarityProvider to register */ - public void addSimilarity(String name, Class<? extends SimilarityProvider> similarity) { + public void addSimilarity(String name, BiFunction<String, Settings, SimilarityProvider> similarity) { + if (similarities.containsKey(name) || SimilarityService.BUILT_IN.containsKey(name)) { + throw new IllegalArgumentException("similarity for name: [" + name + "] is already registered"); + } similarities.put(name, similarity); } @Override protected void configure() { - MapBinder<String, SimilarityProvider.Factory> similarityBinder = - MapBinder.newMapBinder(binder(), String.class, SimilarityProvider.Factory.class); - - Map<String, Settings> similaritySettings = settings.getGroups(SIMILARITY_SETTINGS_PREFIX); - for (Map.Entry<String, Settings> entry : similaritySettings.entrySet()) { - String name = entry.getKey(); - Settings settings = entry.getValue(); - - String typeName = settings.get("type"); - if (typeName == null) { - throw new IllegalArgumentException("Similarity [" + name + "] must have an associated type"); - } else if (similarities.containsKey(typeName) == false) { - throw new IllegalArgumentException("Unknown Similarity type [" + typeName + "] for [" + name + "]"); - } - similarityBinder.addBinding(entry.getKey()).toProvider(FactoryProvider.newFactory(SimilarityProvider.Factory.class, similarities.get(typeName))).in(Scopes.SINGLETON); - } - - for (PreBuiltSimilarityProvider.Factory factory : Similarities.listFactories()) { - if (!similarities.containsKey(factory.name())) { - similarityBinder.addBinding(factory.name()).toInstance(factory); - } - } - - bind(SimilarityLookupService.class).asEagerSingleton(); - bind(SimilarityService.class).asEagerSingleton(); + SimilarityService service = new SimilarityService(index, settings, new HashMap<>(similarities)); + bind(SimilarityService.class).toInstance(service); } }
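Reviewer aside: the duplicate-name guard above changes the registration contract, so a minimal sketch may help. MySimilarityPlugin and MySimilarityProvider are illustrative names, not part of this change; the point is that a plain constructor reference now satisfies the BiFunction<String, Settings, SimilarityProvider> slot that Guice assisted injection used to fill:

import org.apache.lucene.search.similarities.DefaultSimilarity;
import org.apache.lucene.search.similarities.Similarity;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.similarity.AbstractSimilarityProvider;
import org.elasticsearch.index.similarity.SimilarityModule;

public class MySimilarityPlugin {
    // Hypothetical plugin hook; only the module wiring matters here.
    public void onModule(SimilarityModule module) {
        // The constructor reference matches BiFunction<String, Settings, SimilarityProvider>.
        module.addSimilarity("my_similarity", MySimilarityProvider::new);
        // module.addSimilarity("BM25", ...) would throw IllegalArgumentException,
        // since the name collides with SimilarityService.BUILT_IN.
    }

    public static class MySimilarityProvider extends AbstractSimilarityProvider {
        private final DefaultSimilarity similarity = new DefaultSimilarity();

        public MySimilarityProvider(String name, Settings settings) {
            super(name); // no @Inject/@Assisted needed anymore
            similarity.setDiscountOverlaps(settings.getAsBoolean("discount_overlaps", true));
        }

        @Override
        public Similarity get() {
            return similarity;
        }
    }
}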
diff --git a/core/src/main/java/org/elasticsearch/index/similarity/SimilarityProvider.java b/core/src/main/java/org/elasticsearch/index/similarity/SimilarityProvider.java index 38f56af4514..6433181dd6d 100644 --- a/core/src/main/java/org/elasticsearch/index/similarity/SimilarityProvider.java +++ b/core/src/main/java/org/elasticsearch/index/similarity/SimilarityProvider.java @@ -40,19 +40,4 @@ public interface SimilarityProvider { * @return Provided {@link Similarity} */ Similarity get(); - - /** - * Factory for creating {@link SimilarityProvider} instances - */ - public static interface Factory { - - /** - * Creates a new {@link SimilarityProvider} instance - * - * @param name Name of the provider - * @param settings Settings to be used by the Provider - * @return {@link SimilarityProvider} instance created by the Factory - */ - SimilarityProvider create(String name, Settings settings); - } } diff --git a/core/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java b/core/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java index 98faa87e94b..a77a2de4dff 100644 --- a/core/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java +++ b/core/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java @@ -25,55 +25,96 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.Index; -import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.settings.IndexSettings; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.function.BiFunction; /** * */ public class SimilarityService extends AbstractIndexComponent { - private final SimilarityLookupService similarityLookupService; - private final MapperService mapperService; - - private final Similarity perFieldSimilarity; - + public final static String DEFAULT_SIMILARITY = "default"; + private final Similarity defaultSimilarity; + private final Similarity baseSimilarity; + private final Map<String, SimilarityProvider> similarities; + static final Map<String, BiFunction<String, Settings, SimilarityProvider>> DEFAULTS; + static final Map<String, BiFunction<String, Settings, SimilarityProvider>> BUILT_IN; + static { + Map<String, BiFunction<String, Settings, SimilarityProvider>> defaults = new HashMap<>(); + Map<String, BiFunction<String, Settings, SimilarityProvider>> builtIn = new HashMap<>(); + defaults.put("default", DefaultSimilarityProvider::new); + defaults.put("BM25", BM25SimilarityProvider::new); + builtIn.put("default", DefaultSimilarityProvider::new); + builtIn.put("BM25", BM25SimilarityProvider::new); + builtIn.put("DFR", DFRSimilarityProvider::new); + builtIn.put("IB", IBSimilarityProvider::new); + builtIn.put("LMDirichlet", LMDirichletSimilarityProvider::new); + builtIn.put("LMJelinekMercer", LMJelinekMercerSimilarityProvider::new); + DEFAULTS = Collections.unmodifiableMap(defaults); + BUILT_IN = Collections.unmodifiableMap(builtIn); + } public SimilarityService(Index index) { this(index, Settings.Builder.EMPTY_SETTINGS); } public SimilarityService(Index index, Settings settings) { - this(index, settings, new SimilarityLookupService(index, settings), null); + this(index, settings, Collections.emptyMap()); } @Inject - public SimilarityService(Index index, @IndexSettings Settings indexSettings, - final SimilarityLookupService similarityLookupService, final MapperService mapperService) { + public SimilarityService(Index index, @IndexSettings Settings indexSettings, Map<String, BiFunction<String, Settings, SimilarityProvider>> similarities) { super(index, indexSettings); - this.similarityLookupService = similarityLookupService; - this.mapperService = mapperService; - - Similarity defaultSimilarity = similarityLookupService.similarity(SimilarityLookupService.DEFAULT_SIMILARITY).get(); + Map<String, SimilarityProvider> providers = new HashMap<>(similarities.size()); + Map<String, Settings> similaritySettings = indexSettings.getGroups(SimilarityModule.SIMILARITY_SETTINGS_PREFIX); + for (Map.Entry<String, Settings> entry : similaritySettings.entrySet()) { + String name = entry.getKey(); + Settings settings = entry.getValue(); + String typeName = settings.get("type"); + if (typeName == null) { + throw new IllegalArgumentException("Similarity [" + name + "] must have an associated type"); + } else if ((similarities.containsKey(typeName) || BUILT_IN.containsKey(typeName)) == false) { + throw new IllegalArgumentException("Unknown Similarity type [" + typeName + "] for [" + name + "]");
[" + name + "]"); + } + BiFunction factory = similarities.getOrDefault(typeName, BUILT_IN.get(typeName)); + if (settings == null) { + settings = Settings.Builder.EMPTY_SETTINGS; + } + providers.put(name, factory.apply(name, settings)); + } + addSimilarities(similaritySettings, providers, DEFAULTS); + this.similarities = providers; + defaultSimilarity = providers.get(SimilarityService.DEFAULT_SIMILARITY).get(); // Expert users can configure the base type as being different to default, but out-of-box we use default. - Similarity baseSimilarity = (similarityLookupService.similarity("base") != null) ? similarityLookupService.similarity("base").get() : - defaultSimilarity; - - this.perFieldSimilarity = (mapperService != null) ? new PerFieldSimilarity(defaultSimilarity, baseSimilarity, mapperService) : + baseSimilarity = (providers.get("base") != null) ? providers.get("base").get() : defaultSimilarity; } - public Similarity similarity() { - return perFieldSimilarity; + public Similarity similarity(MapperService mapperService) { + // TODO we can maybe factor out MapperService here entirely by introducing an interface for the lookup? + return (mapperService != null) ? new PerFieldSimilarity(defaultSimilarity, baseSimilarity, mapperService) : + defaultSimilarity; } - public SimilarityLookupService similarityLookupService() { - return similarityLookupService; + private void addSimilarities(Map similaritySettings, Map providers, Map> similarities) { + for (Map.Entry> entry : similarities.entrySet()) { + String name = entry.getKey(); + BiFunction factory = entry.getValue(); + Settings settings = similaritySettings.get(name); + if (settings == null) { + settings = Settings.Builder.EMPTY_SETTINGS; + } + providers.put(name, factory.apply(name, settings)); + } } - public MapperService mapperService() { - return mapperService; + public SimilarityProvider getSimilarity(String name) { + return similarities.get(name); } static class PerFieldSimilarity extends PerFieldSimilarityWrapper { diff --git a/core/src/main/java/org/elasticsearch/index/translog/Translog.java b/core/src/main/java/org/elasticsearch/index/translog/Translog.java index 5084895151b..4265d611fbf 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/core/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -23,11 +23,9 @@ import org.apache.lucene.index.Term; import org.apache.lucene.index.TwoPhaseCommit; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.Accountable; -import org.apache.lucene.util.CollectionUtil; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -38,7 +36,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; @@ -54,7 +51,6 @@ import org.elasticsearch.threadpool.ThreadPool; import java.io.Closeable; import java.io.EOFException; -import java.io.FileNotFoundException; import java.io.IOException; import 
diff --git a/core/src/main/java/org/elasticsearch/index/translog/Translog.java b/core/src/main/java/org/elasticsearch/index/translog/Translog.java index 5084895151b..4265d611fbf 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/core/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -23,11 +23,9 @@ import org.apache.lucene.index.Term; import org.apache.lucene.index.TwoPhaseCommit; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.Accountable; -import org.apache.lucene.util.CollectionUtil; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -38,7 +36,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; @@ -54,7 +51,6 @@ import org.elasticsearch.threadpool.ThreadPool; import java.io.Closeable; import java.io.EOFException; -import java.io.FileNotFoundException; import java.io.IOException; import java.nio.channels.FileChannel; import java.nio.file.*; @@ -189,99 +185,6 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC } } - /** - * This method is used to upgarde a pre 2.0 translog structure to the new checkpoint based structure. - * The {@link org.elasticsearch.index.translog.Translog.TranslogGeneration} in the given config is - * used to determine the smallest file generation to upgrade. The procedure will travers the translog - * directory to find all files that have a generation greater or equal to the translog generation and - * renames the files to the new .tlog file format. - * <p> - * For each of the files a <tt>${filename}.ckp</tt> - * file is written containing the size of the translog in bytes, it's ID and the number of operations. Since - * these files are all relying on the pre 2.0 truncation feature where we read operations until hitting an {@link EOFException} - * the number of operations are recoreded as -1. Later once these files are opened for reading legacy readers will - * allow for unknown number of operations and mimic the old behavior. - * </p>
- */ - public static void upgradeLegacyTranslog(ESLogger logger, TranslogConfig config) throws IOException { - Path translogPath = config.getTranslogPath(); - TranslogGeneration translogGeneration = config.getTranslogGeneration(); - if (translogGeneration == null) { - throw new IllegalArgumentException("TranslogGeneration must be set in order to upgrade"); - } - if (translogGeneration.translogUUID != null) { - throw new IllegalArgumentException("TranslogGeneration has a non-null UUID - index must have already been upgraded"); - } - try { - if (Checkpoint.read(translogPath.resolve(CHECKPOINT_FILE_NAME)) != null) { - throw new IllegalStateException(CHECKPOINT_FILE_NAME + " file already present, translog is already upgraded"); - } - } catch (NoSuchFileException | FileNotFoundException ex) { - logger.debug("upgrading translog - no checkpoint found"); - } - final Pattern parseLegacyIdPattern = Pattern.compile("^" + TRANSLOG_FILE_PREFIX + "(\\d+)((\\.recovering))?$"); // here we have to be lenient - nowhere else! - try (DirectoryStream<Path> stream = Files.newDirectoryStream(translogPath, new DirectoryStream.Filter<Path>() { - @Override - public boolean accept(Path entry) throws IOException { - Matcher matcher = parseLegacyIdPattern.matcher(entry.getFileName().toString()); - if (matcher.matches() == false) { - Matcher newIdMatcher = PARSE_STRICT_ID_PATTERN.matcher(entry.getFileName().toString()); - return newIdMatcher.matches(); - } else { - return true; - } - } - })) { - long latestGeneration = -1; - List<PathWithGeneration> filesToUpgrade = new ArrayList<>(); - for (Path path : stream) { - Matcher matcher = parseLegacyIdPattern.matcher(path.getFileName().toString()); - if (matcher.matches()) { - long generation = Long.parseLong(matcher.group(1)); - if (generation >= translogGeneration.translogFileGeneration) { - latestGeneration = Math.max(translogGeneration.translogFileGeneration, generation); - } - filesToUpgrade.add(new PathWithGeneration(path, generation)); - } else { - Matcher strict_matcher = PARSE_STRICT_ID_PATTERN.matcher(path.getFileName().toString()); - if (strict_matcher.matches()) { - throw new IllegalStateException("non-legacy translog file [" + path.getFileName().toString() + "] found on a translog that wasn't upgraded yet"); - } - } - } - if (latestGeneration < translogGeneration.translogFileGeneration) { - throw new IllegalStateException("latest found translog has a lower generation that the excepcted uncommitted " + translogGeneration.translogFileGeneration + " > " + latestGeneration); - } - CollectionUtil.timSort(filesToUpgrade, new Comparator<PathWithGeneration>() { - @Override - public int compare(PathWithGeneration o1, PathWithGeneration o2) { - long gen1 = o1.getGeneration(); - long gen2 = o2.getGeneration(); - return Long.compare(gen1, gen2); - } - }); - for (PathWithGeneration pathAndGeneration : filesToUpgrade) { - final Path path = pathAndGeneration.getPath(); - final long generation = pathAndGeneration.getGeneration(); - final Path target = path.resolveSibling(getFilename(generation)); - logger.debug("upgrading translog copy file from {} to {}", path, target); - Files.move(path, target, StandardCopyOption.ATOMIC_MOVE); - logger.debug("write commit point for {}", target); - if (generation == latestGeneration) { - // for the last one we only write a checkpoint not a real commit - Checkpoint checkpoint = new Checkpoint(Files.size(translogPath.resolve(getFilename(latestGeneration))), -1, latestGeneration); - Checkpoint.write(translogPath.resolve(CHECKPOINT_FILE_NAME), checkpoint, StandardOpenOption.WRITE,
StandardOpenOption.CREATE_NEW); - } else { - Checkpoint checkpoint = new Checkpoint(Files.size(target), -1, generation); - Checkpoint.write(translogPath.resolve(getCommitCheckpointFileName(generation)), checkpoint, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW); - } - } - - IOUtils.fsync(translogPath, true); - - } - } - /** recover all translog files found on disk */ private ArrayList<ImmutableTranslogReader> recoverFromFiles(TranslogGeneration translogGeneration, Checkpoint checkpoint) throws IOException { boolean success = false; @@ -465,11 +368,10 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC } /** - * Adds a created / delete / index operations to the transaction log. + * Adds a delete / index operation to the transaction log. * * @see org.elasticsearch.index.translog.Translog.Operation - * @see org.elasticsearch.index.translog.Translog.Create - * @see org.elasticsearch.index.translog.Translog.Index + * @see Index * @see org.elasticsearch.index.translog.Translog.Delete */ public Location add(Operation operation) throws TranslogException { @@ -874,10 +776,10 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC */ public interface Operation extends Streamable { enum Type { + @Deprecated CREATE((byte) 1), - SAVE((byte) 2), - DELETE((byte) 3), - DELETE_BY_QUERY((byte) 4); + INDEX((byte) 2), + DELETE((byte) 3); private final byte id; @@ -894,11 +796,9 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC case 1: return CREATE; case 2: - return SAVE; + return INDEX; case 3: return DELETE; - case 4: - return DELETE_BY_QUERY; default: throw new IllegalArgumentException("No type mapped for [" + id + "]"); } @@ -929,199 +829,6 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC } } - public static class Create implements Operation { - public static final int SERIALIZATION_FORMAT = 6; - - private String id; - private String type; - private BytesReference source; - private String routing; - private String parent; - private long timestamp; - private long ttl; - private long version = Versions.MATCH_ANY; - private VersionType versionType = VersionType.INTERNAL; - - public Create() { - } - - public Create(Engine.Create create) { - this.id = create.id(); - this.type = create.type(); - this.source = create.source(); - this.routing = create.routing(); - this.parent = create.parent(); - this.timestamp = create.timestamp(); - this.ttl = create.ttl(); - this.version = create.version(); - this.versionType = create.versionType(); - } - - public Create(String type, String id, byte[] source) { - this.id = id; - this.type = type; - this.source = new BytesArray(source); - } - - @Override - public Type opType() { - return Type.CREATE; - } - - @Override - public long estimateSize() { - return ((id.length() + type.length()) * 2) + source.length() + 12; - } - - public String id() { - return this.id; - } - - public BytesReference source() { - return this.source; - } - - public String type() { - return this.type; - } - - public String routing() { - return this.routing; - } - - public String parent() { - return this.parent; - } - - public long timestamp() { - return this.timestamp; - } - - public long ttl() { - return this.ttl; - } - - public long version() { - return this.version; - } - - public VersionType versionType() { - return versionType; - } - - @Override - public Source getSource() { - return new Source(source, routing, parent, timestamp, ttl); - } - - @Override - public void readFrom(StreamInput
in) throws IOException { - int version = in.readVInt(); // version - id = in.readString(); - type = in.readString(); - source = in.readBytesReference(); - if (version >= 1) { - if (in.readBoolean()) { - routing = in.readString(); - } - } - if (version >= 2) { - if (in.readBoolean()) { - parent = in.readString(); - } - } - if (version >= 3) { - this.version = in.readLong(); - } - if (version >= 4) { - this.timestamp = in.readLong(); - } - if (version >= 5) { - this.ttl = in.readLong(); - } - if (version >= 6) { - this.versionType = VersionType.fromValue(in.readByte()); - } - - assert versionType.validateVersionForWrites(version); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(SERIALIZATION_FORMAT); - out.writeString(id); - out.writeString(type); - out.writeBytesReference(source); - if (routing == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - out.writeString(routing); - } - if (parent == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - out.writeString(parent); - } - out.writeLong(version); - out.writeLong(timestamp); - out.writeLong(ttl); - out.writeByte(versionType.getValue()); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - - Create create = (Create) o; - - if (timestamp != create.timestamp || - ttl != create.ttl || - version != create.version || - id.equals(create.id) == false || - type.equals(create.type) == false || - source.equals(create.source) == false) { - return false; - } - if (routing != null ? !routing.equals(create.routing) : create.routing != null) { - return false; - } - if (parent != null ? !parent.equals(create.parent) : create.parent != null) { - return false; - } - return versionType == create.versionType; - - } - - @Override - public int hashCode() { - int result = id.hashCode(); - result = 31 * result + type.hashCode(); - result = 31 * result + source.hashCode(); - result = 31 * result + (routing != null ? routing.hashCode() : 0); - result = 31 * result + (parent != null ? parent.hashCode() : 0); - result = 31 * result + (int) (timestamp ^ (timestamp >>> 32)); - result = 31 * result + (int) (ttl ^ (ttl >>> 32)); - result = 31 * result + (int) (version ^ (version >>> 32)); - result = 31 * result + versionType.hashCode(); - return result; - } - - @Override - public String toString() { - return "Create{" + - "id='" + id + '\'' + - ", type='" + type + '\'' + - '}'; - } - } - public static class Index implements Operation { public static final int SERIALIZATION_FORMAT = 6; @@ -1158,7 +865,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC @Override public Type opType() { - return Type.SAVE; + return Type.INDEX; } @Override @@ -1425,137 +1132,6 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC } } - /** @deprecated Delete-by-query is removed in 2.0, but we keep this so translog can replay on upgrade. 
*/ - @Deprecated - public static class DeleteByQuery implements Operation { - - public static final int SERIALIZATION_FORMAT = 2; - private BytesReference source; - @Nullable - private String[] filteringAliases; - private String[] types = Strings.EMPTY_ARRAY; - - public DeleteByQuery() { - } - - public DeleteByQuery(Engine.DeleteByQuery deleteByQuery) { - this(deleteByQuery.source(), deleteByQuery.filteringAliases(), deleteByQuery.types()); - } - - public DeleteByQuery(BytesReference source, String[] filteringAliases, String... types) { - this.source = source; - this.types = types == null ? Strings.EMPTY_ARRAY : types; - this.filteringAliases = filteringAliases; - } - - @Override - public Type opType() { - return Type.DELETE_BY_QUERY; - } - - @Override - public long estimateSize() { - return source.length() + 8; - } - - public BytesReference source() { - return this.source; - } - - public String[] filteringAliases() { - return filteringAliases; - } - - public String[] types() { - return this.types; - } - - @Override - public Source getSource() { - throw new IllegalStateException("trying to read doc source from delete_by_query operation"); - } - - @Override - public void readFrom(StreamInput in) throws IOException { - int version = in.readVInt(); // version - source = in.readBytesReference(); - if (version < 2) { - // for query_parser_name, which was removed - if (in.readBoolean()) { - in.readString(); - } - } - int typesSize = in.readVInt(); - if (typesSize > 0) { - types = new String[typesSize]; - for (int i = 0; i < typesSize; i++) { - types[i] = in.readString(); - } - } - if (version >= 1) { - int aliasesSize = in.readVInt(); - if (aliasesSize > 0) { - filteringAliases = new String[aliasesSize]; - for (int i = 0; i < aliasesSize; i++) { - filteringAliases[i] = in.readString(); - } - } - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(SERIALIZATION_FORMAT); - out.writeBytesReference(source); - out.writeVInt(types.length); - for (String type : types) { - out.writeString(type); - } - if (filteringAliases != null) { - out.writeVInt(filteringAliases.length); - for (String alias : filteringAliases) { - out.writeString(alias); - } - } else { - out.writeVInt(0); - } - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - - DeleteByQuery that = (DeleteByQuery) o; - - if (!Arrays.equals(filteringAliases, that.filteringAliases)) { - return false; - } - if (!Arrays.equals(types, that.types)) { - return false; - } - return source.equals(that.source); - } - - @Override - public int hashCode() { - int result = source.hashCode(); - result = 31 * result + (filteringAliases != null ? 
Arrays.hashCode(filteringAliases) : 0); - result = 31 * result + Arrays.hashCode(types); - return result; - } - - @Override - public String toString() { - return "DeleteByQuery{" + - "types=" + Arrays.toString(types) + - '}'; - } - } public enum Durabilty { /** @@ -1667,13 +1243,12 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC static Translog.Operation newOperationFromType(Translog.Operation.Type type) throws IOException { switch (type) { case CREATE: - return new Translog.Create(); + // the deserialization logic in Index was identical to that of Create when create was deprecated + return new Index(); case DELETE: return new Translog.Delete(); - case DELETE_BY_QUERY: - return new Translog.DeleteByQuery(); - case SAVE: - return new Translog.Index(); + case INDEX: + return new Index(); default: throw new IOException("No type for [" + type + "]"); } @@ -1781,10 +1356,6 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC return current.getFirstOperationOffset(); } - List<ImmutableTranslogReader> getRecoveredReaders() { // for testing - return this.recoveredTranslogs; - } - private void ensureOpen() { if (closed.get()) { throw new AlreadyClosedException("translog is already closed"); @@ -1798,21 +1369,4 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC return outstandingViews.size(); } - private static class PathWithGeneration { - private final Path path; - private final long generation; - - public PathWithGeneration(Path path, long generation) { - this.path = path; - this.generation = generation; - } - - public Path getPath() { - return path; - } - - public long getGeneration() { - return generation; - } - } } diff --git a/core/src/main/java/org/elasticsearch/index/translog/TranslogConfig.java b/core/src/main/java/org/elasticsearch/index/translog/TranslogConfig.java index 4d74961619c..30ab8144e1e 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/TranslogConfig.java +++ b/core/src/main/java/org/elasticsearch/index/translog/TranslogConfig.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.settings.IndexSettings; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog.TranslogGeneration; +import org.elasticsearch.indices.memory.IndexingMemoryController; import org.elasticsearch.threadpool.ThreadPool; import java.nio.file.Path; @@ -42,7 +43,6 @@ public final class TranslogConfig { public static final String INDEX_TRANSLOG_FS_TYPE = "index.translog.fs.type"; public static final String INDEX_TRANSLOG_BUFFER_SIZE = "index.translog.fs.buffer_size"; public static final String INDEX_TRANSLOG_SYNC_INTERVAL = "index.translog.sync_interval"; - public static final ByteSizeValue INACTIVE_SHARD_TRANSLOG_BUFFER = ByteSizeValue.parseBytesSizeValue("1kb", "INACTIVE_SHARD_TRANSLOG_BUFFER"); private final TimeValue syncInterval; private final BigArrays bigArrays; @@ -73,7 +73,7 @@ public final class TranslogConfig { this.threadPool = threadPool; this.bigArrays = bigArrays; this.type = TranslogWriter.Type.fromString(indexSettings.get(INDEX_TRANSLOG_FS_TYPE, TranslogWriter.Type.BUFFERED.name())); - this.bufferSize = (int) indexSettings.getAsBytesSize(INDEX_TRANSLOG_BUFFER_SIZE, ByteSizeValue.parseBytesSizeValue("64k", INDEX_TRANSLOG_BUFFER_SIZE)).bytes(); // Not really interesting, updated by IndexingMemoryController...
+ this.bufferSize = (int) indexSettings.getAsBytesSize(INDEX_TRANSLOG_BUFFER_SIZE, IndexingMemoryController.INACTIVE_SHARD_TRANSLOG_BUFFER).bytes(); // Not really interesting, updated by IndexingMemoryController... syncInterval = indexSettings.getAsTime(INDEX_TRANSLOG_SYNC_INTERVAL, TimeValue.timeValueSeconds(5)); if (syncInterval.millis() > 0 && threadPool != null) { diff --git a/core/src/main/java/org/elasticsearch/index/translog/TranslogStats.java b/core/src/main/java/org/elasticsearch/index/translog/TranslogStats.java index 1af0a747c27..a4431b520cd 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/TranslogStats.java +++ b/core/src/main/java/org/elasticsearch/index/translog/TranslogStats.java @@ -18,11 +18,10 @@ */ package org.elasticsearch.index.translog; +import org.elasticsearch.action.support.ToXContentToBytes; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilderString; @@ -31,17 +30,23 @@ import java.io.IOException; /** * */ -public class TranslogStats implements ToXContent, Streamable { +public class TranslogStats extends ToXContentToBytes implements Streamable { - private long translogSizeInBytes = 0; - private int estimatedNumberOfOperations = -1; + private long translogSizeInBytes; + private int numberOfOperations; public TranslogStats() { } - public TranslogStats(int estimatedNumberOfOperations, long translogSizeInBytes) { + public TranslogStats(int numberOfOperations, long translogSizeInBytes) { + if (numberOfOperations < 0) { + throw new IllegalArgumentException("numberOfOperations must be >= 0"); + } + if (translogSizeInBytes < 0) { + throw new IllegalArgumentException("translogSizeInBytes must be >= 0"); + } assert translogSizeInBytes >= 0 : "translogSizeInBytes must be >= 0, got [" + translogSizeInBytes + "]"; - this.estimatedNumberOfOperations = estimatedNumberOfOperations; + this.numberOfOperations = numberOfOperations; this.translogSizeInBytes = translogSizeInBytes; } @@ -50,22 +55,22 @@ public class TranslogStats implements ToXContent, Streamable { return; } - this.estimatedNumberOfOperations += translogStats.estimatedNumberOfOperations; - this.translogSizeInBytes = +translogStats.translogSizeInBytes; + this.numberOfOperations += translogStats.numberOfOperations; + this.translogSizeInBytes += translogStats.translogSizeInBytes; } - public ByteSizeValue translogSizeInBytes() { - return new ByteSizeValue(translogSizeInBytes); + public long getTranslogSizeInBytes() { + return translogSizeInBytes; } public long estimatedNumberOfOperations() { - return estimatedNumberOfOperations; + return numberOfOperations; } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.TRANSLOG); - builder.field(Fields.OPERATIONS, estimatedNumberOfOperations); + builder.field(Fields.OPERATIONS, numberOfOperations); builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, translogSizeInBytes); builder.endObject(); return builder; @@ -80,13 +85,13 @@ public class TranslogStats implements ToXContent, Streamable { @Override public void readFrom(StreamInput in) throws IOException { - estimatedNumberOfOperations = in.readVInt(); + numberOfOperations = in.readVInt(); 
translogSizeInBytes = in.readVLong(); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(estimatedNumberOfOperations); + out.writeVInt(numberOfOperations); out.writeVLong(translogSizeInBytes); } } diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesService.java b/core/src/main/java/org/elasticsearch/indices/IndicesService.java index ba160fc636c..95525767001 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -50,7 +50,6 @@ import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexNameModule; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.LocalNodeIdModule; import org.elasticsearch.index.analysis.AnalysisModule; import org.elasticsearch.index.analysis.AnalysisService; import org.elasticsearch.index.cache.IndexCache; @@ -330,7 +329,6 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i ModulesBuilder modules = new ModulesBuilder(); modules.add(new IndexNameModule(index)); - modules.add(new LocalNodeIdModule(localNodeId)); modules.add(new IndexSettingsModule(index, indexSettings)); // plugin modules must be added here, before others or we can get crazy injection errors... for (Module pluginModule : pluginsService.indexModules(indexSettings)) { @@ -338,7 +336,7 @@ } modules.add(new IndexStoreModule(indexSettings)); modules.add(new AnalysisModule(indexSettings, indicesAnalysisService)); - modules.add(new SimilarityModule(indexSettings)); + modules.add(new SimilarityModule(index, indexSettings)); modules.add(new IndexCacheModule(indexSettings)); modules.add(new IndexModule()); pluginsService.processModules(modules); diff --git a/core/src/main/java/org/elasticsearch/indices/cache/request/IndicesRequestCache.java b/core/src/main/java/org/elasticsearch/indices/cache/request/IndicesRequestCache.java index 4ab4691c166..5fb70b61160 100644 --- a/core/src/main/java/org/elasticsearch/indices/cache/request/IndicesRequestCache.java +++ b/core/src/main/java/org/elasticsearch/indices/cache/request/IndicesRequestCache.java @@ -21,12 +21,6 @@ package org.elasticsearch.indices.cache.request; import com.carrotsearch.hppc.ObjectHashSet; import com.carrotsearch.hppc.ObjectSet; -import com.google.common.cache.Cache; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.RemovalListener; -import com.google.common.cache.RemovalNotification; -import com.google.common.cache.Weigher; - import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.util.Accountable; @@ -35,6 +29,7 @@ import org.elasticsearch.action.search.SearchType; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.cache.*; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -51,14 +46,11 @@ import org.elasticsearch.search.query.QueryPhase; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.threadpool.ThreadPool; -import java.util.Collection; -import java.util.Collections; -import java.util.EnumSet; -import java.util.Iterator; -import java.util.Set; -import
java.util.concurrent.Callable; +import java.io.IOException; +import java.util.*; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; +import java.util.function.Function; import static org.elasticsearch.common.Strings.hasLength; @@ -162,25 +154,17 @@ public class IndicesRequestCache extends AbstractComponent implements RemovalLis private void buildCache() { long sizeInBytes = MemorySizeValue.parseBytesSizeValueOrHeapRatio(size, INDICES_CACHE_QUERY_SIZE).bytes(); - CacheBuilder<Key, Value> cacheBuilder = CacheBuilder.newBuilder() - .maximumWeight(sizeInBytes).weigher(new QueryCacheWeigher()).removalListener(this); - cacheBuilder.concurrencyLevel(concurrencyLevel); + CacheBuilder<Key, Value> cacheBuilder = CacheBuilder.<Key, Value>builder() + .setMaximumWeight(sizeInBytes).weigher((k, v) -> k.ramBytesUsed() + v.ramBytesUsed()).removalListener(this); + // cacheBuilder.concurrencyLevel(concurrencyLevel); if (expire != null) { - cacheBuilder.expireAfterAccess(expire.millis(), TimeUnit.MILLISECONDS); + cacheBuilder.setExpireAfterAccess(TimeUnit.MILLISECONDS.toNanos(expire.millis())); } cache = cacheBuilder.build(); } - private static class QueryCacheWeigher implements Weigher<Key, Value> { - - @Override - public int weigh(Key key, Value value) { - return (int) (key.ramBytesUsed() + value.ramBytesUsed()); - } - } - public void close() { reaper.close(); cache.invalidateAll(); @@ -197,9 +181,6 @@ public class IndicesRequestCache extends AbstractComponent implements RemovalLis @Override public void onRemoval(RemovalNotification<Key, Value> notification) { - if (notification.getKey() == null) { - return; - } notification.getKey().shard.requestCache().onRemoval(notification); } @@ -258,8 +239,8 @@ public class IndicesRequestCache extends AbstractComponent implements RemovalLis public void loadIntoContext(final ShardSearchRequest request, final SearchContext context, final QueryPhase queryPhase) throws Exception { assert canCache(request, context); Key key = buildKey(request, context); - Loader loader = new Loader(queryPhase, context, key); - Value value = cache.get(key, loader); + Loader loader = new Loader(queryPhase, context); + Value value = cache.computeIfAbsent(key, loader); if (loader.isLoaded()) { key.shard.requestCache().onMiss(); // see if its the first time we see this reader, and make sure to register a cleanup key @@ -279,17 +260,15 @@ public class IndicesRequestCache extends AbstractComponent implements RemovalLis } } - private static class Loader implements Callable<Value> { + private static class Loader implements CacheLoader<Key, Value> { private final QueryPhase queryPhase; private final SearchContext context; - private final IndicesRequestCache.Key key; private boolean loaded; - Loader(QueryPhase queryPhase, SearchContext context, IndicesRequestCache.Key key) { + Loader(QueryPhase queryPhase, SearchContext context) { this.queryPhase = queryPhase; this.context = context; - this.key = key; } public boolean isLoaded() { @@ -297,7 +276,7 @@ public class IndicesRequestCache extends AbstractComponent implements RemovalLis } @Override - public Value call() throws Exception { + public Value load(Key key) throws Exception { queryPhase.execute(context); /* BytesStreamOutput allows to pass the expected size but by default uses @@ -473,7 +452,7 @@ public class IndicesRequestCache extends AbstractComponent implements RemovalLis if (!currentKeysToClean.isEmpty() || !currentFullClean.isEmpty()) { CleanupKey lookupKey = new CleanupKey(null, -1); - for (Iterator<Key> iterator = cache.asMap().keySet().iterator(); iterator.hasNext(); ) { + for (Iterator<Key> iterator = cache.keys().iterator(); iterator.hasNext(); ) {
Key key = iterator.next(); if (currentFullClean.contains(key.shard)) { iterator.remove(); @@ -487,7 +466,7 @@ } } - cache.cleanUp(); + cache.refresh(); currentKeysToClean.clear(); currentFullClean.clear(); } diff --git a/core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java b/core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java index 2a2aef4d691..6612b9f4e8e 100644 --- a/core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java +++ b/core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java @@ -19,12 +19,15 @@ package org.elasticsearch.indices.fielddata.cache; -import com.google.common.cache.*; -import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SegmentReader; import org.apache.lucene.util.Accountable; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.cache.Cache; +import org.elasticsearch.common.cache.CacheBuilder; +import org.elasticsearch.common.cache.RemovalListener; +import org.elasticsearch.common.cache.RemovalNotification; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.ESLogger; @@ -43,6 +46,7 @@ import org.elasticsearch.threadpool.ThreadPool; import java.util.ArrayList; import java.util.List; +import java.util.function.ToLongBiFunction; /** */ @@ -66,17 +70,11 @@ public class IndicesFieldDataCache extends AbstractComponent implements RemovalL this.indicesFieldDataCacheListener = indicesFieldDataCacheListener; final String size = settings.get(INDICES_FIELDDATA_CACHE_SIZE_KEY, "-1"); final long sizeInBytes = settings.getAsMemory(INDICES_FIELDDATA_CACHE_SIZE_KEY, "-1").bytes(); - CacheBuilder<Key, Accountable> cacheBuilder = CacheBuilder.newBuilder() + CacheBuilder<Key, Accountable> cacheBuilder = CacheBuilder.<Key, Accountable>builder() .removalListener(this); if (sizeInBytes > 0) { - cacheBuilder.maximumWeight(sizeInBytes).weigher(new FieldDataWeigher()); + cacheBuilder.setMaximumWeight(sizeInBytes).weigher(new FieldDataWeigher()); } - // defaults to 4, but this is a busy map for all indices, increase it a bit by default - final int concurrencyLevel = settings.getAsInt(FIELDDATA_CACHE_CONCURRENCY_LEVEL, 16); - if (concurrencyLevel <= 0) { - throw new IllegalArgumentException("concurrency_level must be > 0 but was: " + concurrencyLevel); - } - cacheBuilder.concurrencyLevel(concurrencyLevel); logger.debug("using size [{}] [{}]", size, new ByteSizeValue(sizeInBytes)); cache = cacheBuilder.build(); @@ -108,7 +106,7 @@ public class IndicesFieldDataCache extends AbstractComponent implements RemovalL final Accountable value = notification.getValue(); for (IndexFieldDataCache.Listener listener : key.listeners) { try { - listener.onRemoval(key.shardId, indexCache.fieldNames, indexCache.fieldDataType, notification.wasEvicted(), value.ramBytesUsed()); + listener.onRemoval(key.shardId, indexCache.fieldNames, indexCache.fieldDataType, notification.getRemovalReason() == RemovalNotification.RemovalReason.EVICTED, value.ramBytesUsed()); } catch (Throwable e) { // load anyway since listeners should not throw exceptions logger.error("Failed to call listener on field data cache unloading", e);
@@ -116,10 +114,9 @@ public class IndicesFieldDataCache extends AbstractComponent implements RemovalL } } - public static class FieldDataWeigher implements Weigher<Key, Accountable> { - + public static class FieldDataWeigher implements ToLongBiFunction<Key, Accountable> { @Override - public int weigh(Key key, Accountable ramUsage) { + public long applyAsLong(Key key, Accountable ramUsage) { int weight = (int) Math.min(ramUsage.ramBytesUsed(), Integer.MAX_VALUE); return weight == 0 ? 1 : weight; } @@ -150,13 +147,13 @@ public class IndicesFieldDataCache extends AbstractComponent implements RemovalL final ShardId shardId = ShardUtils.extractShardId(context.reader()); final Key key = new Key(this, context.reader().getCoreCacheKey(), shardId); //noinspection unchecked - final Accountable accountable = cache.get(key, () -> { + final Accountable accountable = cache.computeIfAbsent(key, k -> { context.reader().addCoreClosedListener(IndexFieldCache.this); for (Listener listener : this.listeners) { - key.listeners.add(listener); + k.listeners.add(listener); } final AtomicFieldData fieldData = indexFieldData.loadDirect(context); - for (Listener listener : key.listeners) { + for (Listener listener : k.listeners) { try { listener.onCache(shardId, fieldNames, fieldDataType, fieldData); } catch (Throwable e) { @@ -174,13 +171,13 @@ public class IndicesFieldDataCache extends AbstractComponent implements RemovalL final ShardId shardId = ShardUtils.extractShardId(indexReader); final Key key = new Key(this, indexReader.getCoreCacheKey(), shardId); //noinspection unchecked - final Accountable accountable = cache.get(key, () -> { + final Accountable accountable = cache.computeIfAbsent(key, k -> { indexReader.addReaderClosedListener(IndexFieldCache.this); for (Listener listener : this.listeners) { - key.listeners.add(listener); + k.listeners.add(listener); } final Accountable ifd = (Accountable) indexFieldData.localGlobalDirect(indexReader); - for (Listener listener : key.listeners) { + for (Listener listener : k.listeners) { try { listener.onCache(shardId, fieldNames, fieldDataType, ifd); } catch (Throwable e) {
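For reviewers who have not seen org.elasticsearch.common.cache yet, here is a self-contained sketch of the replacement API exactly as the hunks above use it — builder setters, a ToLongBiFunction weigher, getRemovalReason(), and computeIfAbsent with a keyed CacheLoader. String/Long are placeholder types, and the throws clause is deliberately broad since the loader can fail (the callers above declare throws Exception):

import org.elasticsearch.common.cache.Cache;
import org.elasticsearch.common.cache.CacheBuilder;
import org.elasticsearch.common.cache.RemovalNotification;

static void cacheSketch() throws Exception {
    Cache<String, Long> cache = CacheBuilder.<String, Long>builder()
            .setMaximumWeight(10_000)
            // replaces Guava's Weigher: a plain (key, value) -> long function
            .weigher((key, value) -> key.length() + Long.BYTES)
            .removalListener(notification -> {
                // replaces Guava's wasEvicted(): the reason is an enum on the notification
                boolean evicted = notification.getRemovalReason() == RemovalNotification.RemovalReason.EVICTED;
            })
            .build();

    // replaces cache.get(key, Callable): the loader receives the key,
    // which is what enables the key -> k reuse fix in the hunks above
    long bytes = cache.computeIfAbsent("fielddata", key -> (long) key.length());
    cache.invalidate("fielddata");
    cache.refresh(); // forces pending evictions, where Guava needed cleanUp()
}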
@@ -207,38 +204,28 @@ public class IndicesFieldDataCache extends AbstractComponent implements RemovalL @Override public void clear() { - for (Key key : cache.asMap().keySet()) { + for (Key key : cache.keys()) { if (key.indexCache.index.equals(index)) { cache.invalidate(key); } } - // Note that cache invalidation in Guava does not immediately remove - // values from the cache. In the case of a cache with a rare write or - // read rate, it's possible for values to persist longer than desired. - // - // Note this is intended by the Guava developers, see: - // https://code.google.com/p/guava-libraries/wiki/CachesExplained#Eviction - // (the "When Does Cleanup Happen" section) - - // We call it explicitly here since it should be a "rare" operation, and - // if a user runs it he probably wants to see memory returned as soon as - // possible - cache.cleanUp(); + // force eviction + cache.refresh(); } @Override public void clear(String fieldName) { - for (Key key : cache.asMap().keySet()) { + for (Key key : cache.keys()) { if (key.indexCache.index.equals(index)) { if (key.indexCache.fieldNames.fullName().equals(fieldName)) { cache.invalidate(key); } } } - // we call cleanUp() because this is a manual operation, should happen + // we call refresh because this is a manual operation, should happen // rarely and probably means the user wants to see memory returned as // soon as possible - cache.cleanUp(); + cache.refresh(); } @Override @@ -305,7 +292,7 @@ public class IndicesFieldDataCache extends AbstractComponent implements RemovalL logger.trace("running periodic field data cache cleanup"); } try { - this.cache.cleanUp(); + this.cache.refresh(); } catch (Exception e) { logger.warn("Exception during periodic field data cache cleanup:", e); } diff --git a/core/src/main/java/org/elasticsearch/indices/memory/IndexingMemoryController.java b/core/src/main/java/org/elasticsearch/indices/memory/IndexingMemoryController.java index a84fff30676..90bb4c41a38 100644 --- a/core/src/main/java/org/elasticsearch/indices/memory/IndexingMemoryController.java +++ b/core/src/main/java/org/elasticsearch/indices/memory/IndexingMemoryController.java @@ -29,12 +29,10 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.EngineClosedException; -import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.engine.FlushNotAllowedEngineException; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.threadpool.ThreadPool; @@ -42,9 +40,6 @@ import org.elasticsearch.threadpool.ThreadPool; import java.util.*; import java.util.concurrent.ScheduledFuture; -/** - * - */ public class IndexingMemoryController extends AbstractLifecycleComponent<IndexingMemoryController> { /** How much heap (% or bytes) we will share across all actively indexing shards on this node (default: 10%).
*/ @@ -83,6 +78,12 @@ public class IndexingMemoryController extends AbstractLifecycleComponent<IndexingMemoryController> - private final Map<ShardId, ShardIndexingStatus> shardsIndicesStatus = new HashMap<>(); + // True if the shard was active last time we checked + private final Map<ShardId, Boolean> shardWasActive = new HashMap<>(); @Override - public void run() { + public synchronized void run() { EnumSet<ShardStatusChangeType> changes = purgeDeletedAndClosedShards(); - final List<ShardId> activeToInactiveIndexingShards = new ArrayList<>(); - final int activeShards = updateShardStatuses(changes, activeToInactiveIndexingShards); - for (ShardId indexShard : activeToInactiveIndexingShards) { - markShardAsInactive(indexShard); - } + updateShardStatuses(changes); if (changes.isEmpty() == false) { // Something changed: recompute indexing buffers: - calcAndSetShardBuffers(activeShards, "[" + changes + "]"); + calcAndSetShardBuffers("[" + changes + "]"); } } /** - * goes through all existing shards and check whether the changes their active status - * - * @return the current count of active shards + * goes through all existing shards and checks whether there are changes in their active status */ - private int updateShardStatuses(EnumSet<ShardStatusChangeType> changes, List<ShardId> activeToInactiveIndexingShards) { - int activeShards = 0; + private void updateShardStatuses(EnumSet<ShardStatusChangeType> changes) { for (ShardId shardId : availableShards()) { - final ShardIndexingStatus currentStatus = getTranslogStatus(shardId); + // Is the shard active now? + Boolean isActive = getShardActive(shardId); - if (currentStatus == null) { + if (isActive == null) { // shard was closed.. continue; } - ShardIndexingStatus status = shardsIndicesStatus.get(shardId); - if (status == null) { - status = currentStatus; - shardsIndicesStatus.put(shardId, status); + // Was the shard active last time we checked? + Boolean wasActive = shardWasActive.get(shardId); + + if (wasActive == null) { + // First time we are seeing this shard + shardWasActive.put(shardId, isActive); changes.add(ShardStatusChangeType.ADDED); - } else { - final boolean lastActiveIndexing = status.activeIndexing; - status.updateWith(currentTimeInNanos(), currentStatus, inactiveTime.nanos()); - if (lastActiveIndexing && (status.activeIndexing == false)) { - activeToInactiveIndexingShards.add(shardId); - changes.add(ShardStatusChangeType.BECAME_INACTIVE); - logger.debug("marking shard {} as inactive (inactive_time[{}]) indexing wise, setting size to [{}]", - shardId, - inactiveTime, EngineConfig.INACTIVE_SHARD_INDEXING_BUFFER); - } else if ((lastActiveIndexing == false) && status.activeIndexing) { + } else if (isActive) { + // Shard is active now + if (wasActive == false) { + // Shard became active itself, since we last checked (due to new indexing op arriving) changes.add(ShardStatusChangeType.BECAME_ACTIVE); logger.debug("marking shard {} as active indexing wise", shardId); + shardWasActive.put(shardId, true); + } else if (checkIdle(shardId, inactiveTime.nanos()) == Boolean.TRUE) { + // Make shard inactive now + changes.add(ShardStatusChangeType.BECAME_INACTIVE); + logger.debug("marking shard {} as inactive (inactive_time[{}]) indexing wise", + shardId, + inactiveTime); + shardWasActive.put(shardId, false); } } - - if (status.activeIndexing) { - activeShards++; - } } - - return activeShards; } /** * purge any existing statuses that are no longer updated * - * @return true if any change + * @return the changes applied */ private EnumSet<ShardStatusChangeType> purgeDeletedAndClosedShards() { EnumSet<ShardStatusChangeType> changes = EnumSet.noneOf(ShardStatusChangeType.class); - Iterator<ShardId> statusShardIdIterator = shardsIndicesStatus.keySet().iterator(); + Iterator<ShardId> statusShardIdIterator = shardWasActive.keySet().iterator();
while (statusShardIdIterator.hasNext()) { ShardId shardId = statusShardIdIterator.next(); if (shardAvailable(shardId) == false) { @@ -364,12 +346,25 @@ public class IndexingMemoryController extends AbstractLifecycleComponent<IndexingMemoryController> - private void calcAndSetShardBuffers(int activeShards, String reason) { + private void calcAndSetShardBuffers(String reason) { + int activeShardCount = 0; + for (Map.Entry<ShardId, Boolean> ent : shardWasActive.entrySet()) { if (ent.getValue()) { activeShardCount++; } } + + // TODO: we could be smarter here by taking into account how much RAM the IndexWriter on each shard + // is actually using (using IW.ramBytesUsed), so that small indices (e.g. Marvel) would not + // get the same indexing buffer as large indices. But it quickly gets tricky... if (activeShardCount == 0) { logger.debug("no active shards (reason={})", reason); return; } - ByteSizeValue shardIndexingBufferSize = new ByteSizeValue(indexingBuffer.bytes() / activeShards); + + ByteSizeValue shardIndexingBufferSize = new ByteSizeValue(indexingBuffer.bytes() / activeShardCount); if (shardIndexingBufferSize.bytes() < minShardIndexBufferSize.bytes()) { shardIndexingBufferSize = minShardIndexBufferSize; } @@ -377,7 +372,7 @@ public class IndexingMemoryController extends AbstractLifecycleComponent<IndexingMemoryController> + for (Map.Entry<ShardId, Boolean> ent : shardWasActive.entrySet()) { + if (ent.getValue()) { + // This shard is active + updateShardBuffers(ent.getKey(), shardIndexingBufferSize, shardTranslogBufferSize); } } } @@ -399,13 +395,14 @@ public class IndexingMemoryController extends AbstractLifecycleComponent<IndexingMemoryController> - } else if ((currentNanoTime - idleSinceNanoTime) > inactiveNanoInterval) { - // shard is inactive. mark it as such. - activeIndexing = false; - } - } else if (activeIndexing == false // we weren't indexing before - && idle == false // but we do now - && current.translogNumberOfOperations > 0 // but only if we're really sure - see note bellow - ) { - // since we sync flush once a shard becomes inactive, the translog id can change, however that - // doesn't mean the an indexing operation has happened. Note that if we're really unlucky and a flush happens - // immediately after an indexing operation we may not become active immediately. The following - // indexing operation will mark the shard as active, so it's OK. If that one doesn't come, we might as well stay - inactive - - translogId = current.translogId; - translogNumberOfOperations = current.translogNumberOfOperations; - } - } }
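Since the buffer math moved around in this hunk, a worked example may help review. The setting names are the real knobs but the numbers are made up, and the max-side clamp is presumed from the elided part of the hunk: the node-wide budget is now divided over shards whose shardWasActive entry is true, then clamped per shard:

import org.elasticsearch.common.unit.ByteSizeValue;

static void bufferSplitExample() {
    // 10% of a ~1gb heap => ~102mb shared budget (indices.memory.index_buffer_size)
    long indexingBufferBytes = ByteSizeValue.parseBytesSizeValue("102mb", "index_buffer_size").bytes();
    int activeShardCount = 20; // shardWasActive entries whose value is true

    long perShardBytes = indexingBufferBytes / activeShardCount; // ~5.1mb per active shard
    long minPerShardBytes = ByteSizeValue.parseBytesSizeValue("4mb", "min_shard_index_buffer_size").bytes();
    if (perShardBytes < minPerShardBytes) {
        perShardBytes = minPerShardBytes; // mirrors the min clamp above; a max clamp presumably applies too
    }
    // inactive shards now drop out of the division entirely, rather than being
    // tracked through the deleted ShardIndexingStatus bookkeeping
}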
diff --git a/core/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java b/core/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java index 6fcbec5ee90..b97457af706 100644 --- a/core/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java +++ b/core/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java @@ -19,7 +19,6 @@ package org.elasticsearch.monitor.fs; -import com.google.common.collect.Iterators; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -30,6 +29,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilderString; import java.io.IOException; +import java.util.Arrays; import java.util.HashSet; import java.util.Iterator; import java.util.Set; @@ -235,7 +235,7 @@ public class FsInfo implements Iterable<FsInfo.Path>, Streamable, ToXContent { @Override public Iterator<Path> iterator() { - return Iterators.forArray(paths); + return Arrays.stream(paths).iterator(); } public static FsInfo readFsInfo(StreamInput in) throws IOException { diff --git a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java index d2194888dbd..c695e265ac5 100644 --- a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java +++ b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java @@ -19,7 +19,6 @@ package org.elasticsearch.monitor.jvm; -import com.google.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; @@ -32,6 +31,7 @@ import org.elasticsearch.common.xcontent.XContentBuilderString; import java.io.IOException; import java.lang.management.*; import java.util.ArrayList; +import java.util.Arrays; import java.util.Iterator; import java.util.List; import java.util.concurrent.TimeUnit; @@ -378,7 +378,7 @@ public class JvmStats implements Streamable, ToXContent { @Override public Iterator<GarbageCollector> iterator() { - return Iterators.forArray(collectors); + return Arrays.stream(collectors).iterator(); } } @@ -546,7 +546,7 @@ public class JvmStats implements Streamable, ToXContent { @Override public Iterator<MemoryPool> iterator() { - return Iterators.forArray(pools); + return Arrays.stream(pools).iterator(); } @Override diff --git a/core/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java b/core/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java index 7aacde5283f..3f35ddf033c 100644 --- a/core/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java +++ b/core/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java @@ -83,42 +83,20 @@ public class InternalSettingsPreparer { initializeSettings(output, input, true); Environment environment = new Environment(output.build()); - // TODO: can we simplify all of this and have a single filename, which is looked up in the config dir?
- boolean loadFromEnv = true; - if (useSystemProperties(input)) { - // if its default, then load it, but also load form env - if (Strings.hasText(System.getProperty("es.default.config"))) { - // TODO: we don't allow multiple config files, but having loadFromEnv true here allows just that - loadFromEnv = true; - output.loadFromPath(environment.configFile().resolve(System.getProperty("es.default.config"))); - } - // TODO: these should be elseifs so that multiple files cannot be loaded - // if explicit, just load it and don't load from env - if (Strings.hasText(System.getProperty("es.config"))) { - loadFromEnv = false; - output.loadFromPath(environment.configFile().resolve(System.getProperty("es.config"))); - } - if (Strings.hasText(System.getProperty("elasticsearch.config"))) { - loadFromEnv = false; - output.loadFromPath(environment.configFile().resolve(System.getProperty("elasticsearch.config"))); + boolean settingsFileFound = false; + Set foundSuffixes = new HashSet<>(); + for (String allowedSuffix : ALLOWED_SUFFIXES) { + Path path = environment.configFile().resolve("elasticsearch" + allowedSuffix); + if (Files.exists(path)) { + if (!settingsFileFound) { + output.loadFromPath(path); + } + settingsFileFound = true; + foundSuffixes.add(allowedSuffix); } } - if (loadFromEnv) { - boolean settingsFileFound = false; - Set foundSuffixes = new HashSet<>(); - for (String allowedSuffix : ALLOWED_SUFFIXES) { - Path path = environment.configFile().resolve("elasticsearch" + allowedSuffix); - if (Files.exists(path)) { - if (!settingsFileFound) { - output.loadFromPath(path); - } - settingsFileFound = true; - foundSuffixes.add(allowedSuffix); - } - } - if (foundSuffixes.size() > 1) { - throw new SettingsException("multiple settings files found with suffixes: " + Strings.collectionToDelimitedString(foundSuffixes, ",")); - } + if (foundSuffixes.size() > 1) { + throw new SettingsException("multiple settings files found with suffixes: " + Strings.collectionToDelimitedString(foundSuffixes, ",")); } // re-initialize settings now that the config file has been loaded diff --git a/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java b/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java index b20a54f076d..1ec1d34abc4 100644 --- a/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java +++ b/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java @@ -25,6 +25,7 @@ import org.apache.lucene.index.ReaderUtil; import org.apache.lucene.index.memory.ExtendedMemoryIndex; import org.apache.lucene.index.memory.MemoryIndex; import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.MatchAllDocsQuery; @@ -457,22 +458,22 @@ public class PercolatorService extends AbstractComponent { @Override public PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context, boolean isNested) { long count = 0; - Lucene.EarlyTerminatingCollector collector = Lucene.createExistsCollector(); for (Map.Entry entry : context.percolateQueries().entrySet()) { try { + Query existsQuery = entry.getValue(); if (isNested) { - Lucene.exists(context.docSearcher(), entry.getValue(), Queries.newNonNestedFilter(), collector); - } else { - Lucene.exists(context.docSearcher(), entry.getValue(), collector); + existsQuery = new BooleanQuery.Builder() + .add(existsQuery, Occur.MUST) + 
.add(Queries.newNonNestedFilter(), Occur.FILTER) + .build(); + } + if (Lucene.exists(context.docSearcher(), existsQuery)) { + count ++; } } catch (Throwable e) { logger.debug("[" + entry.getKey() + "] failed to execute query", e); throw new PercolateException(context.indexShard().shardId(), "failed to execute", e); } - - if (collector.exists()) { - count++; - } } return new PercolateShardResponse(count, context, request.shardId()); } @@ -552,7 +553,6 @@ public class PercolatorService extends AbstractComponent { long count = 0; List matches = new ArrayList<>(); List> hls = new ArrayList<>(); - Lucene.EarlyTerminatingCollector collector = Lucene.createExistsCollector(); for (Map.Entry entry : context.percolateQueries().entrySet()) { if (context.highlight() != null) { @@ -560,26 +560,27 @@ public class PercolatorService extends AbstractComponent { context.hitContext().cache().clear(); } try { + Query existsQuery = entry.getValue(); if (isNested) { - Lucene.exists(context.docSearcher(), entry.getValue(), Queries.newNonNestedFilter(), collector); - } else { - Lucene.exists(context.docSearcher(), entry.getValue(), collector); + existsQuery = new BooleanQuery.Builder() + .add(existsQuery, Occur.MUST) + .add(Queries.newNonNestedFilter(), Occur.FILTER) + .build(); + } + if (Lucene.exists(context.docSearcher(), existsQuery)) { + if (!context.limit || count < context.size()) { + matches.add(entry.getKey()); + if (context.highlight() != null) { + highlightPhase.hitExecute(context, context.hitContext()); + hls.add(context.hitContext().hit().getHighlightFields()); + } + } + count++; } } catch (Throwable e) { logger.debug("[" + entry.getKey() + "] failed to execute query", e); throw new PercolateException(context.indexShard().shardId(), "failed to execute", e); } - - if (collector.exists()) { - if (!context.limit || count < context.size()) { - matches.add(entry.getKey()); - if (context.highlight() != null) { - highlightPhase.hitExecute(context, context.hitContext()); - hls.add(context.hitContext().hit().getHighlightFields()); - } - } - count++; - } } BytesRef[] finalMatches = matches.toArray(new BytesRef[matches.size()]); diff --git a/core/src/main/java/org/elasticsearch/percolator/QueryCollector.java b/core/src/main/java/org/elasticsearch/percolator/QueryCollector.java index dfa9f4be05f..094201c6184 100644 --- a/core/src/main/java/org/elasticsearch/percolator/QueryCollector.java +++ b/core/src/main/java/org/elasticsearch/percolator/QueryCollector.java @@ -19,8 +19,10 @@ package org.elasticsearch.percolator; import com.carrotsearch.hppc.FloatArrayList; + import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.*; +import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.lucene.Lucene; @@ -54,7 +56,6 @@ abstract class QueryCollector extends SimpleCollector { final ESLogger logger; boolean isNestedDoc = false; - final Lucene.EarlyTerminatingCollector collector = Lucene.createExistsCollector(); BytesRef current; SortedBinaryDocValues values; @@ -166,6 +167,13 @@ abstract class QueryCollector extends SimpleCollector { // log??? 
return; } + Query existsQuery = query; + if (isNestedDoc) { + existsQuery = new BooleanQuery.Builder() + .add(existsQuery, Occur.MUST) + .add(Queries.newNonNestedFilter(), Occur.FILTER) + .build(); + } // run the query try { if (context.highlight() != null) { @@ -173,12 +181,7 @@ abstract class QueryCollector extends SimpleCollector { context.hitContext().cache().clear(); } - if (isNestedDoc) { - Lucene.exists(searcher, query, Queries.newNonNestedFilter(), collector); - } else { - Lucene.exists(searcher, query, collector); - } - if (collector.exists()) { + if (Lucene.exists(searcher, existsQuery)) { if (!limit || counter < size) { matches.add(BytesRef.deepCopyOf(current)); if (context.highlight() != null) { @@ -230,14 +233,16 @@ abstract class QueryCollector extends SimpleCollector { // log??? return; } + Query existsQuery = query; + if (isNestedDoc) { + existsQuery = new BooleanQuery.Builder() + .add(existsQuery, Occur.MUST) + .add(Queries.newNonNestedFilter(), Occur.FILTER) + .build(); + } // run the query try { - if (isNestedDoc) { - Lucene.exists(searcher, query, Queries.newNonNestedFilter(), collector); - } else { - Lucene.exists(searcher, query, collector); - } - if (collector.exists()) { + if (Lucene.exists(searcher, existsQuery)) { topDocsLeafCollector.collect(doc); postMatch(doc); } @@ -298,18 +303,20 @@ abstract class QueryCollector extends SimpleCollector { // log??? return; } + Query existsQuery = query; + if (isNestedDoc) { + existsQuery = new BooleanQuery.Builder() + .add(existsQuery, Occur.MUST) + .add(Queries.newNonNestedFilter(), Occur.FILTER) + .build(); + } // run the query try { if (context.highlight() != null) { context.parsedQuery(new ParsedQuery(query)); context.hitContext().cache().clear(); } - if (isNestedDoc) { - Lucene.exists(searcher, query, Queries.newNonNestedFilter(), collector); - } else { - Lucene.exists(searcher, query, collector); - } - if (collector.exists()) { + if (Lucene.exists(searcher, existsQuery)) { if (!limit || counter < size) { matches.add(BytesRef.deepCopyOf(current)); scores.add(scorer.score()); @@ -363,14 +370,16 @@ abstract class QueryCollector extends SimpleCollector { // log??? 
return; } + Query existsQuery = query; + if (isNestedDoc) { + existsQuery = new BooleanQuery.Builder() + .add(existsQuery, Occur.MUST) + .add(Queries.newNonNestedFilter(), Occur.FILTER) + .build(); + } // run the query try { - if (isNestedDoc) { - Lucene.exists(searcher, query, Queries.newNonNestedFilter(), collector); - } else { - Lucene.exists(searcher, query, collector); - } - if (collector.exists()) { + if (Lucene.exists(searcher, existsQuery)) { counter++; postMatch(doc); } diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginManager.java b/core/src/main/java/org/elasticsearch/plugins/PluginManager.java index 9e7a6fcb703..2545deb7c90 100644 --- a/core/src/main/java/org/elasticsearch/plugins/PluginManager.java +++ b/core/src/main/java/org/elasticsearch/plugins/PluginManager.java @@ -19,14 +19,8 @@ package org.elasticsearch.plugins; -import com.google.common.collect.Iterators; - import org.apache.lucene.util.IOUtils; -import org.elasticsearch.Build; -import org.elasticsearch.ElasticsearchCorruptionException; -import org.elasticsearch.ElasticsearchTimeoutException; -import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; +import org.elasticsearch.*; import org.elasticsearch.bootstrap.JarHell; import org.elasticsearch.common.Strings; import org.elasticsearch.common.cli.Terminal; @@ -41,21 +35,12 @@ import java.io.IOException; import java.io.OutputStream; import java.net.MalformedURLException; import java.net.URL; -import java.nio.file.DirectoryStream; -import java.nio.file.FileVisitResult; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.SimpleFileVisitor; +import java.nio.file.*; import java.nio.file.attribute.BasicFileAttributes; import java.nio.file.attribute.PosixFileAttributeView; import java.nio.file.attribute.PosixFilePermission; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashSet; -import java.util.List; -import java.util.Locale; -import java.util.Random; -import java.util.Set; +import java.util.*; +import java.util.stream.StreamSupport; import java.util.zip.ZipEntry; import java.util.zip.ZipInputStream; @@ -90,10 +75,10 @@ public class PluginManager { "analysis-phonetic", "analysis-smartcn", "analysis-stempel", - "cloud-gce", "delete-by-query", "discovery-azure", "discovery-ec2", + "discovery-gce", "discovery-multicast", "lang-expression", "lang-groovy", @@ -225,7 +210,6 @@ public class PluginManager { } private void extract(PluginHandle pluginHandle, Terminal terminal, Path pluginFile) throws IOException { - // unzip plugin to a staging temp dir, named for the plugin Path tmp = Files.createTempDirectory(environment.tmpFile(), null); Path root = tmp.resolve(pluginHandle.name); @@ -255,22 +239,74 @@ public class PluginManager { terminal.println("Installed %s into %s", pluginHandle.name, extractLocation.toAbsolutePath()); // cleanup - IOUtils.rm(tmp, pluginFile); + tryToDeletePath(terminal, tmp, pluginFile); // take care of bin/ by moving and applying permissions if needed - Path binFile = extractLocation.resolve("bin"); - if (Files.isDirectory(binFile)) { - Path toLocation = pluginHandle.binDir(environment); - terminal.println(VERBOSE, "Found bin, moving to %s", toLocation.toAbsolutePath()); - if (Files.exists(toLocation)) { - IOUtils.rm(toLocation); + Path sourcePluginBinDirectory = extractLocation.resolve("bin"); + Path destPluginBinDirectory = pluginHandle.binDir(environment); + boolean needToCopyBinDirectory = Files.exists(sourcePluginBinDirectory); + if (needToCopyBinDirectory) { + if 
(Files.exists(destPluginBinDirectory) && !Files.isDirectory(destPluginBinDirectory)) { + tryToDeletePath(terminal, extractLocation); + throw new IOException("plugin bin directory " + destPluginBinDirectory + " is not a directory"); + } + + try { + copyBinDirectory(sourcePluginBinDirectory, destPluginBinDirectory, pluginHandle.name, terminal); + } catch (IOException e) { + // roll back and remove any potentially installed leftovers + terminal.printError("Error copying bin directory [%s] to [%s], cleaning up, reason: %s", sourcePluginBinDirectory, destPluginBinDirectory, e.getMessage()); + tryToDeletePath(terminal, extractLocation, pluginHandle.binDir(environment)); + throw e; + } + + } + + Path sourceConfigDirectory = extractLocation.resolve("config"); + Path destConfigDirectory = pluginHandle.configDir(environment); + boolean needToCopyConfigDirectory = Files.exists(sourceConfigDirectory); + if (needToCopyConfigDirectory) { + if (Files.exists(destConfigDirectory) && !Files.isDirectory(destConfigDirectory)) { + tryToDeletePath(terminal, extractLocation, destPluginBinDirectory); + throw new IOException("plugin config directory " + destConfigDirectory + " is not a directory"); + } + + try { + terminal.println(VERBOSE, "Found config, moving to %s", destConfigDirectory.toAbsolutePath()); + moveFilesWithoutOverwriting(sourceConfigDirectory, destConfigDirectory, ".new"); + terminal.println(VERBOSE, "Installed %s into %s", pluginHandle.name, destConfigDirectory.toAbsolutePath()); + } catch (IOException e) { + terminal.printError("Error copying config directory [%s] to [%s], cleaning up, reason: %s", sourceConfigDirectory, destConfigDirectory, e.getMessage()); + tryToDeletePath(terminal, extractLocation, destPluginBinDirectory, destConfigDirectory); + throw e; + } + } + } + + private void tryToDeletePath(Terminal terminal, Path ... paths) { + for (Path path : paths) { + try { + IOUtils.rm(path); + } catch (IOException e) { + terminal.printError(e); + } + } + } + + private void copyBinDirectory(Path sourcePluginBinDirectory, Path destPluginBinDirectory, String pluginName, Terminal terminal) throws IOException { + boolean canCopyFromSource = Files.exists(sourcePluginBinDirectory) && Files.isReadable(sourcePluginBinDirectory) && Files.isDirectory(sourcePluginBinDirectory); + if (canCopyFromSource) { + terminal.println(VERBOSE, "Found bin, moving to %s", destPluginBinDirectory.toAbsolutePath()); + if (Files.exists(destPluginBinDirectory)) { + IOUtils.rm(destPluginBinDirectory); } try { - FileSystemUtils.move(binFile, toLocation); + Files.createDirectories(destPluginBinDirectory.getParent()); + FileSystemUtils.move(sourcePluginBinDirectory, destPluginBinDirectory); } catch (IOException e) { - throw new IOException("Could not move [" + binFile + "] to [" + toLocation + "]", e); + throw new IOException("Could not move [" + sourcePluginBinDirectory + "] to [" + destPluginBinDirectory + "]", e); } - if (Environment.getFileStore(toLocation).supportsFileAttributeView(PosixFileAttributeView.class)) { + if (Environment.getFileStore(destPluginBinDirectory).supportsFileAttributeView(PosixFileAttributeView.class)) { // add read and execute permissions to existing perms, so execution will work. // read should generally be set already, but set it anyway: don't rely on umask...
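As a side note on the rewritten extract(...) path above: bin/ and config/ are now installed quasi-transactionally, and on any copy failure the code prints the error, best-effort deletes whatever was already created, and rethrows. A self-contained sketch of that best-effort cleanup idiom, using plain java.nio rather than Lucene's IOUtils.rm (class and path names here are invented):

    import java.io.IOException;
    import java.nio.file.*;
    import java.nio.file.attribute.BasicFileAttributes;

    public class BestEffortDelete {
        // Try each path independently and keep going on failure, mirroring
        // the tryToDeletePath(...) rollback helper in the hunk above.
        static void tryToDelete(Path... paths) {
            for (Path path : paths) {
                try {
                    if (Files.exists(path)) {
                        Files.walkFileTree(path, new SimpleFileVisitor<Path>() {
                            @Override
                            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                                Files.delete(file);
                                return FileVisitResult.CONTINUE;
                            }
                            @Override
                            public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
                                Files.delete(dir);
                                return FileVisitResult.CONTINUE;
                            }
                        });
                    }
                } catch (IOException e) {
                    // swallow and report: cleanup must not mask the original failure
                    System.err.println("could not delete " + path + ": " + e);
                }
            }
        }

        public static void main(String[] args) throws IOException {
            Path tmp = Files.createTempDirectory("plugin-staging");
            Files.createFile(tmp.resolve("leftover.jar"));
            tryToDelete(tmp);
        }
    }

The key design point is that cleanup errors are reported but never thrown, so the exception that triggered the rollback is the one the caller actually sees.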
final Set executePerms = new HashSet<>(); @@ -280,7 +316,7 @@ public class PluginManager { executePerms.add(PosixFilePermission.OWNER_EXECUTE); executePerms.add(PosixFilePermission.GROUP_EXECUTE); executePerms.add(PosixFilePermission.OTHERS_EXECUTE); - Files.walkFileTree(toLocation, new SimpleFileVisitor() { + Files.walkFileTree(destPluginBinDirectory, new SimpleFileVisitor() { @Override public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { if (attrs.isRegularFile()) { @@ -294,15 +330,7 @@ public class PluginManager { } else { terminal.println(VERBOSE, "Skipping posix permissions - filestore doesn't support posix permission"); } - terminal.println(VERBOSE, "Installed %s into %s", pluginHandle.name, toLocation.toAbsolutePath()); - } - - Path configFile = extractLocation.resolve("config"); - if (Files.isDirectory(configFile)) { - Path configDestLocation = pluginHandle.configDir(environment); - terminal.println(VERBOSE, "Found config, moving to %s", configDestLocation.toAbsolutePath()); - moveFilesWithoutOverwriting(configFile, configDestLocation, ".new"); - terminal.println(VERBOSE, "Installed %s into %s", pluginHandle.name, configDestLocation.toAbsolutePath()); + terminal.println(VERBOSE, "Installed %s into %s", pluginName, destPluginBinDirectory.toAbsolutePath()); } } @@ -437,7 +465,7 @@ public class PluginManager { } try (DirectoryStream stream = Files.newDirectoryStream(environment.pluginsFile())) { - return Iterators.toArray(stream.iterator(), Path.class); + return StreamSupport.stream(stream.spliterator(), false).toArray(length -> new Path[length]); } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/hotthreads/RestNodesHotThreadsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/hotthreads/RestNodesHotThreadsAction.java index 5e374079e37..c1a39cc1432 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/hotthreads/RestNodesHotThreadsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/hotthreads/RestNodesHotThreadsAction.java @@ -58,6 +58,7 @@ public class RestNodesHotThreadsAction extends BaseRestHandler { nodesHotThreadsRequest.type(request.param("type", nodesHotThreadsRequest.type())); nodesHotThreadsRequest.interval(TimeValue.parseTimeValue(request.param("interval"), nodesHotThreadsRequest.interval(), "interval")); nodesHotThreadsRequest.snapshots(request.paramAsInt("snapshots", nodesHotThreadsRequest.snapshots())); + nodesHotThreadsRequest.timeout(request.param("timeout")); client.admin().cluster().nodesHotThreads(nodesHotThreadsRequest, new RestResponseListener(channel) { @Override public RestResponse buildResponse(NodesHotThreadsResponse response) throws Exception { diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/info/RestNodesInfoAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/info/RestNodesInfoAction.java index aed9514e2c0..f2c51850000 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/info/RestNodesInfoAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/info/RestNodesInfoAction.java @@ -87,6 +87,7 @@ public class RestNodesInfoAction extends BaseRestHandler { } final NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(nodeIds); + nodesInfoRequest.timeout(request.param("timeout")); // shortcut, dont do checks if only all is specified if (metrics.size() == 1 && 
metrics.contains("_all")) { nodesInfoRequest.all(); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/stats/RestNodesStatsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/stats/RestNodesStatsAction.java index fa146b57f06..2e3927e665e 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/stats/RestNodesStatsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/stats/RestNodesStatsAction.java @@ -60,6 +60,7 @@ public class RestNodesStatsAction extends BaseRestHandler { Set metrics = Strings.splitStringByCommaToSet(request.param("metric", "_all")); NodesStatsRequest nodesStatsRequest = new NodesStatsRequest(nodesIds); + nodesStatsRequest.timeout(request.param("timeout")); if (metrics.size() == 1 && metrics.contains("_all")) { nodesStatsRequest.all(); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/stats/RestClusterStatsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/stats/RestClusterStatsAction.java index 572a48de633..975c460dda8 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/stats/RestClusterStatsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/stats/RestClusterStatsAction.java @@ -43,6 +43,7 @@ public class RestClusterStatsAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { ClusterStatsRequest clusterStatsRequest = new ClusterStatsRequest().nodesIds(request.paramAsStringArray("nodeId", null)); + clusterStatsRequest.timeout(request.param("timeout")); client.admin().cluster().clusterStats(clusterStatsRequest, new RestToXContentListener(channel)); } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/validate/template/RestRenderSearchTemplateAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/validate/template/RestRenderSearchTemplateAction.java index 7e75dc17cf1..a25754d8752 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/validate/template/RestRenderSearchTemplateAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/validate/template/RestRenderSearchTemplateAction.java @@ -20,8 +20,8 @@ package org.elasticsearch.rest.action.admin.indices.validate.template; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.action.admin.indices.validate.template.RenderSearchTemplateRequest; -import org.elasticsearch.action.admin.indices.validate.template.RenderSearchTemplateResponse; +import org.elasticsearch.action.admin.cluster.validate.template.RenderSearchTemplateRequest; +import org.elasticsearch.action.admin.cluster.validate.template.RenderSearchTemplateResponse; import org.elasticsearch.client.Client; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; @@ -93,7 +93,7 @@ public class RestRenderSearchTemplateAction extends BaseRestHandler { } renderSearchTemplateRequest = new RenderSearchTemplateRequest(); renderSearchTemplateRequest.template(template); - client.admin().indices().renderSearchTemplate(renderSearchTemplateRequest, new RestBuilderListener(channel) { + client.admin().cluster().renderSearchTemplate(renderSearchTemplateRequest, new RestBuilderListener(channel) { @Override public RestResponse buildResponse(RenderSearchTemplateResponse response, XContentBuilder builder) throws Exception 
{ diff --git a/core/src/main/java/org/elasticsearch/script/ScriptService.java b/core/src/main/java/org/elasticsearch/script/ScriptService.java index 10953775fd3..87a5a9a506d 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptService.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptService.java @@ -19,11 +19,6 @@ package org.elasticsearch.script; -import com.google.common.cache.Cache; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.RemovalListener; -import com.google.common.cache.RemovalNotification; - import org.apache.lucene.util.IOUtils; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.delete.DeleteRequest; @@ -41,6 +36,10 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.cache.Cache; +import org.elasticsearch.common.cache.CacheBuilder; +import org.elasticsearch.common.cache.RemovalListener; +import org.elasticsearch.common.cache.RemovalNotification; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; @@ -73,7 +72,6 @@ import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.TimeUnit; import static java.util.Collections.unmodifiableMap; @@ -155,12 +153,12 @@ public class ScriptService extends AbstractComponent implements Closeable { this.defaultLang = settings.get(DEFAULT_SCRIPTING_LANGUAGE_SETTING, DEFAULT_LANG); - CacheBuilder cacheBuilder = CacheBuilder.newBuilder(); + CacheBuilder cacheBuilder = CacheBuilder.builder(); if (cacheMaxSize >= 0) { - cacheBuilder.maximumSize(cacheMaxSize); + cacheBuilder.setMaximumWeight(cacheMaxSize); } if (cacheExpire != null) { - cacheBuilder.expireAfterAccess(cacheExpire.nanos(), TimeUnit.NANOSECONDS); + cacheBuilder.setExpireAfterAccess(cacheExpire.nanos()); } this.cache = cacheBuilder.removalListener(new ScriptCacheRemovalListener()).build(); @@ -303,7 +301,7 @@ public class ScriptService extends AbstractComponent implements Closeable { } String cacheKey = getCacheKey(scriptEngineService, type == ScriptType.INLINE ? 
null : name, code); - CompiledScript compiledScript = cache.getIfPresent(cacheKey); + CompiledScript compiledScript = cache.get(cacheKey); if (compiledScript == null) { //Either an un-cached inline script or indexed script @@ -495,12 +493,8 @@ public class ScriptService extends AbstractComponent implements Closeable { * script has been removed from the cache */ private class ScriptCacheRemovalListener implements RemovalListener { - @Override public void onRemoval(RemovalNotification notification) { - if (logger.isDebugEnabled()) { - logger.debug("notifying script services of script removal due to: [{}]", notification.getCause()); - } scriptMetrics.onCacheEviction(); for (ScriptEngineService service : scriptEngines) { try { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestState.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestState.java index 25c875bfd7e..5b3182da57e 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestState.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestState.java @@ -1,20 +1,21 @@ /* -* Licensed to the Apache Software Foundation (ASF) under one or more -* contributor license agreements. See the NOTICE file distributed with -* this work for additional information regarding copyright ownership. -* The ASF licenses this file to You under the Apache License, Version 2.0 -* (the "License"); you may not use this file except in compliance with -* the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ package org.elasticsearch.search.aggregations.metrics.percentiles.tdigest; import com.tdunning.math.stats.AVLTreeDigest; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricBuilder.java index 0614cd7fb2a..4bbb407f788 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricBuilder.java @@ -30,7 +30,7 @@ import java.util.Map; /** * Builder for the {@link ScriptedMetric} aggregation. 
*/ -public class ScriptedMetricBuilder extends MetricsAggregationBuilder { +public class ScriptedMetricBuilder extends MetricsAggregationBuilder { private Script initScript = null; private Script mapScript = null; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/MovAvgPipelineAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/MovAvgPipelineAggregator.java index a681bc79c9c..48686b9a6b9 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/MovAvgPipelineAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/MovAvgPipelineAggregator.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.aggregations.pipeline.movavg; -import com.google.common.collect.EvictingQueue; +import org.elasticsearch.common.collect.EvictingQueue; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.Aggregation; @@ -102,7 +102,7 @@ public class MovAvgPipelineAggregator extends PipelineAggregator { InternalHistogram.Factory factory = histo.getFactory(); List newBuckets = new ArrayList<>(); - EvictingQueue values = EvictingQueue.create(this.window); + EvictingQueue values = new EvictingQueue<>(this.window); long lastValidKey = 0; int lastValidPosition = 0; @@ -202,7 +202,7 @@ public class MovAvgPipelineAggregator extends PipelineAggregator { private MovAvgModel minimize(List buckets, InternalHistogram histo, MovAvgModel model) { int counter = 0; - EvictingQueue values = EvictingQueue.create(window); + EvictingQueue values = new EvictingQueue<>(this.window); double[] test = new double[window]; ListIterator iter = buckets.listIterator(buckets.size()); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/SimulatedAnealingMinimizer.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/SimulatedAnealingMinimizer.java index bb04502f60c..711ee2299cf 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/SimulatedAnealingMinimizer.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/SimulatedAnealingMinimizer.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.aggregations.pipeline.movavg; -import com.google.common.collect.EvictingQueue; +import org.elasticsearch.common.collect.EvictingQueue; import org.elasticsearch.search.aggregations.pipeline.movavg.models.MovAvgModel; /** diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffPipelineAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffPipelineAggregator.java index d00f064a94e..5df97d336c9 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffPipelineAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffPipelineAggregator.java @@ -19,8 +19,8 @@ package org.elasticsearch.search.aggregations.pipeline.serialdiff; -import com.google.common.collect.EvictingQueue; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.collect.EvictingQueue; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.InternalAggregation; @@ -86,7 +86,7 @@ public class SerialDiffPipelineAggregator extends 
PipelineAggregator { InternalHistogram.Factory factory = histo.getFactory(); List newBuckets = new ArrayList<>(); - EvictingQueue lagWindow = EvictingQueue.create(lag); + EvictingQueue lagWindow = new EvictingQueue<>(lag); int counter = 0; for (InternalHistogram.Bucket bucket : buckets) { diff --git a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHits.java b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHits.java index 393b7b6ad8c..9e787cf2aa9 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHits.java +++ b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHits.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.internal; import com.carrotsearch.hppc.IntObjectHashMap; -import com.google.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -30,6 +29,7 @@ import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchShardTarget; import java.io.IOException; +import java.util.Arrays; import java.util.IdentityHashMap; import java.util.Iterator; import java.util.Map; @@ -156,7 +156,7 @@ public class InternalSearchHits implements SearchHits { @Override public Iterator iterator() { - return Iterators.forArray(hits()); + return Arrays.stream(hits()).iterator(); } public InternalSearchHit[] internalHits() { diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java index 724e3d40e25..6e7a91dffc3 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java @@ -104,7 +104,6 @@ public final class PhraseSuggester extends Suggester { response.addTerm(resultEntry); final BytesRefBuilder byteSpare = new BytesRefBuilder(); - final EarlyTerminatingCollector collector = Lucene.createExistsCollector(); final CompiledScript collateScript = suggestion.getCollateQueryScript(); final boolean collatePrune = (collateScript != null) && suggestion.collatePrune(); for (int i = 0; i < checkerResult.corrections.length; i++) { @@ -119,7 +118,7 @@ public final class PhraseSuggester extends Suggester { final ExecutableScript executable = scriptService.executable(collateScript, vars); final BytesReference querySource = (BytesReference) executable.run(); final ParsedQuery parsedQuery = suggestion.getQueryParserService().parse(querySource); - collateMatch = Lucene.exists(searcher, parsedQuery.query(), collector); + collateMatch = Lucene.exists(searcher, parsedQuery.query()); } if (!collateMatch && !collatePrune) { continue; diff --git a/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java index c709daad54e..9db3ef9ffa8 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -94,8 +94,6 @@ import static java.util.Collections.unmodifiableSet; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_CREATION_DATE; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_INDEX_UUID; -import static 
org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION; -import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_LEGACY_ROUTING_USE_TYPE; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_CREATED; @@ -131,8 +129,6 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis private static final Set UNMODIFIABLE_SETTINGS = unmodifiableSet(newHashSet( SETTING_NUMBER_OF_SHARDS, SETTING_VERSION_CREATED, - SETTING_LEGACY_ROUTING_HASH_FUNCTION, - SETTING_LEGACY_ROUTING_USE_TYPE, SETTING_INDEX_UUID, SETTING_CREATION_DATE)); diff --git a/core/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat b/core/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat index 52134e4fc82..06b50d314be 100644 --- a/core/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat +++ b/core/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat @@ -1,3 +1 @@ -org.elasticsearch.index.codec.postingsformat.Elasticsearch090PostingsFormat -org.elasticsearch.search.suggest.completion.Completion090PostingsFormat -org.elasticsearch.index.codec.postingsformat.BloomFilterPostingsFormat +org.elasticsearch.search.suggest.completion.Completion090PostingsFormat \ No newline at end of file diff --git a/core/src/main/resources/org/elasticsearch/bootstrap/security.policy b/core/src/main/resources/org/elasticsearch/bootstrap/security.policy index 11268245670..76b5a584226 100644 --- a/core/src/main/resources/org/elasticsearch/bootstrap/security.policy +++ b/core/src/main/resources/org/elasticsearch/bootstrap/security.policy @@ -52,8 +52,8 @@ grant codeBase "${es.security.plugin.discovery-ec2}" { permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; }; -grant codeBase "${es.security.plugin.cloud-gce}" { - // needed because of problems in cloud-gce +grant codeBase "${es.security.plugin.discovery-gce}" { + // needed because of problems in discovery-gce permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; }; diff --git a/core/src/main/resources/org/elasticsearch/plugins/plugin-install.help b/core/src/main/resources/org/elasticsearch/plugins/plugin-install.help index 0811ec7ccf2..35772158153 100644 --- a/core/src/main/resources/org/elasticsearch/plugins/plugin-install.help +++ b/core/src/main/resources/org/elasticsearch/plugins/plugin-install.help @@ -38,10 +38,10 @@ OFFICIAL PLUGINS - analysis-phonetic - analysis-smartcn - analysis-stempel - - cloud-gce - delete-by-query - discovery-azure - discovery-ec2 + - discovery-gce - discovery-multicast - lang-expression - lang-groovy diff --git a/core/src/test/java/org/apache/lucene/search/postingshighlight/CustomPassageFormatterTests.java b/core/src/test/java/org/apache/lucene/search/postingshighlight/CustomPassageFormatterTests.java index ef7a9ac31e8..dc176ae5620 100644 --- a/core/src/test/java/org/apache/lucene/search/postingshighlight/CustomPassageFormatterTests.java +++ b/core/src/test/java/org/apache/lucene/search/postingshighlight/CustomPassageFormatterTests.java @@ -1,19 +1,20 @@ /* - * Licensed to Elasticsearch under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
Elasticsearch licenses this - * file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. */ package org.apache.lucene.search.postingshighlight; diff --git a/core/src/test/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighterTests.java b/core/src/test/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighterTests.java index 450382c1ff1..58728d8e258 100644 --- a/core/src/test/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighterTests.java +++ b/core/src/test/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighterTests.java @@ -1,19 +1,20 @@ /* - * Licensed to Elasticsearch under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. Elasticsearch licenses this - * file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
*/ package org.apache.lucene.search.postingshighlight; diff --git a/core/src/test/java/org/apache/lucene/search/postingshighlight/CustomSeparatorBreakIteratorTests.java b/core/src/test/java/org/apache/lucene/search/postingshighlight/CustomSeparatorBreakIteratorTests.java index 3b63f765f3c..1be578f1003 100644 --- a/core/src/test/java/org/apache/lucene/search/postingshighlight/CustomSeparatorBreakIteratorTests.java +++ b/core/src/test/java/org/apache/lucene/search/postingshighlight/CustomSeparatorBreakIteratorTests.java @@ -1,20 +1,20 @@ /* -Licensed to Elasticsearch under one or more contributor -license agreements. See the NOTICE file distributed with -this work for additional information regarding copyright -ownership. Elasticsearch licenses this file to you under -the Apache License, Version 2.0 (the "License"); you may -not use this file except in compliance with the License. -You may obtain a copy of the License at + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at * - http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
*/ package org.apache.lucene.search.postingshighlight; diff --git a/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index 9a260f033ee..55dc2e42113 100644 --- a/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -20,7 +20,6 @@ package org.elasticsearch; import com.fasterxml.jackson.core.JsonLocation; import com.fasterxml.jackson.core.JsonParseException; - import org.apache.lucene.util.Constants; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.RoutingMissingException; @@ -31,12 +30,7 @@ import org.elasticsearch.client.AbstractClientHeadersTestCase; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.metadata.SnapshotId; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.IllegalShardRoutingStateException; -import org.elasticsearch.cluster.routing.RoutingTableValidation; -import org.elasticsearch.cluster.routing.RoutingValidationException; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.routing.ShardRoutingState; -import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.cluster.routing.*; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.io.PathUtils; @@ -55,7 +49,6 @@ import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.index.AlreadyExpiredException; import org.elasticsearch.index.Index; -import org.elasticsearch.index.engine.CreateFailedEngineException; import org.elasticsearch.index.engine.IndexFailedEngineException; import org.elasticsearch.index.engine.RecoveryEngineException; import org.elasticsearch.index.mapper.MergeMappingException; @@ -139,9 +132,9 @@ public class ExceptionSerializationTests extends ESTestCase { Class clazz = loadClass(filename); if (ignore.contains(clazz) == false) { if (Modifier.isAbstract(clazz.getModifiers()) == false && Modifier.isInterface(clazz.getModifiers()) == false && isEsException(clazz)) { - if (ElasticsearchException.isRegistered((Class)clazz) == false && ElasticsearchException.class.equals(clazz.getEnclosingClass()) == false) { + if (ElasticsearchException.isRegistered((Class) clazz) == false && ElasticsearchException.class.equals(clazz.getEnclosingClass()) == false) { notRegistered.add(clazz); - } else if (ElasticsearchException.isRegistered((Class)clazz)) { + } else if (ElasticsearchException.isRegistered((Class) clazz)) { registered.add(clazz); try { if (clazz.getDeclaredMethod("writeTo", StreamOutput.class) != null) { @@ -199,7 +192,7 @@ public class ExceptionSerializationTests extends ESTestCase { } public static final class TestException extends ElasticsearchException { - public TestException(StreamInput in) throws IOException{ + public TestException(StreamInput in) throws IOException { super(in); } } @@ -247,7 +240,7 @@ public class ExceptionSerializationTests extends ESTestCase { assertEquals(ex.getIndex(), "foo"); assertEquals(ex.getMessage(), "fobar"); - ex = serialize(new QueryShardException((Index)null, null, null)); + ex = serialize(new QueryShardException((Index) null, null, null)); assertNull(ex.getIndex()); assertNull(ex.getMessage()); } @@ -282,22 +275,8 @@ public class 
ExceptionSerializationTests extends ESTestCase { assertEquals(-3, alreadyExpiredException.now()); } - public void testCreateFailedEngineException() throws IOException { - CreateFailedEngineException ex = serialize(new CreateFailedEngineException(new ShardId("idx", 2), "type", "id", null)); - assertEquals(ex.getShardId(), new ShardId("idx", 2)); - assertEquals("type", ex.type()); - assertEquals("id", ex.id()); - assertNull(ex.getCause()); - - ex = serialize(new CreateFailedEngineException(null, "type", "id", new NullPointerException())); - assertNull(ex.getShardId()); - assertEquals("type", ex.type()); - assertEquals("id", ex.id()); - assertTrue(ex.getCause() instanceof NullPointerException); - } - public void testMergeMappingException() throws IOException { - MergeMappingException ex = serialize(new MergeMappingException(new String[] {"one", "two"})); + MergeMappingException ex = serialize(new MergeMappingException(new String[]{"one", "two"})); assertArrayEquals(ex.failures(), new String[]{"one", "two"}); } @@ -342,7 +321,7 @@ public class ExceptionSerializationTests extends ESTestCase { assertEquals("the dude abides!", ex.name()); assertEquals("index_template [the dude abides!] already exists", ex.getMessage()); - ex = serialize(new IndexTemplateAlreadyExistsException((String)null)); + ex = serialize(new IndexTemplateAlreadyExistsException((String) null)); assertNull(ex.name()); assertEquals("index_template [null] already exists", ex.getMessage()); } @@ -449,7 +428,7 @@ public class ExceptionSerializationTests extends ESTestCase { assertEquals(ctx.shardTarget(), ex.shard()); } - public void testIllegalIndexShardStateException()throws IOException { + public void testIllegalIndexShardStateException() throws IOException { ShardId id = new ShardId("foo", 1); IndexShardState state = randomFrom(IndexShardState.values()); IllegalIndexShardStateException ex = serialize(new IllegalIndexShardStateException(id, state, "come back later buddy")); @@ -480,7 +459,7 @@ public class ExceptionSerializationTests extends ESTestCase { assertEquals("baam", ex.getMessage()); assertTrue(ex.getCause() instanceof NullPointerException); assertEquals(empty.length, ex.shardFailures().length); - ShardSearchFailure[] one = new ShardSearchFailure[] { + ShardSearchFailure[] one = new ShardSearchFailure[]{ new ShardSearchFailure(new IllegalArgumentException("nono!")) }; @@ -521,7 +500,7 @@ public class ExceptionSerializationTests extends ESTestCase { assertEquals("index_template [name] missing", ex.getMessage()); assertEquals("name", ex.name()); - ex = serialize(new IndexTemplateMissingException((String)null)); + ex = serialize(new IndexTemplateMissingException((String) null)); assertEquals("index_template [null] missing", ex.getMessage()); assertNull(ex.name()); } @@ -570,8 +549,8 @@ public class ExceptionSerializationTests extends ESTestCase { ex = serialize(new NotSerializableExceptionWrapper(new IllegalArgumentException("nono!"))); assertEquals("{\"type\":\"illegal_argument_exception\",\"reason\":\"nono!\"}", toXContent(ex)); - Throwable[] unknowns = new Throwable[] { - new JsonParseException("foobar", new JsonLocation(new Object(), 1,2,3,4)), + Throwable[] unknowns = new Throwable[]{ + new JsonParseException("foobar", new JsonLocation(new Object(), 1, 2, 3, 4)), new ClassCastException("boom boom boom"), new IOException("booom") }; @@ -609,7 +588,7 @@ public class ExceptionSerializationTests extends ESTestCase { UnknownHeaderException uhe = new UnknownHeaderException("msg", status); uhe.addHeader("foo", "foo", "bar"); 
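For context, the assertions in this test all flow through a serialize(...) round-trip helper: write the exception to a stream, read it back, and verify that the interesting state survives. A plain-JDK sketch of the same pattern, using java.io serialization instead of Elasticsearch's StreamOutput/StreamInput (run with -ea to enable the assert):

    import java.io.*;

    public class RoundTripDemo {
        // Write the exception out, read it back, and hand it to the caller for assertions.
        @SuppressWarnings("unchecked")
        static <T extends Exception> T roundTrip(T e) throws IOException, ClassNotFoundException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
                out.writeObject(e);
            }
            try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
                return (T) in.readObject();
            }
        }

        public static void main(String[] args) throws Exception {
            IllegalArgumentException copy = roundTrip(new IllegalArgumentException("nono!"));
            assert "nono!".equals(copy.getMessage()) : "message should survive the round trip";
            System.out.println("round trip ok: " + copy.getMessage());
        }
    }

The real tests go further than this sketch: they also pin each exception class to a stable numeric wire id (the ids.put(...) table below), so that removing or renaming a class can never silently reuse an id.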
- ElasticsearchException serialize = serialize((ElasticsearchException)uhe); + ElasticsearchException serialize = serialize((ElasticsearchException) uhe); assertTrue(serialize instanceof NotSerializableExceptionWrapper); NotSerializableExceptionWrapper e = (NotSerializableExceptionWrapper) serialize; assertEquals("msg", e.getMessage()); @@ -684,7 +663,7 @@ public class ExceptionSerializationTests extends ESTestCase { ids.put(19, org.elasticsearch.ResourceNotFoundException.class); ids.put(20, org.elasticsearch.transport.ActionTransportException.class); ids.put(21, org.elasticsearch.ElasticsearchGenerationException.class); - ids.put(22, org.elasticsearch.index.engine.CreateFailedEngineException.class); + ids.put(22, null); // was CreateFailedEngineException ids.put(23, org.elasticsearch.index.shard.IndexShardStartedException.class); ids.put(24, org.elasticsearch.search.SearchContextMissingException.class); ids.put(25, org.elasticsearch.script.ScriptException.class); @@ -716,7 +695,7 @@ public class ExceptionSerializationTests extends ESTestCase { ids.put(51, org.elasticsearch.index.IndexShardAlreadyExistsException.class); ids.put(52, org.elasticsearch.index.engine.VersionConflictEngineException.class); ids.put(53, org.elasticsearch.index.engine.EngineException.class); - ids.put(54, org.elasticsearch.index.engine.DocumentAlreadyExistsException.class); + ids.put(54, null); // was DocumentAlreadyExistsException, which is superseded by VersionConflictEngineException ids.put(55, org.elasticsearch.action.NoSuchNodeException.class); ids.put(56, org.elasticsearch.common.settings.SettingsException.class); ids.put(57, org.elasticsearch.indices.IndexTemplateMissingException.class); @@ -726,7 +705,7 @@ public class ExceptionSerializationTests extends ESTestCase { ids.put(61, org.elasticsearch.cluster.routing.RoutingValidationException.class); ids.put(62, org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper.class); ids.put(63, org.elasticsearch.indices.AliasFilterParsingException.class); - ids.put(64, org.elasticsearch.index.engine.DeleteByQueryFailedEngineException.class); + ids.put(64, null); // DeleteByQueryFailedEngineException was removed in 3.0 ids.put(65, org.elasticsearch.gateway.GatewayException.class); ids.put(66, org.elasticsearch.index.shard.IndexShardNotRecoveringException.class); ids.put(67, org.elasticsearch.http.HttpException.class); @@ -813,7 +792,7 @@ public class ExceptionSerializationTests extends ESTestCase { } for (ElasticsearchException.ElasticsearchExceptionHandle handle : ElasticsearchException.ElasticsearchExceptionHandle.values()) { - assertEquals((int)reverse.get(handle.exceptionClass), handle.id); + assertEquals((int) reverse.get(handle.exceptionClass), handle.id); } for (Map.Entry> entry : ids.entrySet()) { diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/upgrade/UpgradeReallyOldIndexIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/upgrade/UpgradeReallyOldIndexIT.java deleted file mode 100644 index d365f5b4eeb..00000000000 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/upgrade/UpgradeReallyOldIndexIT.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.admin.indices.upgrade; - -import org.elasticsearch.Version; -import org.elasticsearch.bwcompat.StaticIndexBackwardCompatibilityIT; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.IndexService; -import org.elasticsearch.indices.IndicesService; - -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.hamcrest.Matchers.containsString; - -public class UpgradeReallyOldIndexIT extends StaticIndexBackwardCompatibilityIT { - - public void testUpgrade_0_90_6() throws Exception { - String indexName = "index-0.90.6"; - - loadIndex(indexName); - assertMinVersion(indexName, org.apache.lucene.util.Version.parse("4.5.1")); - UpgradeIT.assertNotUpgraded(client(), indexName); - assertTrue(UpgradeIT.hasAncientSegments(client(), indexName)); - assertNoFailures(client().admin().indices().prepareUpgrade(indexName).setUpgradeOnlyAncientSegments(true).get()); - - assertFalse(UpgradeIT.hasAncientSegments(client(), indexName)); - // This index has only ancient segments, so it should now be fully upgraded: - UpgradeIT.assertUpgraded(client(), indexName); - assertEquals(Version.CURRENT.luceneVersion.toString(), client().admin().indices().prepareGetSettings(indexName).get().getSetting(indexName, IndexMetaData.SETTING_VERSION_MINIMUM_COMPATIBLE)); - assertMinVersion(indexName, Version.CURRENT.luceneVersion); - - assertEquals(client().admin().indices().prepareGetSettings(indexName).get().getSetting(indexName, IndexMetaData.SETTING_VERSION_UPGRADED), Integer.toString(Version.CURRENT.id)); - } - - public void testUpgradeConflictingMapping() throws Exception { - String indexName = "index-conflicting-mappings-1.7.0"; - logger.info("Checking static index " + indexName); - Settings nodeSettings = prepareBackwardsDataDir(getDataPath(indexName + ".zip")); - try { - internalCluster().startNode(nodeSettings); - fail("Should have failed to start the node"); - } catch (Exception ex) { - assertThat(ex.getMessage(), containsString("conflicts with existing mapping in other types")); - } - } - - private void assertMinVersion(String index, org.apache.lucene.util.Version version) { - for (IndicesService services : internalCluster().getInstances(IndicesService.class)) { - IndexService indexService = services.indexService(index); - if (indexService != null) { - assertEquals(version, indexService.getShardOrNull(0).minimumCompatibleVersion()); - } - } - - } - -} diff --git a/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java b/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java index 1cdea965424..7c08a0db359 100644 --- a/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java @@ -18,12 +18,18 @@ */ package org.elasticsearch.action.index; +import org.elasticsearch.index.VersionType; import org.elasticsearch.test.ESTestCase; import org.junit.Test; -import static org.hamcrest.Matchers.equalTo; + 
+import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; + +import static org.hamcrest.Matchers.*; /** - */ + */ public class IndexRequestTests extends ESTestCase { @Test @@ -39,9 +45,23 @@ public class IndexRequestTests extends ESTestCase { assertThat(IndexRequest.OpType.fromString(indexUpper), equalTo(IndexRequest.OpType.INDEX)); } - @Test(expected= IllegalArgumentException.class) - public void testReadBogusString(){ + @Test(expected = IllegalArgumentException.class) + public void testReadBogusString() { String foobar = "foobar"; IndexRequest.OpType.fromString(foobar); } + + public void testCreateOperationRejectsVersions() { + Set allButInternalSet = new HashSet<>(Arrays.asList(VersionType.values())); + allButInternalSet.remove(VersionType.INTERNAL); + VersionType[] allButInternal = allButInternalSet.toArray(new VersionType[]{}); + IndexRequest request = new IndexRequest("index", "type", "1"); + request.opType(IndexRequest.OpType.CREATE); + request.versionType(randomFrom(allButInternal)); + assertThat(request.validate().validationErrors(), not(empty())); + + request.versionType(VersionType.INTERNAL); + request.version(randomIntBetween(0, Integer.MAX_VALUE)); + assertThat(request.validate().validationErrors(), not(empty())); + } } diff --git a/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java new file mode 100644 index 00000000000..fce431238dd --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java @@ -0,0 +1,316 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.support.single.instance; + +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.support.ActionFilter; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.replication.ClusterStateCreationUtils; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlock; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.ShardIterator; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.cluster.TestClusterService; +import org.elasticsearch.test.transport.CapturingTransport; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportService; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.function.Supplier; + +import static org.hamcrest.core.IsEqual.equalTo; + +public class TransportInstanceSingleOperationActionTests extends ESTestCase { + + private static ThreadPool THREAD_POOL; + + private TestClusterService clusterService; + private CapturingTransport transport; + private TransportService transportService; + + private TestTransportInstanceSingleOperationAction action; + + public static class Request extends InstanceShardOperationRequest { + public Request() { + } + } + + public static class Response extends ActionResponse { + public Response() { + } + } + + class TestTransportInstanceSingleOperationAction extends TransportInstanceSingleOperationAction { + private final Map shards = new HashMap<>(); + + public TestTransportInstanceSingleOperationAction(Settings settings, String actionName, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Supplier request) { + super(settings, actionName, THREAD_POOL, TransportInstanceSingleOperationActionTests.this.clusterService, transportService, actionFilters, indexNameExpressionResolver, request); + } + + public Map getResults() { + return shards; + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected void shardOperation(Request request, ActionListener listener) { + throw new UnsupportedOperationException("Not implemented in test class"); + } + + @Override + protected Response newResponse() { + return new Response(); + } + + @Override + protected boolean resolveRequest(ClusterState state, Request request, 
ActionListener listener) { + return true; + } + + @Override + protected ShardIterator shards(ClusterState clusterState, Request request) { + return clusterState.routingTable().index(request.concreteIndex()).shard(request.shardId).primaryShardIt(); + } + } + + class MyResolver extends IndexNameExpressionResolver { + public MyResolver() { + super(Settings.EMPTY); + } + + @Override + public String[] concreteIndices(ClusterState state, IndicesRequest request) { + return request.indices(); + } + } + + @BeforeClass + public static void startThreadPool() { + THREAD_POOL = new ThreadPool(TransportInstanceSingleOperationActionTests.class.getSimpleName()); + } + + @Before + public void setUp() throws Exception { + super.setUp(); + transport = new CapturingTransport(); + clusterService = new TestClusterService(THREAD_POOL); + transportService = new TransportService(transport, THREAD_POOL); + transportService.start(); + action = new TestTransportInstanceSingleOperationAction( + Settings.EMPTY, + "indices:admin/test", + transportService, + new ActionFilters(new HashSet()), + new MyResolver(), + Request::new + ); + } + + @AfterClass + public static void destroyThreadPool() { + ThreadPool.terminate(THREAD_POOL, 30, TimeUnit.SECONDS); + // since THREAD_POOL is static, it must be set to null to be eligible for collection + THREAD_POOL = null; + } + + public void testGlobalBlock() { + Request request = new Request(); + PlainActionFuture listener = new PlainActionFuture<>(); + ClusterBlocks.Builder block = ClusterBlocks.builder() + .addGlobalBlock(new ClusterBlock(1, "", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); + clusterService.setState(ClusterState.builder(clusterService.state()).blocks(block)); + try { + action.new AsyncSingleAction(request, listener).start(); + listener.get(); + fail("expected ClusterBlockException"); + } catch (Throwable t) { + if (ExceptionsHelper.unwrap(t, ClusterBlockException.class) == null) { + logger.info("expected ClusterBlockException but got ", t); + fail("expected ClusterBlockException"); + } + } + } + + public void testBasicRequestWorks() throws InterruptedException, ExecutionException, TimeoutException { + Request request = new Request().index("test"); + request.shardId = 0; + PlainActionFuture listener = new PlainActionFuture<>(); + clusterService.setState(ClusterStateCreationUtils.state("test", randomBoolean(), ShardRoutingState.STARTED)); + action.new AsyncSingleAction(request, listener).start(); + assertThat(transport.capturedRequests().length, equalTo(1)); + transport.handleResponse(transport.capturedRequests()[0].requestId, new Response()); + listener.get(); + } + + public void testFailureWithoutRetry() throws Exception { + Request request = new Request().index("test"); + request.shardId = 0; + PlainActionFuture listener = new PlainActionFuture<>(); + clusterService.setState(ClusterStateCreationUtils.state("test", randomBoolean(), ShardRoutingState.STARTED)); + + action.new AsyncSingleAction(request, listener).start(); + assertThat(transport.capturedRequests().length, equalTo(1)); + long requestId = transport.capturedRequests()[0].requestId; + transport.clear(); + // this should not trigger a retry and the listener should report the exception immediately + transport.handleResponse(requestId, new TransportException("a generic transport exception", new Exception("generic test exception"))); + + try { + // result should return immediately + assertTrue(listener.isDone()); + listener.get(); + fail("this should fail with a transport exception"); + } catch
(ExecutionException t) { + if (ExceptionsHelper.unwrap(t, TransportException.class) == null) { + logger.info("expected TransportException but got ", t); + fail("expected a TransportException"); + } + } + + public void testSuccessAfterRetryWithClusterStateUpdate() throws Exception { + Request request = new Request().index("test"); + request.shardId = 0; + PlainActionFuture listener = new PlainActionFuture<>(); + boolean local = randomBoolean(); + clusterService.setState(ClusterStateCreationUtils.state("test", local, ShardRoutingState.INITIALIZING)); + action.new AsyncSingleAction(request, listener).start(); + // this should fail because the primary is not initialized + assertThat(transport.capturedRequests().length, equalTo(0)); + clusterService.setState(ClusterStateCreationUtils.state("test", local, ShardRoutingState.STARTED)); + // this time it should work + assertThat(transport.capturedRequests().length, equalTo(1)); + transport.handleResponse(transport.capturedRequests()[0].requestId, new Response()); + listener.get(); + } + + public void testSuccessAfterRetryWithExceptionFromTransport() throws Exception { + Request request = new Request().index("test"); + request.shardId = 0; + PlainActionFuture listener = new PlainActionFuture<>(); + boolean local = randomBoolean(); + clusterService.setState(ClusterStateCreationUtils.state("test", local, ShardRoutingState.STARTED)); + action.new AsyncSingleAction(request, listener).start(); + assertThat(transport.capturedRequests().length, equalTo(1)); + long requestId = transport.capturedRequests()[0].requestId; + transport.clear(); + DiscoveryNode node = clusterService.state().getNodes().getLocalNode(); + transport.handleResponse(requestId, new ConnectTransportException(node, "test exception")); + // trigger cluster state observer + clusterService.setState(ClusterStateCreationUtils.state("test", local, ShardRoutingState.STARTED)); + assertThat(transport.capturedRequests().length, equalTo(1)); + transport.handleResponse(transport.capturedRequests()[0].requestId, new Response()); + listener.get(); + } + + public void testRetryOfAnAlreadyTimedOutRequest() throws Exception { + Request request = new Request().index("test").timeout(new TimeValue(0, TimeUnit.MILLISECONDS)); + request.shardId = 0; + PlainActionFuture listener = new PlainActionFuture<>(); + clusterService.setState(ClusterStateCreationUtils.state("test", randomBoolean(), ShardRoutingState.STARTED)); + action.new AsyncSingleAction(request, listener).start(); + assertThat(transport.capturedRequests().length, equalTo(1)); + long requestId = transport.capturedRequests()[0].requestId; + transport.clear(); + DiscoveryNode node = clusterService.state().getNodes().getLocalNode(); + transport.handleResponse(requestId, new ConnectTransportException(node, "test exception")); + + // wait until the timeout was triggered and we actually tried to send for the second time + assertBusy(new Runnable() { + @Override + public void run() { + assertThat(transport.capturedRequests().length, equalTo(1)); + } + }); + + // let it fail the second time too + requestId = transport.capturedRequests()[0].requestId; + transport.handleResponse(requestId, new ConnectTransportException(node, "test exception")); + try { + // result should return immediately + assertTrue(listener.isDone()); + listener.get(); + fail("this should fail with a transport exception"); + } catch (ExecutionException t) { + if (ExceptionsHelper.unwrap(t, ConnectTransportException.class) == null) { + logger.info("expected ConnectTransportException but got 
", t); + fail("expected and ConnectTransportException"); + } + } + } + + public void testUnresolvableRequestDoesNotHang() throws InterruptedException, ExecutionException, TimeoutException { + action = new TestTransportInstanceSingleOperationAction( + Settings.EMPTY, + "indices:admin/test_unresolvable", + transportService, + new ActionFilters(new HashSet()), + new MyResolver(), + Request::new + ) { + @Override + protected boolean resolveRequest(ClusterState state, Request request, ActionListener listener) { + return false; + } + }; + Request request = new Request().index("test"); + request.shardId = 0; + PlainActionFuture listener = new PlainActionFuture<>(); + clusterService.setState(ClusterStateCreationUtils.state("test", randomBoolean(), ShardRoutingState.STARTED)); + action.new AsyncSingleAction(request, listener).start(); + assertThat(transport.capturedRequests().length, equalTo(0)); + try { + listener.get(); + } catch (Throwable t) { + if (ExceptionsHelper.unwrap(t, IllegalStateException.class) == null) { + logger.info("expected IllegalStateException but got ", t); + fail("expected and IllegalStateException"); + } + } + } +} \ No newline at end of file diff --git a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java index 8e71f3d18b7..990c399d8ee 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java @@ -246,7 +246,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { SortedSet expectedVersions = new TreeSet<>(); for (Version v : VersionUtils.allVersions()) { if (v.snapshot()) continue; // snapshots are unreleased, so there is no backcompat yet - if (v.onOrBefore(Version.V_0_20_6)) continue; // we can only test back one major lucene version + if (v.onOrBefore(Version.V_2_0_0_beta1)) continue; // we can only test back one major lucene version if (v.equals(Version.CURRENT)) continue; // the current version is always compatible with itself expectedVersions.add("index-" + v.toString() + ".zip"); } @@ -312,7 +312,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { client().admin().indices().prepareOpen(indexName).get(); fail("Shouldn't be able to open an old index"); } catch (IllegalStateException ex) { - assertThat(ex.getMessage(), containsString("was created before v0.90.0 and wasn't upgraded")); + assertThat(ex.getMessage(), containsString("was created before v2.0.0.beta1 and wasn't upgraded")); } unloadIndex(indexName); logger.info("--> Done testing " + index + ", took " + ((System.currentTimeMillis() - startTime) / 1000.0) + " seconds"); diff --git a/core/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java b/core/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java index 486267bf70c..895748514d4 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java @@ -36,7 +36,7 @@ public class RecoveryWithUnsupportedIndicesIT extends StaticIndexBackwardCompati internalCluster().startNode(nodeSettings); fail(); } catch (Exception ex) { - assertThat(ex.getMessage(), containsString(" was created before v0.90.0 and wasn't upgraded")); + assertThat(ex.getMessage(), containsString(" was created before v2.0.0.beta1 and wasn't 
upgraded")); } } } diff --git a/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java b/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java index 9ef4238e3b9..740b185e745 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java @@ -95,9 +95,8 @@ public class RestoreBackwardsCompatIT extends AbstractSnapshotIntegTestCase { if (Modifier.isStatic(field.getModifiers()) && field.getType() == Version.class) { Version v = (Version) field.get(Version.class); if (v.snapshot()) continue; - if (v.onOrBefore(Version.V_1_0_0_Beta1)) continue; + if (v.onOrBefore(Version.V_2_0_0_beta1)) continue; if (v.equals(Version.CURRENT)) continue; - expectedVersions.add(v.toString()); } } diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java deleted file mode 100644 index 88f27bcf609..00000000000 --- a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.cluster.metadata; - -import com.carrotsearch.randomizedtesting.generators.RandomPicks; -import org.elasticsearch.Version; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.store.IndexStoreModule; -import org.elasticsearch.test.ESTestCase; - -import java.util.Arrays; -import java.util.Locale; - -public class MetaDataIndexUpgradeServiceTests extends ESTestCase { - - public void testUpgradeStoreSettings() { - final String type = RandomPicks.randomFrom(random(), Arrays.asList("nio_fs", "mmap_fs", "simple_fs", "default", "fs")); - MetaDataIndexUpgradeService metaDataIndexUpgradeService = new MetaDataIndexUpgradeService(Settings.EMPTY, null); - Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexStoreModule.STORE_TYPE, randomBoolean() ? 
type : type.toUpperCase(Locale.ROOT)) - .build(); - IndexMetaData test = IndexMetaData.builder("test") - .settings(indexSettings) - .numberOfShards(1) - .numberOfReplicas(1) - .build(); - IndexMetaData indexMetaData = metaDataIndexUpgradeService.upgradeSettings(test); - assertEquals(type.replace("_", ""), indexMetaData.getSettings().get(IndexStoreModule.STORE_TYPE)); - } - - public void testNoStoreSetting() { - Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .build(); - IndexMetaData test = IndexMetaData.builder("test") - .settings(indexSettings) - .numberOfShards(1) - .numberOfReplicas(1) - .build(); - MetaDataIndexUpgradeService metaDataIndexUpgradeService = new MetaDataIndexUpgradeService(Settings.EMPTY, null); - IndexMetaData indexMetaData = metaDataIndexUpgradeService.upgradeSettings(test); - assertSame(indexMetaData, test); - } -} diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingBackwardCompatibilityTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingBackwardCompatibilityTests.java index a2dbf786812..29281e256f6 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingBackwardCompatibilityTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingBackwardCompatibilityTests.java @@ -38,43 +38,32 @@ import java.util.Arrays; public class RoutingBackwardCompatibilityTests extends ESTestCase { public void testBackwardCompatibility() throws Exception { - Path baseDir = createTempDir(); - Node node = new Node(Settings.builder().put("path.home", baseDir.toString()).build()); - try { - try (BufferedReader reader = new BufferedReader(new InputStreamReader(RoutingBackwardCompatibilityTests.class.getResourceAsStream("/org/elasticsearch/cluster/routing/shard_routes.txt"), "UTF-8"))) { - for (String line = reader.readLine(); line != null; line = reader.readLine()) { - if (line.startsWith("#")) { // comment - continue; - } - String[] parts = line.split("\t"); - assertEquals(Arrays.toString(parts), 7, parts.length); - final String index = parts[0]; - final int numberOfShards = Integer.parseInt(parts[1]); - final String type = parts[2]; - final String id = parts[3]; - final String routing = "null".equals(parts[4]) ? null : parts[4]; - final int pre20ExpectedShardId = Integer.parseInt(parts[5]); - final int currentExpectedShard = Integer.parseInt(parts[6]); + try (BufferedReader reader = new BufferedReader(new InputStreamReader(RoutingBackwardCompatibilityTests.class.getResourceAsStream("/org/elasticsearch/cluster/routing/shard_routes.txt"), "UTF-8"))) { + for (String line = reader.readLine(); line != null; line = reader.readLine()) { + if (line.startsWith("#")) { // comment + continue; + } + String[] parts = line.split("\t"); + assertEquals(Arrays.toString(parts), 7, parts.length); + final String index = parts[0]; + final int numberOfShards = Integer.parseInt(parts[1]); + final String type = parts[2]; + final String id = parts[3]; + final String routing = "null".equals(parts[4]) ? 
null : parts[4]; + final int pre20ExpectedShardId = Integer.parseInt(parts[5]); // not needed anymore - old hashing is gone + final int currentExpectedShard = Integer.parseInt(parts[6]); - OperationRouting operationRouting = node.injector().getInstance(OperationRouting.class); - for (Version version : VersionUtils.allVersions()) { - final Settings settings = settings(version).build(); - IndexMetaData indexMetaData = IndexMetaData.builder(index).settings(settings).numberOfShards(numberOfShards).numberOfReplicas(randomInt(3)).build(); - MetaData.Builder metaData = MetaData.builder().put(indexMetaData, false); - RoutingTable routingTable = RoutingTable.builder().addAsNew(indexMetaData).build(); - ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build(); - final int shardId = operationRouting.indexShards(clusterState, index, type, id, routing).shardId().getId(); - if (version.before(Version.V_2_0_0_beta1)) { - assertEquals(pre20ExpectedShardId, shardId); - } else { - assertEquals(currentExpectedShard, shardId); - } - } + OperationRouting operationRouting = new OperationRouting(Settings.EMPTY, null); + for (Version version : VersionUtils.allVersions()) { + final Settings settings = settings(version).build(); + IndexMetaData indexMetaData = IndexMetaData.builder(index).settings(settings).numberOfShards(numberOfShards).numberOfReplicas(randomInt(3)).build(); + MetaData.Builder metaData = MetaData.builder().put(indexMetaData, false); + RoutingTable routingTable = RoutingTable.builder().addAsNew(indexMetaData).build(); + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build(); + final int shardId = operationRouting.indexShards(clusterState, index, type, id, routing).shardId().getId(); + assertEquals(currentExpectedShard, shardId); } } - } finally { - node.close(); } } - } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingBackwardCompatibilityUponUpgradeIT.java b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingBackwardCompatibilityUponUpgradeIT.java deleted file mode 100644 index bff1977545a..00000000000 --- a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingBackwardCompatibilityUponUpgradeIT.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.cluster.routing; - -import org.apache.lucene.util.LuceneTestCase; -import org.elasticsearch.action.admin.indices.get.GetIndexResponse; -import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; -import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.Node; -import org.elasticsearch.search.SearchHit; -import org.elasticsearch.test.ESIntegTestCase; - -import java.nio.file.Path; - -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; - -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, minNumDataNodes = 0, maxNumDataNodes = 0) -@LuceneTestCase.SuppressFileSystems("*") // extra files break the single data cluster expectation when unzipping the static index -public class RoutingBackwardCompatibilityUponUpgradeIT extends ESIntegTestCase { - - public void testDefaultRouting() throws Exception { - test("default_routing_1_x", DjbHashFunction.class, false); - } - - public void testCustomRouting() throws Exception { - test("custom_routing_1_x", SimpleHashFunction.class, true); - } - - private void test(String name, Class expectedHashFunction, boolean expectedUseType) throws Exception { - Path zippedIndexDir = getDataPath("/org/elasticsearch/cluster/routing/" + name + ".zip"); - Settings baseSettings = prepareBackwardsDataDir(zippedIndexDir); - internalCluster().startNode(Settings.builder() - .put(baseSettings) - .put(Node.HTTP_ENABLED, true) - .build()); - ensureYellow("test"); - GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().get(); - assertArrayEquals(new String[] {"test"}, getIndexResponse.indices()); - GetSettingsResponse getSettingsResponse = client().admin().indices().prepareGetSettings("test").get(); - assertEquals(expectedHashFunction.getName(), getSettingsResponse.getSetting("test", IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION)); - assertEquals(Boolean.valueOf(expectedUseType).toString(), getSettingsResponse.getSetting("test", IndexMetaData.SETTING_LEGACY_ROUTING_USE_TYPE)); - SearchResponse allDocs = client().prepareSearch("test").get(); - assertSearchResponse(allDocs); - assertHitCount(allDocs, 4); - // Make sure routing works - for (SearchHit hit : allDocs.getHits().hits()) { - GetResponse get = client().prepareGet(hit.index(), hit.type(), hit.id()).get(); - assertTrue(get.isExists()); - } - } - -} diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/operation/hash/murmur3/Murmur3HashFunctionTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/operation/hash/murmur3/Murmur3HashFunctionTests.java index ed454aead0d..4dcc5acd811 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/operation/hash/murmur3/Murmur3HashFunctionTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/operation/hash/murmur3/Murmur3HashFunctionTests.java @@ -24,8 +24,6 @@ import org.elasticsearch.test.ESTestCase; public class Murmur3HashFunctionTests extends ESTestCase { - private static Murmur3HashFunction HASH = new Murmur3HashFunction(); - public void testKnownValues() { assertHash(0x5a0cb7c3, "hell"); assertHash(0xd7c31989, "hello"); @@ -37,6 +35,6 @@ public class Murmur3HashFunctionTests extends ESTestCase { } private 
static void assertHash(int expected, String stringInput) { - assertEquals(expected, HASH.hash(stringInput)); + assertEquals(expected, Murmur3HashFunction.hash(stringInput)); } } diff --git a/core/src/test/java/org/elasticsearch/common/cache/CacheTests.java b/core/src/test/java/org/elasticsearch/common/cache/CacheTests.java new file mode 100644 index 00000000000..d1481a5ad5b --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/cache/CacheTests.java @@ -0,0 +1,536 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.cache; + +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; + +import java.util.*; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReferenceArray; + +import static org.hamcrest.CoreMatchers.instanceOf; + +public class CacheTests extends ESTestCase { + private int numberOfEntries; + + @Before + public void setUp() throws Exception { + super.setUp(); + numberOfEntries = randomIntBetween(1000, 10000); + logger.debug("numberOfEntries: " + numberOfEntries); + } + + // cache some entries, then randomly lookup keys that do not exist, then check the stats + public void testCacheStats() { + AtomicLong evictions = new AtomicLong(); + Set keys = new HashSet<>(); + Cache cache = + CacheBuilder.builder() + .setMaximumWeight(numberOfEntries / 2) + .removalListener(notification -> { + keys.remove(notification.getKey()); + evictions.incrementAndGet(); + }) + .build(); + + for (int i = 0; i < numberOfEntries; i++) { + // track the keys, which will be removed upon eviction (see the RemovalListener) + keys.add(i); + cache.put(i, Integer.toString(i)); + } + long hits = 0; + long misses = 0; + Integer missingKey = 0; + for (Integer key : keys) { + --missingKey; + if (rarely()) { + misses++; + cache.get(missingKey); + } else { + hits++; + cache.get(key); + } + } + assertEquals(hits, cache.stats().getHits()); + assertEquals(misses, cache.stats().getMisses()); + assertEquals((long) Math.ceil(numberOfEntries / 2.0), evictions.get()); + assertEquals(evictions.get(), cache.stats().getEvictions()); + } + + // cache some entries in batches of size maximumWeight; for each batch, touch the even entries to affect the + // ordering; upon the next caching of entries, the entries from the previous batch will be evicted; we can then + // check that the evicted entries were evicted in LRU order (first the odds in a batch, then the evens in a batch) + // for each batch + public void testCacheEvictions() { + int maximumWeight = randomIntBetween(1, numberOfEntries); + AtomicLong evictions = new AtomicLong(); + List evictedKeys = new ArrayList<>(); + Cache cache = + 
CacheBuilder.builder() + .setMaximumWeight(maximumWeight) + .removalListener(notification -> { + evictions.incrementAndGet(); + evictedKeys.add(notification.getKey()); + }) + .build(); + // cache entries up to numberOfEntries - maximumWeight; all of these entries will ultimately be evicted in + // batches of size maximumWeight, first the odds in the batch, then the evens in the batch + List expectedEvictions = new ArrayList<>(); + int iterations = (int)Math.ceil((numberOfEntries - maximumWeight) / (1.0 * maximumWeight)); + for (int i = 0; i < iterations; i++) { + for (int j = i * maximumWeight; j < (i + 1) * maximumWeight && j < numberOfEntries - maximumWeight; j++) { + cache.put(j, Integer.toString(j)); + if (j % 2 == 1) { + expectedEvictions.add(j); + } + } + for (int j = i * maximumWeight; j < (i + 1) * maximumWeight && j < numberOfEntries - maximumWeight; j++) { + if (j % 2 == 0) { + cache.get(j); + expectedEvictions.add(j); + } + } + } + // finish filling the cache + for (int i = numberOfEntries - maximumWeight; i < numberOfEntries; i++) { + cache.put(i, Integer.toString(i)); + } + assertEquals(numberOfEntries - maximumWeight, evictions.get()); + assertEquals(evictions.get(), cache.stats().getEvictions()); + + // assert that the keys were evicted in LRU order + Set keys = new HashSet<>(); + List remainingKeys = new ArrayList<>(); + for (Integer key : cache.keys()) { + keys.add(key); + remainingKeys.add(key); + } + assertEquals(expectedEvictions.size(), evictedKeys.size()); + for (int i = 0; i < expectedEvictions.size(); i++) { + assertFalse(keys.contains(expectedEvictions.get(i))); + assertEquals(expectedEvictions.get(i), evictedKeys.get(i)); + } + for (int i = numberOfEntries - maximumWeight; i < numberOfEntries; i++) { + assertTrue(keys.contains(i)); + assertEquals( + numberOfEntries - i + (numberOfEntries - maximumWeight) - 1, + (int) remainingKeys.get(i - (numberOfEntries - maximumWeight)) + ); + } + } + + // cache some entries and exceed the maximum weight, then check that the cache has the expected weight and the + // expected evictions occurred + public void testWeigher() { + int maximumWeight = 2 * numberOfEntries; + int weight = randomIntBetween(2, 10); + AtomicLong evictions = new AtomicLong(); + Cache cache = + CacheBuilder.builder() + .setMaximumWeight(maximumWeight) + .weigher((k, v) -> weight) + .removalListener(notification -> evictions.incrementAndGet()) + .build(); + for (int i = 0; i < numberOfEntries; i++) { + cache.put(i, Integer.toString(i)); + } + // cache weight should be the largest multiple of weight less than maximumWeight + assertEquals(weight * (maximumWeight / weight), cache.weight()); + + // the number of evicted entries should be the number of entries that fit in the excess weight + assertEquals((int) Math.ceil((weight - 2) * numberOfEntries / (1.0 * weight)), evictions.get()); + + assertEquals(evictions.get(), cache.stats().getEvictions()); + } + + // cache some entries, randomly invalidate some of them, then check that the weight of the cache is correct + public void testWeight() { + Cache cache = + CacheBuilder.builder() + .weigher((k, v) -> k) + .build(); + int weight = 0; + for (int i = 0; i < numberOfEntries; i++) { + weight += i; + cache.put(i, Integer.toString(i)); + } + for (int i = 0; i < numberOfEntries; i++) { + if (rarely()) { + weight -= i; + cache.invalidate(i); + } + } + assertEquals(weight, cache.weight()); + } + + // cache some entries, randomly invalidate some of them, then check that the number of cached entries is correct + public 
void testCount() { + Cache cache = CacheBuilder.builder().build(); + int count = 0; + for (int i = 0; i < numberOfEntries; i++) { + count++; + cache.put(i, Integer.toString(i)); + } + for (int i = 0; i < numberOfEntries; i++) { + if (rarely()) { + count--; + cache.invalidate(i); + } + } + assertEquals(count, cache.count()); + } + + // cache some entries, step the clock forward, cache some more entries, step the clock forward and then check that + // the first batch of cached entries expired and were removed + public void testExpirationAfterAccess() { + AtomicLong now = new AtomicLong(); + Cache cache = new Cache() { + @Override + protected long now() { + return now.get(); + } + }; + cache.setExpireAfterAccess(1); + List evictedKeys = new ArrayList<>(); + cache.setRemovalListener(notification -> { + assertEquals(RemovalNotification.RemovalReason.EVICTED, notification.getRemovalReason()); + evictedKeys.add(notification.getKey()); + }); + now.set(0); + for (int i = 0; i < numberOfEntries; i++) { + cache.put(i, Integer.toString(i)); + } + now.set(1); + for (int i = numberOfEntries; i < 2 * numberOfEntries; i++) { + cache.put(i, Integer.toString(i)); + } + now.set(2); + cache.refresh(); + assertEquals(numberOfEntries, cache.count()); + for (int i = 0; i < evictedKeys.size(); i++) { + assertEquals(i, (int) evictedKeys.get(i)); + } + Set remainingKeys = new HashSet<>(); + for (Integer key : cache.keys()) { + remainingKeys.add(key); + } + for (int i = numberOfEntries; i < 2 * numberOfEntries; i++) { + assertTrue(remainingKeys.contains(i)); + } + } + + public void testExpirationAfterWrite() { + AtomicLong now = new AtomicLong(); + Cache cache = new Cache() { + @Override + protected long now() { + return now.get(); + } + }; + cache.setExpireAfterWrite(1); + List evictedKeys = new ArrayList<>(); + cache.setRemovalListener(notification -> { + assertEquals(RemovalNotification.RemovalReason.EVICTED, notification.getRemovalReason()); + evictedKeys.add(notification.getKey()); + }); + now.set(0); + for (int i = 0; i < numberOfEntries; i++) { + cache.put(i, Integer.toString(i)); + } + now.set(1); + for (int i = numberOfEntries; i < 2 * numberOfEntries; i++) { + cache.put(i, Integer.toString(i)); + } + now.set(2); + for (int i = 0; i < numberOfEntries; i++) { + cache.get(i); + } + cache.refresh(); + assertEquals(numberOfEntries, cache.count()); + for (int i = 0; i < evictedKeys.size(); i++) { + assertEquals(i, (int) evictedKeys.get(i)); + } + Set remainingKeys = new HashSet<>(); + for (Integer key : cache.keys()) { + remainingKeys.add(key); + } + for (int i = numberOfEntries; i < 2 * numberOfEntries; i++) { + assertTrue(remainingKeys.contains(i)); + } + } + + // randomly promote some entries, step the clock forward, then check that the promoted entries remain and the + // non-promoted entries were removed + public void testPromotion() { + AtomicLong now = new AtomicLong(); + Cache cache = new Cache() { + @Override + protected long now() { + return now.get(); + } + }; + cache.setExpireAfterAccess(1); + now.set(0); + for (int i = 0; i < numberOfEntries; i++) { + cache.put(i, Integer.toString(i)); + } + now.set(1); + Set promotedKeys = new HashSet<>(); + for (int i = 0; i < numberOfEntries; i++) { + if (rarely()) { + cache.get(i); + promotedKeys.add(i); + } + } + now.set(2); + cache.refresh(); + assertEquals(promotedKeys.size(), cache.count()); + for (int i = 0; i < numberOfEntries; i++) { + if (promotedKeys.contains(i)) { + assertNotNull(cache.get(i)); + } else { + assertNull(cache.get(i)); + } + } + } + + + 
// randomly invalidate some cached entries, then check that a lookup for each of those and only those keys is null + public void testInvalidate() { + Cache cache = CacheBuilder.builder().build(); + for (int i = 0; i < numberOfEntries; i++) { + cache.put(i, Integer.toString(i)); + } + Set keys = new HashSet<>(); + for (Integer key : cache.keys()) { + if (rarely()) { + cache.invalidate(key); + keys.add(key); + } + } + for (int i = 0; i < numberOfEntries; i++) { + if (keys.contains(i)) { + assertNull(cache.get(i)); + } else { + assertNotNull(cache.get(i)); + } + } + } + + // randomly invalidate some cached entries, then check that we receive invalidate notifications for those and only + // those entries + public void testNotificationOnInvalidate() { + Set notifications = new HashSet<>(); + Cache cache = + CacheBuilder.builder() + .removalListener(notification -> { + assertEquals(RemovalNotification.RemovalReason.INVALIDATED, notification.getRemovalReason()); + notifications.add(notification.getKey()); + }) + .build(); + for (int i = 0; i < numberOfEntries; i++) { + cache.put(i, Integer.toString(i)); + } + Set invalidated = new HashSet<>(); + for (int i = 0; i < numberOfEntries; i++) { + if (rarely()) { + cache.invalidate(i); + invalidated.add(i); + } + } + assertEquals(notifications, invalidated); + } + + // invalidate all cached entries, then check that the cache is empty + public void testInvalidateAll() { + Cache cache = CacheBuilder.builder().build(); + for (int i = 0; i < numberOfEntries; i++) { + cache.put(i, Integer.toString(i)); + } + cache.invalidateAll(); + assertEquals(0, cache.count()); + assertEquals(0, cache.weight()); + } + + // invalidate all cached entries, then check that we receive invalidate notifications for all entries + public void testNotificationOnInvalidateAll() { + Set notifications = new HashSet<>(); + Cache cache = + CacheBuilder.builder() + .removalListener(notification -> { + assertEquals(RemovalNotification.RemovalReason.INVALIDATED, notification.getRemovalReason()); + notifications.add(notification.getKey()); + }) + .build(); + Set invalidated = new HashSet<>(); + for (int i = 0; i < numberOfEntries; i++) { + cache.put(i, Integer.toString(i)); + invalidated.add(i); + } + cache.invalidateAll(); + assertEquals(invalidated, notifications); + } + + // randomly replace some entries, increasing the weight by 1 for each replacement, then check that the cache size + // is correct + public void testReplaceRecomputesSize() { + class Key { + private int key; + private long weight; + + public Key(int key, long weight) { + this.key = key; + this.weight = weight; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + Key key1 = (Key) o; + + return key == key1.key; + + } + + @Override + public int hashCode() { + return key; + } + } + Cache cache = CacheBuilder.builder().weigher((k, s) -> k.weight).build(); + for (int i = 0; i < numberOfEntries; i++) { + cache.put(new Key(i, 1), Integer.toString(i)); + } + assertEquals(numberOfEntries, cache.count()); + assertEquals(numberOfEntries, cache.weight()); + int replaced = 0; + for (int i = 0; i < numberOfEntries; i++) { + if (rarely()) { + replaced++; + cache.put(new Key(i, 2), Integer.toString(i)); + } + } + assertEquals(numberOfEntries, cache.count()); + assertEquals(numberOfEntries + replaced, cache.weight()); + } + + // randomly replace some entries, then check that we received replacement notifications for those and only those + // 
entries + public void testNotificationOnReplace() { + Set notifications = new HashSet<>(); + Cache cache = + CacheBuilder.builder() + .removalListener(notification -> { + assertEquals(RemovalNotification.RemovalReason.REPLACED, notification.getRemovalReason()); + notifications.add(notification.getKey()); + }) + .build(); + for (int i = 0; i < numberOfEntries; i++) { + cache.put(i, Integer.toString(i)); + } + Set replacements = new HashSet<>(); + for (int i = 0; i < numberOfEntries; i++) { + if (rarely()) { + cache.put(i, Integer.toString(i) + Integer.toString(i)); + replacements.add(i); + } + } + assertEquals(replacements, notifications); + } + + public void testComputeIfAbsentCallsOnce() throws InterruptedException { + int numberOfThreads = randomIntBetween(2, 200); + final Cache cache = CacheBuilder.builder().build(); + List threads = new ArrayList<>(); + AtomicReferenceArray flags = new AtomicReferenceArray(numberOfEntries); + for (int j = 0; j < numberOfEntries; j++) { + flags.set(j, false); + } + CountDownLatch latch = new CountDownLatch(1 + numberOfThreads); + for (int i = 0; i < numberOfThreads; i++) { + Thread thread = new Thread(() -> { + latch.countDown(); + for (int j = 0; j < numberOfEntries; j++) { + try { + cache.computeIfAbsent(j, key -> { + assertTrue(flags.compareAndSet(key, false, true)); + return Integer.toString(key); + }); + } catch (ExecutionException e) { + throw new RuntimeException(e); + } + } + }); + threads.add(thread); + thread.start(); + } + latch.countDown(); + for (Thread thread : threads) { + thread.join(); + } + } + + public void testComputeIfAbsentThrowsExceptionIfLoaderReturnsANullValue() { + final Cache cache = CacheBuilder.builder().build(); + try { + cache.computeIfAbsent(1, k -> null); + fail("expected ExecutionException"); + } catch (ExecutionException e) { + assertThat(e.getCause(), instanceOf(NullPointerException.class)); + } + } + + // test that the cache is not corrupted under lots of concurrent modifications, even hitting the same key + // here be dragons: this test did catch one subtle bug during development; do not remove lightly + public void testTorture() throws InterruptedException { + int numberOfThreads = randomIntBetween(2, 200); + final Cache cache = + CacheBuilder.builder() + .setMaximumWeight(1000) + .weigher((k, v) -> 2) + .build(); + + CountDownLatch latch = new CountDownLatch(1 + numberOfThreads); + List threads = new ArrayList<>(); + for (int i = 0; i < numberOfThreads; i++) { + Thread thread = new Thread(() -> { + Random random = new Random(random().nextLong()); + latch.countDown(); + for (int j = 0; j < numberOfEntries; j++) { + Integer key = random.nextInt(numberOfEntries); + cache.put(key, Integer.toString(j)); + } + }); + threads.add(thread); + thread.start(); + } + latch.countDown(); + for (Thread thread : threads) { + thread.join(); + } + cache.refresh(); + assertEquals(500, cache.count()); + } +} diff --git a/core/src/test/java/org/elasticsearch/common/collect/EvictingQueueTests.java b/core/src/test/java/org/elasticsearch/common/collect/EvictingQueueTests.java new file mode 100644 index 00000000000..de822b8aa83 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/collect/EvictingQueueTests.java @@ -0,0 +1,150 @@ +/* + * Copyright (C) 2012 The Guava Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.elasticsearch.common.collect; + +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.test.ESTestCase; + +import java.util.Collections; +import java.util.NoSuchElementException; + +public class EvictingQueueTests extends ESTestCase { + public void testCreateWithNegativeSize() throws Exception { + try { + new EvictingQueue<>(-1); + fail(); + } catch (IllegalArgumentException expected) { + } + } + + public void testCreateWithZeroSize() throws Exception { + EvictingQueue queue = new EvictingQueue<>(0); + assertEquals(0, queue.size()); + + assertTrue(queue.add("hi")); + assertEquals(0, queue.size()); + + assertTrue(queue.offer("hi")); + assertEquals(0, queue.size()); + + assertFalse(queue.remove("hi")); + assertEquals(0, queue.size()); + + try { + queue.element(); + fail(); + } catch (NoSuchElementException expected) {} + + assertNull(queue.peek()); + assertNull(queue.poll()); + try { + queue.remove(); + fail(); + } catch (NoSuchElementException expected) {} + } + + public void testRemainingCapacityMaximumSizeZero() { + EvictingQueue queue = new EvictingQueue<>(0); + assertEquals(0, queue.remainingCapacity()); + } + + public void testRemainingCapacityMaximumSizeOne() { + EvictingQueue queue = new EvictingQueue<>(1); + assertEquals(1, queue.remainingCapacity()); + queue.add("hi"); + assertEquals(0, queue.remainingCapacity()); + } + + public void testRemainingCapacityMaximumSizeThree() { + EvictingQueue queue = new EvictingQueue<>(3); + assertEquals(3, queue.remainingCapacity()); + queue.add("hi"); + assertEquals(2, queue.remainingCapacity()); + queue.add("hi"); + assertEquals(1, queue.remainingCapacity()); + queue.add("hi"); + assertEquals(0, queue.remainingCapacity()); + } + + public void testEvictingAfterOne() throws Exception { + EvictingQueue queue = new EvictingQueue<>(1); + assertEquals(0, queue.size()); + assertEquals(1, queue.remainingCapacity()); + + assertTrue(queue.add("hi")); + assertEquals("hi", queue.element()); + assertEquals("hi", queue.peek()); + assertEquals(1, queue.size()); + assertEquals(0, queue.remainingCapacity()); + + assertTrue(queue.add("there")); + assertEquals("there", queue.element()); + assertEquals("there", queue.peek()); + assertEquals(1, queue.size()); + assertEquals(0, queue.remainingCapacity()); + + assertEquals("there", queue.remove()); + assertEquals(0, queue.size()); + assertEquals(1, queue.remainingCapacity()); + } + + public void testEvictingAfterThree() throws Exception { + EvictingQueue queue = new EvictingQueue<>(3); + assertEquals(0, queue.size()); + assertEquals(3, queue.remainingCapacity()); + + assertTrue(queue.add("one")); + assertTrue(queue.add("two")); + assertTrue(queue.add("three")); + assertEquals("one", queue.element()); + assertEquals("one", queue.peek()); + assertEquals(3, queue.size()); + assertEquals(0, queue.remainingCapacity()); + + assertTrue(queue.add("four")); + assertEquals("two", queue.element()); + assertEquals("two", queue.peek()); + assertEquals(3, queue.size()); + assertEquals(0, queue.remainingCapacity()); + + assertEquals("two", queue.remove()); + 
assertEquals(2, queue.size()); + assertEquals(1, queue.remainingCapacity()); + } + + public void testAddAll() throws Exception { + EvictingQueue queue = new EvictingQueue<>(3); + assertEquals(0, queue.size()); + assertEquals(3, queue.remainingCapacity()); + + assertTrue(queue.addAll(CollectionUtils.arrayAsArrayList("one", "two", "three"))); + assertEquals("one", queue.element()); + assertEquals("one", queue.peek()); + assertEquals(3, queue.size()); + assertEquals(0, queue.remainingCapacity()); + + assertTrue(queue.addAll(Collections.singletonList("four"))); + assertEquals("two", queue.element()); + assertEquals("two", queue.peek()); + assertEquals(3, queue.size()); + assertEquals(0, queue.remainingCapacity()); + + assertEquals("two", queue.remove()); + assertEquals(2, queue.size()); + assertEquals(1, queue.remainingCapacity()); + } +} \ No newline at end of file diff --git a/core/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java b/core/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java new file mode 100644 index 00000000000..90972185e0b --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java @@ -0,0 +1,162 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.collect; + +import org.elasticsearch.test.ESTestCase; + +import java.util.*; + +public class IteratorsTests extends ESTestCase { + public void testConcatenation() { + List threeTwoOne = Arrays.asList(3, 2, 1); + List fourFiveSix = Arrays.asList(4, 5, 6); + Iterator concat = Iterators.concat(threeTwoOne.iterator(), fourFiveSix.iterator()); + assertContainsInOrder(concat, 3, 2, 1, 4, 5, 6); + } + + public void testNoConcatenation() { + Iterator iterator = Iterators.concat(); + assertEmptyIterator(iterator); + } + + public void testEmptyConcatenation() { + Iterator iterator = Iterators.concat(empty()); + assertEmptyIterator(iterator); + } + + public void testMultipleEmptyConcatenation() { + Iterator iterator = Iterators.concat(empty(), empty()); + assertEmptyIterator(iterator); + } + + public void testSingleton() { + int value = randomInt(); + assertSingleton(value, singletonIterator(value)); + } + + public void testEmptyBeforeSingleton() { + int value = randomInt(); + assertSingleton(value, empty(), singletonIterator(value)); + } + + + public void testEmptyAfterSingleton() { + int value = randomInt(); + assertSingleton(value, singletonIterator(value), empty()); + } + + public void testRandomSingleton() { + int numberOfIterators = randomIntBetween(1, 1000); + int singletonIndex = randomIntBetween(0, numberOfIterators - 1); + int value = randomInt(); + Iterator[] iterators = new Iterator[numberOfIterators]; + for (int i = 0; i < numberOfIterators; i++) { + iterators[i] = i != singletonIndex ?
empty() : singletonIterator(value); + } + assertSingleton(value, iterators); + } + + public void testRandomIterators() { + int numberOfIterators = randomIntBetween(1, 1000); + Iterator[] iterators = new Iterator[numberOfIterators]; + List values = new ArrayList<>(); + for (int i = 0; i < numberOfIterators; i++) { + int numberOfValues = randomIntBetween(0, 256); + List theseValues = new ArrayList<>(); + for (int j = 0; j < numberOfValues; j++) { + int value = randomInt(); + values.add(value); + theseValues.add(value); + } + iterators[i] = theseValues.iterator(); + } + assertContainsInOrder(Iterators.concat(iterators), values.toArray(new Integer[values.size()])); + } + + public void testTwoEntries() { + int first = randomInt(); + int second = randomInt(); + Iterator concat = Iterators.concat(singletonIterator(first), empty(), empty(), singletonIterator(second)); + assertContainsInOrder(concat, first, second); + } + + public void testNull() { + try { + Iterators.concat((Iterator)null); + fail("expected " + NullPointerException.class.getSimpleName()); + } catch (NullPointerException e) { + + } + } + + public void testNullIterator() { + try { + Iterators.concat(singletonIterator(1), empty(), null, empty(), singletonIterator(2)); + fail("expected " + NullPointerException.class.getSimpleName()); + } catch (NullPointerException e) { + + } + } + + private Iterator singletonIterator(T value) { + return Collections.singleton(value).iterator(); + } + + private void assertSingleton(T value, Iterator... iterators) { + Iterator concat = Iterators.concat(iterators); + assertContainsInOrder(concat, value); + } + + private Iterator empty() { + return new Iterator() { + @Override + public boolean hasNext() { + return false; + } + + @Override + public T next() { + throw new NoSuchElementException(); + } + }; + } + + private void assertContainsInOrder(Iterator iterator, T... values) { + for (T value : values) { + assertTrue(iterator.hasNext()); + assertEquals(value, iterator.next()); + } + assertNoSuchElementException(iterator); + } + + private void assertEmptyIterator(Iterator iterator) { + assertFalse(iterator.hasNext()); + assertNoSuchElementException(iterator); + } + + private void assertNoSuchElementException(Iterator iterator) { + try { + iterator.next(); + fail("expected " + NoSuchElementException.class.getSimpleName()); + } catch (NoSuchElementException e) { + + } + } +} diff --git a/core/src/test/java/org/elasticsearch/common/inject/ModuleTestCase.java b/core/src/test/java/org/elasticsearch/common/inject/ModuleTestCase.java index 255def77eb2..9b327fb3112 100644 --- a/core/src/test/java/org/elasticsearch/common/inject/ModuleTestCase.java +++ b/core/src/test/java/org/elasticsearch/common/inject/ModuleTestCase.java @@ -60,22 +60,6 @@ public abstract class ModuleTestCase extends ESTestCase { fail("Did not find any binding to " + to.getName() + ". Found these bindings:\n" + s); } -// /** Configures the module and asserts "instance" is bound to "to". */ -// public void assertInstanceBinding(Module module, Class to, Object instance) { -// List elements = Elements.getElements(module); -// for (Element element : elements) { -// if (element instanceof ProviderInstanceBinding) { -// assertEquals(instance, ((ProviderInstanceBinding) element).getProviderInstance().get()); -// return; -// } -// } -// StringBuilder s = new StringBuilder(); -// for (Element element : elements) { -// s.append(element + "\n"); -// } -// fail("Did not find any binding to " + to.getName() + ". 
Found these bindings:\n" + s); -// } - /** * Attempts to configure the module, and asserts an {@link IllegalArgumentException} is * caught, containing the given messages diff --git a/core/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java b/core/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java index afc17ce004b..2b37359b2f6 100644 --- a/core/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java +++ b/core/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.common.io.stream; import org.apache.lucene.util.Constants; +import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.test.ESTestCase; @@ -478,4 +479,23 @@ public class BytesStreamsTests extends ESTestCase { getRandom().nextBytes(data); return data; } + + public void testReadWriteGeoPoint() throws IOException { + { + BytesStreamOutput out = new BytesStreamOutput(); + GeoPoint geoPoint = new GeoPoint(randomDouble(), randomDouble()); + out.writeGenericValue(geoPoint); + StreamInput wrap = StreamInput.wrap(out.bytes()); + GeoPoint point = (GeoPoint) wrap.readGenericValue(); + assertEquals(point, geoPoint); + } + { + BytesStreamOutput out = new BytesStreamOutput(); + GeoPoint geoPoint = new GeoPoint(randomDouble(), randomDouble()); + out.writeGeoPoint(geoPoint); + StreamInput wrap = StreamInput.wrap(out.bytes()); + GeoPoint point = wrap.readGeoPoint(); + assertEquals(point, geoPoint); + } + } } diff --git a/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java b/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java index 2fb90c776da..13ac6fd0a6f 100644 --- a/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java +++ b/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java @@ -20,10 +20,14 @@ package org.elasticsearch.common.lucene; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; +import org.apache.lucene.document.Field.Store; +import org.apache.lucene.document.StringField; import org.apache.lucene.document.TextField; import org.apache.lucene.index.*; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.TermQuery; +import org.apache.lucene.store.Directory; import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.util.Version; import org.elasticsearch.test.ESTestCase; @@ -322,4 +326,37 @@ public class LuceneTests extends ESTestCase { writer.close(); dir.close(); } + + public void testCount() throws Exception { + Directory dir = newDirectory(); + RandomIndexWriter w = new RandomIndexWriter(getRandom(), dir); + + try (DirectoryReader reader = w.getReader()) { + // match_all does not match anything on an empty index + IndexSearcher searcher = newSearcher(reader); + assertFalse(Lucene.exists(searcher, new MatchAllDocsQuery())); + } + + Document doc = new Document(); + w.addDocument(doc); + + doc.add(new StringField("foo", "bar", Store.NO)); + w.addDocument(doc); + + try (DirectoryReader reader = w.getReader()) { + IndexSearcher searcher = newSearcher(reader); + assertTrue(Lucene.exists(searcher, new MatchAllDocsQuery())); + assertFalse(Lucene.exists(searcher, new TermQuery(new Term("baz", "bar")))); + assertTrue(Lucene.exists(searcher, new TermQuery(new Term("foo", "bar")))); + } + 
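+        // deleting the only matching document must flip exists() back to false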
+ w.deleteDocuments(new Term("foo", "bar")); + try (DirectoryReader reader = w.getReader()) { + IndexSearcher searcher = newSearcher(reader); + assertFalse(Lucene.exists(searcher, new TermQuery(new Term("foo", "bar")))); + } + + w.close(); + dir.close(); + } } diff --git a/core/src/test/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQueryTests.java b/core/src/test/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQueryTests.java index cdf7db9ad2d..43e151e4867 100644 --- a/core/src/test/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQueryTests.java +++ b/core/src/test/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQueryTests.java @@ -44,23 +44,23 @@ public class MultiPhrasePrefixQueryTests extends ESTestCase { MultiPhrasePrefixQuery query = new MultiPhrasePrefixQuery(); query.add(new Term("field", "aa")); - assertThat(Lucene.count(searcher, query), equalTo(1l)); + assertThat(searcher.count(query), equalTo(1)); query = new MultiPhrasePrefixQuery(); query.add(new Term("field", "aaa")); query.add(new Term("field", "bb")); - assertThat(Lucene.count(searcher, query), equalTo(1l)); + assertThat(searcher.count(query), equalTo(1)); query = new MultiPhrasePrefixQuery(); query.setSlop(1); query.add(new Term("field", "aaa")); query.add(new Term("field", "cc")); - assertThat(Lucene.count(searcher, query), equalTo(1l)); + assertThat(searcher.count(query), equalTo(1)); query = new MultiPhrasePrefixQuery(); query.setSlop(1); query.add(new Term("field", "xxx")); - assertThat(Lucene.count(searcher, query), equalTo(0l)); + assertThat(searcher.count(query), equalTo(0)); } @Test diff --git a/core/src/test/java/org/elasticsearch/common/lucene/search/morelikethis/MoreLikeThisQueryTests.java b/core/src/test/java/org/elasticsearch/common/lucene/search/morelikethis/MoreLikeThisQueryTests.java index 5db7e7e9b36..119c595ea9b 100644 --- a/core/src/test/java/org/elasticsearch/common/lucene/search/morelikethis/MoreLikeThisQueryTests.java +++ b/core/src/test/java/org/elasticsearch/common/lucene/search/morelikethis/MoreLikeThisQueryTests.java @@ -65,7 +65,7 @@ public class MoreLikeThisQueryTests extends ESTestCase { mltQuery.setLikeText("lucene"); mltQuery.setMinTermFrequency(1); mltQuery.setMinDocFreq(1); - long count = Lucene.count(searcher, mltQuery); + long count = searcher.count(mltQuery); assertThat(count, equalTo(2l)); reader.close(); diff --git a/core/src/test/java/org/elasticsearch/common/network/InetAddressesTests.java b/core/src/test/java/org/elasticsearch/common/network/InetAddressesTests.java new file mode 100644 index 00000000000..2aa284dd843 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/network/InetAddressesTests.java @@ -0,0 +1,217 @@ +/* + * Copyright (C) 2008 The Guava Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.elasticsearch.common.network; + +import org.elasticsearch.test.ESTestCase; + +import java.net.InetAddress; +import java.net.UnknownHostException; + +public class InetAddressesTests extends ESTestCase { + public void testForStringBogusInput() { + String[] bogusInputs = { + "", + "016.016.016.016", + "016.016.016", + "016.016", + "016", + "000.000.000.000", + "000", + "0x0a.0x0a.0x0a.0x0a", + "0x0a.0x0a.0x0a", + "0x0a.0x0a", + "0x0a", + "42.42.42.42.42", + "42.42.42", + "42.42", + "42", + "42..42.42", + "42..42.42.42", + "42.42.42.42.", + "42.42.42.42...", + ".42.42.42.42", + "...42.42.42.42", + "42.42.42.-0", + "42.42.42.+0", + ".", + "...", + "bogus", + "bogus.com", + "192.168.0.1.com", + "12345.67899.-54321.-98765", + "257.0.0.0", + "42.42.42.-42", + "3ffe::1.net", + "3ffe::1::1", + "1::2::3::4:5", + "::7:6:5:4:3:2:", // should end with ":0" + ":6:5:4:3:2:1::", // should begin with "0:" + "2001::db:::1", + "FEDC:9878", + "+1.+2.+3.4", + "1.2.3.4e0", + "::7:6:5:4:3:2:1:0", // too many parts + "7:6:5:4:3:2:1:0::", // too many parts + "9:8:7:6:5:4:3::2:1", // too many parts + "0:1:2:3::4:5:6:7", // :: must remove at least one 0. + "3ffe:0:0:0:0:0:0:0:1", // too many parts (9 instead of 8) + "3ffe::10000", // hextet exceeds 16 bits + "3ffe::goog", + "3ffe::-0", + "3ffe::+0", + "3ffe::-1", + ":", + ":::", + "::1.2.3", + "::1.2.3.4.5", + "::1.2.3.4:", + "1.2.3.4::", + "2001:db8::1:", + ":2001:db8::1", + ":1:2:3:4:5:6:7", + "1:2:3:4:5:6:7:", + ":1:2:3:4:5:6:" + }; + + for (int i = 0; i < bogusInputs.length; i++) { + try { + InetAddresses.forString(bogusInputs[i]); + fail("IllegalArgumentException expected for '" + bogusInputs[i] + "'"); + } catch (IllegalArgumentException expected) { + // expected behavior + } + assertFalse(InetAddresses.isInetAddress(bogusInputs[i])); + } + } + + public void test3ff31() { + try { + InetAddresses.forString("3ffe:::1"); + fail("IllegalArgumentException expected"); + } catch (IllegalArgumentException expected) { + // expected behavior + } + assertFalse(InetAddresses.isInetAddress("016.016.016.016")); + } + + public void testForStringIPv4Input() throws UnknownHostException { + String ipStr = "192.168.0.1"; + InetAddress ipv4Addr = null; + // Shouldn't hit DNS, because it's an IP string literal. + ipv4Addr = InetAddress.getByName(ipStr); + assertEquals(ipv4Addr, InetAddresses.forString(ipStr)); + assertTrue(InetAddresses.isInetAddress(ipStr)); + } + + public void testForStringIPv6Input() throws UnknownHostException { + String ipStr = "3ffe::1"; + InetAddress ipv6Addr = null; + // Shouldn't hit DNS, because it's an IP string literal. + ipv6Addr = InetAddress.getByName(ipStr); + assertEquals(ipv6Addr, InetAddresses.forString(ipStr)); + assertTrue(InetAddresses.isInetAddress(ipStr)); + } + + public void testForStringIPv6EightColons() throws UnknownHostException { + String[] eightColons = { + "::7:6:5:4:3:2:1", + "::7:6:5:4:3:2:0", + "7:6:5:4:3:2:1::", + "0:6:5:4:3:2:1::", + }; + + for (int i = 0; i < eightColons.length; i++) { + InetAddress ipv6Addr = null; + // Shouldn't hit DNS, because it's an IP string literal. 
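+            // (each eightColons entry compresses a single zero group with "::" at one end)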
+ ipv6Addr = InetAddress.getByName(eightColons[i]); + assertEquals(ipv6Addr, InetAddresses.forString(eightColons[i])); + assertTrue(InetAddresses.isInetAddress(eightColons[i])); + } + } + + public void testConvertDottedQuadToHex() throws UnknownHostException { + String[] ipStrings = {"7::0.128.0.127", "7::0.128.0.128", + "7::128.128.0.127", "7::0.128.128.127"}; + + for (String ipString : ipStrings) { + // Shouldn't hit DNS, because it's an IP string literal. + InetAddress ipv6Addr = InetAddress.getByName(ipString); + assertEquals(ipv6Addr, InetAddresses.forString(ipString)); + assertTrue(InetAddresses.isInetAddress(ipString)); + } + } + + public void testToAddrStringIPv4() { + // Don't need to test IPv4 much; it just calls getHostAddress(). + assertEquals("1.2.3.4", + InetAddresses.toAddrString( + InetAddresses.forString("1.2.3.4"))); + } + + public void testToAddrStringIPv6() { + assertEquals("1:2:3:4:5:6:7:8", + InetAddresses.toAddrString( + InetAddresses.forString("1:2:3:4:5:6:7:8"))); + assertEquals("2001:0:0:4::8", + InetAddresses.toAddrString( + InetAddresses.forString("2001:0:0:4:0:0:0:8"))); + assertEquals("2001::4:5:6:7:8", + InetAddresses.toAddrString( + InetAddresses.forString("2001:0:0:4:5:6:7:8"))); + assertEquals("2001:0:3:4:5:6:7:8", + InetAddresses.toAddrString( + InetAddresses.forString("2001:0:3:4:5:6:7:8"))); + assertEquals("0:0:3::ffff", + InetAddresses.toAddrString( + InetAddresses.forString("0:0:3:0:0:0:0:ffff"))); + assertEquals("::4:0:0:0:ffff", + InetAddresses.toAddrString( + InetAddresses.forString("0:0:0:4:0:0:0:ffff"))); + assertEquals("::5:0:0:ffff", + InetAddresses.toAddrString( + InetAddresses.forString("0:0:0:0:5:0:0:ffff"))); + assertEquals("1::4:0:0:7:8", + InetAddresses.toAddrString( + InetAddresses.forString("1:0:0:4:0:0:7:8"))); + assertEquals("::", + InetAddresses.toAddrString( + InetAddresses.forString("0:0:0:0:0:0:0:0"))); + assertEquals("::1", + InetAddresses.toAddrString( + InetAddresses.forString("0:0:0:0:0:0:0:1"))); + assertEquals("2001:658:22a:cafe::", + InetAddresses.toAddrString( + InetAddresses.forString("2001:0658:022a:cafe::"))); + assertEquals("::102:304", + InetAddresses.toAddrString( + InetAddresses.forString("::1.2.3.4"))); + } + + public void testToUriStringIPv4() { + String ipStr = "1.2.3.4"; + InetAddress ip = InetAddresses.forString(ipStr); + assertEquals("1.2.3.4", InetAddresses.toUriString(ip)); + } + + public void testToUriStringIPv6() { + // Unfortunately the InetAddress.toString() method for IPv6 addresses + // does not collapse contiguous shorts of zeroes with the :: abbreviation. + String ipStr = "3ffe::1"; + InetAddress ip = InetAddresses.forString(ipStr); + assertEquals("[3ffe::1]", InetAddresses.toUriString(ip)); + } +} diff --git a/core/src/test/java/org/elasticsearch/common/network/NetworkAddressTests.java b/core/src/test/java/org/elasticsearch/common/network/NetworkAddressTests.java index 5847bb75eeb..b53a56ac851 100644 --- a/core/src/test/java/org/elasticsearch/common/network/NetworkAddressTests.java +++ b/core/src/test/java/org/elasticsearch/common/network/NetworkAddressTests.java @@ -22,9 +22,11 @@ package org.elasticsearch.common.network; import org.elasticsearch.test.ESTestCase; import java.io.IOException; +import java.net.Inet4Address; import java.net.Inet6Address; import java.net.InetAddress; import java.net.InetSocketAddress; +import java.util.Random; /** * Tests for network address formatting. Please avoid using any methods that cause DNS lookups! 
@@ -84,6 +86,32 @@ public class NetworkAddressTests extends ESTestCase { assertEquals("[::1]:1234", NetworkAddress.formatAddress(new InetSocketAddress(forgeScoped(null, "::1", 5), 1234))); assertEquals("[::1]:1234", NetworkAddress.formatAddress(new InetSocketAddress(forgeScoped("localhost", "::1", 5), 1234))); } + + /** Test that ipv4 address formatting round trips */ + public void testRoundTripV4() throws Exception { + byte bytes[] = new byte[4]; + Random random = random(); + for (int i = 0; i < 10000; i++) { + random.nextBytes(bytes); + InetAddress expected = Inet4Address.getByAddress(bytes); + String formatted = NetworkAddress.formatAddress(expected); + InetAddress actual = InetAddress.getByName(formatted); + assertEquals(expected, actual); + } + } + + /** Test that ipv6 address formatting round trips */ + public void testRoundTripV6() throws Exception { + byte bytes[] = new byte[16]; + Random random = random(); + for (int i = 0; i < 10000; i++) { + random.nextBytes(bytes); + InetAddress expected = Inet6Address.getByAddress(bytes); + String formatted = NetworkAddress.formatAddress(expected); + InetAddress actual = InetAddress.getByName(formatted); + assertEquals(expected, actual); + } + } /** creates address without any lookups. hostname can be null, for missing */ private InetAddress forge(String hostname, String address) throws IOException { diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java index 23cf83b0d1c..4ce1a7c4630 100644 --- a/core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.common.xcontent.builder; import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.io.FastCharArrayWriter; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -351,4 +352,16 @@ public class XContentBuilderTests extends ESTestCase { " foobar: \"boom\"\n", string); } + public void testRenderGeoPoint() throws IOException { + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON).prettyPrint(); + builder.startObject().field("foo").value(new GeoPoint(1,2)).endObject(); + String string = builder.string(); + assertEquals("{\n" + + " \"foo\" : {\n" + + " \"lat\" : 1.0,\n" + + " \"lon\" : 2.0\n" + + " }\n" + + "}", string.trim()); + } + } diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java index 2ac69005eb4..ca95e50685f 100644 --- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java @@ -31,7 +31,7 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.routing.DjbHashFunction; +import org.elasticsearch.cluster.routing.Murmur3HashFunction; import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; @@ 
-441,7 +441,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { logger.info("[{}] Acquired semaphore and it has {} permits left", name, semaphore.availablePermits()); try { id = Integer.toString(idGenerator.incrementAndGet()); - int shard = ((InternalTestCluster) cluster()).getInstance(DjbHashFunction.class).hash(id) % numPrimaries; + int shard = Murmur3HashFunction.hash(id) % numPrimaries; logger.trace("[{}] indexing id [{}] through node [{}] targeting shard [{}]", name, id, node, shard); IndexResponse response = client.prepareIndex("test", "type", id).setSource("{}").setTimeout("1s").get(); assertThat(response.getVersion(), equalTo(1l)); diff --git a/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java b/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java index 98630edd176..1d51c308869 100644 --- a/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java @@ -18,8 +18,6 @@ */ package org.elasticsearch.gateway; -import com.google.common.collect.Iterators; - import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.store.ChecksumIndexInput; import org.apache.lucene.store.Directory; @@ -33,6 +31,7 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.xcontent.ToXContent; @@ -59,6 +58,7 @@ import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; +import java.util.stream.StreamSupport; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -535,7 +535,7 @@ public class MetaDataStateFormatTests extends ESTestCase { public Path[] content(String glob, Path dir) throws IOException { try (DirectoryStream stream = Files.newDirectoryStream(dir, glob)) { - return Iterators.toArray(stream.iterator(), Path.class); + return StreamSupport.stream(stream.spliterator(), false).toArray(length -> new Path[length]); } } diff --git a/core/src/test/java/org/elasticsearch/get/GetActionIT.java b/core/src/test/java/org/elasticsearch/get/GetActionIT.java index 55b104d14b8..b26e3ec220a 100644 --- a/core/src/test/java/org/elasticsearch/get/GetActionIT.java +++ b/core/src/test/java/org/elasticsearch/get/GetActionIT.java @@ -25,11 +25,7 @@ import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.delete.DeleteResponse; -import org.elasticsearch.action.get.GetRequestBuilder; -import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.action.get.MultiGetRequest; -import org.elasticsearch.action.get.MultiGetRequestBuilder; -import org.elasticsearch.action.get.MultiGetResponse; +import org.elasticsearch.action.get.*; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; @@ -53,14 +49,7 @@ import java.util.Set; import static java.util.Collections.singleton; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasKey; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; -import static org.hamcrest.Matchers.startsWith; +import static org.hamcrest.Matchers.*; public class GetActionIT extends ESIntegTestCase { @@ -600,7 +589,7 @@ public class GetActionIT extends ESIntegTestCase { assertThat(response.getResponses()[1].getResponse().getSourceAsMap().get("field").toString(), equalTo("value1")); assertThat(response.getResponses()[2].getFailure(), notNullValue()); assertThat(response.getResponses()[2].getFailure().getId(), equalTo("1")); - assertThat(response.getResponses()[2].getFailure().getMessage(), startsWith("[type1][1]: version conflict, current [1], provided [2]")); + assertThat(response.getResponses()[2].getFailure().getMessage(), startsWith("[type1][1]: version conflict")); assertThat(response.getResponses()[2].getFailure().getFailure(), instanceOf(VersionConflictEngineException.class)); //Version from Lucene index @@ -623,7 +612,7 @@ public class GetActionIT extends ESIntegTestCase { assertThat(response.getResponses()[1].getResponse().getSourceAsMap().get("field").toString(), equalTo("value1")); assertThat(response.getResponses()[2].getFailure(), notNullValue()); assertThat(response.getResponses()[2].getFailure().getId(), equalTo("1")); - assertThat(response.getResponses()[2].getFailure().getMessage(), startsWith("[type1][1]: version conflict, current [1], provided [2]")); + assertThat(response.getResponses()[2].getFailure().getMessage(), startsWith("[type1][1]: version conflict")); assertThat(response.getResponses()[2].getFailure().getFailure(), instanceOf(VersionConflictEngineException.class)); @@ -648,7 +637,7 @@ public class GetActionIT extends ESIntegTestCase { assertThat(response.getResponses()[1].getFailure(), notNullValue()); assertThat(response.getResponses()[1].getFailure().getId(), equalTo("2")); assertThat(response.getResponses()[1].getIndex(), equalTo("test")); - assertThat(response.getResponses()[1].getFailure().getMessage(), startsWith("[type1][2]: version conflict, current [2], provided [1]")); + assertThat(response.getResponses()[1].getFailure().getMessage(), startsWith("[type1][2]: version conflict")); assertThat(response.getResponses()[2].getId(), equalTo("2")); assertThat(response.getResponses()[2].getIndex(), equalTo("test")); assertThat(response.getResponses()[2].getFailure(), nullValue()); @@ -674,7 +663,7 @@ public class GetActionIT extends ESIntegTestCase { assertThat(response.getResponses()[1].getFailure(), notNullValue()); assertThat(response.getResponses()[1].getFailure().getId(), equalTo("2")); assertThat(response.getResponses()[1].getIndex(), equalTo("test")); - assertThat(response.getResponses()[1].getFailure().getMessage(), startsWith("[type1][2]: version conflict, current [2], provided [1]")); + assertThat(response.getResponses()[1].getFailure().getMessage(), startsWith("[type1][2]: version conflict")); assertThat(response.getResponses()[2].getId(), equalTo("2")); assertThat(response.getResponses()[2].getIndex(), equalTo("test")); assertThat(response.getResponses()[2].getFailure(), nullValue()); diff --git a/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java 
b/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java index a54be1766f8..dd73e41c9f0 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java +++ b/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java @@ -24,6 +24,8 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; +import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexResponse; @@ -36,6 +38,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShadowIndexShard; +import org.elasticsearch.index.translog.TranslogStats; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.recovery.RecoveryTarget; import org.elasticsearch.plugins.Plugin; @@ -175,6 +178,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase { Settings idxSettings = Settings.builder() .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2) + .put(IndexShard.INDEX_TRANSLOG_DISABLE_FLUSH, true) .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString()) .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true) @@ -188,6 +192,21 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase { client().prepareIndex(IDX, "doc", "1").setSource("foo", "bar").get(); client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get(); + IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats(IDX).clear().setTranslog(true).get(); + assertEquals(2, indicesStatsResponse.getIndex(IDX).getPrimaries().getTranslog().estimatedNumberOfOperations()); + assertEquals(2, indicesStatsResponse.getIndex(IDX).getTotal().getTranslog().estimatedNumberOfOperations()); + for (IndicesService service : internalCluster().getInstances(IndicesService.class)) { + IndexService indexService = service.indexService(IDX); + if (indexService != null) { + IndexShard shard = indexService.getShard(0); + TranslogStats translogStats = shard.translogStats(); + assertTrue(translogStats != null || shard instanceof ShadowIndexShard); + if (translogStats != null) { + assertEquals(2, translogStats.estimatedNumberOfOperations()); + } + } + } + // Check that we can get doc 1 and 2, because we are doing realtime // gets and getting from the primary GetResponse gResp1 = client().prepareGet(IDX, "doc", "1").setRealtime(true).setFields("foo").get(); diff --git a/core/src/test/java/org/elasticsearch/index/VersionTypeTests.java b/core/src/test/java/org/elasticsearch/index/VersionTypeTests.java index e4a97d2a4be..3f7ea542305 100644 --- a/core/src/test/java/org/elasticsearch/index/VersionTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/VersionTypeTests.java @@ -29,26 +29,31 @@ public class VersionTypeTests extends ESTestCase { @Test public void testInternalVersionConflict() throws Exception { - assertFalse(VersionType.INTERNAL.isVersionConflictForWrites(10, 
Versions.MATCH_ANY)); + assertFalse(VersionType.INTERNAL.isVersionConflictForWrites(10, Versions.MATCH_ANY, randomBoolean())); assertFalse(VersionType.INTERNAL.isVersionConflictForReads(10, Versions.MATCH_ANY)); // if we don't have a version in the index we accept everything - assertFalse(VersionType.INTERNAL.isVersionConflictForWrites(Versions.NOT_SET, 10)); + assertFalse(VersionType.INTERNAL.isVersionConflictForWrites(Versions.NOT_SET, 10, randomBoolean())); assertFalse(VersionType.INTERNAL.isVersionConflictForReads(Versions.NOT_SET, 10)); - assertFalse(VersionType.INTERNAL.isVersionConflictForWrites(Versions.NOT_SET, Versions.MATCH_ANY)); + assertFalse(VersionType.INTERNAL.isVersionConflictForWrites(Versions.NOT_SET, Versions.MATCH_ANY, randomBoolean())); assertFalse(VersionType.INTERNAL.isVersionConflictForReads(Versions.NOT_SET, Versions.MATCH_ANY)); // if we didn't find a version (but the index does support it), we don't like it unless MATCH_ANY - assertTrue(VersionType.INTERNAL.isVersionConflictForWrites(Versions.NOT_FOUND, 10)); + assertTrue(VersionType.INTERNAL.isVersionConflictForWrites(Versions.NOT_FOUND, 10, randomBoolean())); assertTrue(VersionType.INTERNAL.isVersionConflictForReads(Versions.NOT_FOUND, 10)); - assertFalse(VersionType.INTERNAL.isVersionConflictForWrites(Versions.NOT_FOUND, Versions.MATCH_ANY)); + assertFalse(VersionType.INTERNAL.isVersionConflictForWrites(Versions.NOT_FOUND, Versions.MATCH_ANY, randomBoolean())); assertFalse(VersionType.INTERNAL.isVersionConflictForReads(Versions.NOT_FOUND, Versions.MATCH_ANY)); + // deletes + assertFalse(VersionType.INTERNAL.isVersionConflictForWrites(Versions.NOT_FOUND, Versions.MATCH_DELETED, true)); + assertFalse(VersionType.INTERNAL.isVersionConflictForWrites(10, Versions.MATCH_DELETED, true)); + + // and the stupid usual case - assertFalse(VersionType.INTERNAL.isVersionConflictForWrites(10, 10)); + assertFalse(VersionType.INTERNAL.isVersionConflictForWrites(10, 10, randomBoolean())); assertFalse(VersionType.INTERNAL.isVersionConflictForReads(10, 10)); - assertTrue(VersionType.INTERNAL.isVersionConflictForWrites(9, 10)); + assertTrue(VersionType.INTERNAL.isVersionConflictForWrites(9, 10, randomBoolean())); assertTrue(VersionType.INTERNAL.isVersionConflictForReads(9, 10)); - assertTrue(VersionType.INTERNAL.isVersionConflictForWrites(10, 9)); + assertTrue(VersionType.INTERNAL.isVersionConflictForWrites(10, 9, randomBoolean())); assertTrue(VersionType.INTERNAL.isVersionConflictForReads(10, 9)); // Old indexing code, dictating behavior @@ -99,23 +104,23 @@ public class VersionTypeTests extends ESTestCase { @Test public void testExternalVersionConflict() throws Exception { - assertFalse(VersionType.EXTERNAL.isVersionConflictForWrites(Versions.NOT_FOUND, 10)); - assertFalse(VersionType.EXTERNAL.isVersionConflictForWrites(Versions.NOT_SET, 10)); + assertFalse(VersionType.EXTERNAL.isVersionConflictForWrites(Versions.NOT_FOUND, 10, randomBoolean())); + assertFalse(VersionType.EXTERNAL.isVersionConflictForWrites(Versions.NOT_SET, 10, randomBoolean())); // MATCH_ANY must throw an exception in the case of external version, as the version must be set! 
it used as the new value - assertTrue(VersionType.EXTERNAL.isVersionConflictForWrites(10, Versions.MATCH_ANY)); + assertTrue(VersionType.EXTERNAL.isVersionConflictForWrites(10, Versions.MATCH_ANY, randomBoolean())); // if we didn't find a version (but the index does support it), we always accept - assertFalse(VersionType.EXTERNAL.isVersionConflictForWrites(Versions.NOT_FOUND, Versions.NOT_FOUND)); - assertFalse(VersionType.EXTERNAL.isVersionConflictForWrites(Versions.NOT_FOUND, 10)); + assertFalse(VersionType.EXTERNAL.isVersionConflictForWrites(Versions.NOT_FOUND, Versions.NOT_FOUND, randomBoolean())); + assertFalse(VersionType.EXTERNAL.isVersionConflictForWrites(Versions.NOT_FOUND, 10, randomBoolean())); assertTrue(VersionType.EXTERNAL.isVersionConflictForReads(Versions.NOT_FOUND, Versions.NOT_FOUND)); assertTrue(VersionType.EXTERNAL.isVersionConflictForReads(Versions.NOT_FOUND, 10)); assertFalse(VersionType.EXTERNAL.isVersionConflictForReads(Versions.NOT_FOUND, Versions.MATCH_ANY)); // and the standard behavior - assertTrue(VersionType.EXTERNAL.isVersionConflictForWrites(10, 10)); - assertFalse(VersionType.EXTERNAL.isVersionConflictForWrites(9, 10)); - assertTrue(VersionType.EXTERNAL.isVersionConflictForWrites(10, 9)); + assertTrue(VersionType.EXTERNAL.isVersionConflictForWrites(10, 10, randomBoolean())); + assertFalse(VersionType.EXTERNAL.isVersionConflictForWrites(9, 10, randomBoolean())); + assertTrue(VersionType.EXTERNAL.isVersionConflictForWrites(10, 9, randomBoolean())); assertFalse(VersionType.EXTERNAL.isVersionConflictForReads(10, 10)); assertTrue(VersionType.EXTERNAL.isVersionConflictForReads(9, 10)); @@ -137,14 +142,14 @@ public class VersionTypeTests extends ESTestCase { @Test public void testExternalGTEVersionConflict() throws Exception { - assertFalse(VersionType.EXTERNAL_GTE.isVersionConflictForWrites(Versions.NOT_FOUND, 10)); - assertFalse(VersionType.EXTERNAL_GTE.isVersionConflictForWrites(Versions.NOT_SET, 10)); + assertFalse(VersionType.EXTERNAL_GTE.isVersionConflictForWrites(Versions.NOT_FOUND, 10, randomBoolean())); + assertFalse(VersionType.EXTERNAL_GTE.isVersionConflictForWrites(Versions.NOT_SET, 10, randomBoolean())); // MATCH_ANY must throw an exception in the case of external version, as the version must be set! 
it used as the new value
-        assertTrue(VersionType.EXTERNAL_GTE.isVersionConflictForWrites(10, Versions.MATCH_ANY));
+        assertTrue(VersionType.EXTERNAL_GTE.isVersionConflictForWrites(10, Versions.MATCH_ANY, randomBoolean()));
 
         // if we didn't find a version (but the index does support it), we always accept
-        assertFalse(VersionType.EXTERNAL_GTE.isVersionConflictForWrites(Versions.NOT_FOUND, Versions.NOT_FOUND));
-        assertFalse(VersionType.EXTERNAL_GTE.isVersionConflictForWrites(Versions.NOT_FOUND, 10));
+        assertFalse(VersionType.EXTERNAL_GTE.isVersionConflictForWrites(Versions.NOT_FOUND, Versions.NOT_FOUND, randomBoolean()));
+        assertFalse(VersionType.EXTERNAL_GTE.isVersionConflictForWrites(Versions.NOT_FOUND, 10, randomBoolean()));
         assertTrue(VersionType.EXTERNAL_GTE.isVersionConflictForReads(Versions.NOT_FOUND, Versions.NOT_FOUND));
         assertTrue(VersionType.EXTERNAL_GTE.isVersionConflictForReads(Versions.NOT_FOUND, 10));
@@ -152,9 +157,9 @@
 
         // and the standard behavior
-        assertFalse(VersionType.EXTERNAL_GTE.isVersionConflictForWrites(10, 10));
-        assertFalse(VersionType.EXTERNAL_GTE.isVersionConflictForWrites(9, 10));
-        assertTrue(VersionType.EXTERNAL_GTE.isVersionConflictForWrites(10, 9));
+        assertFalse(VersionType.EXTERNAL_GTE.isVersionConflictForWrites(10, 10, randomBoolean()));
+        assertFalse(VersionType.EXTERNAL_GTE.isVersionConflictForWrites(9, 10, randomBoolean()));
+        assertTrue(VersionType.EXTERNAL_GTE.isVersionConflictForWrites(10, 9, randomBoolean()));
         assertFalse(VersionType.EXTERNAL_GTE.isVersionConflictForReads(10, 10));
         assertTrue(VersionType.EXTERNAL_GTE.isVersionConflictForReads(9, 10));
@@ -166,14 +171,20 @@
     @Test
     public void testForceVersionConflict() throws Exception {
-        assertFalse(VersionType.FORCE.isVersionConflictForWrites(Versions.NOT_FOUND, 10));
-        assertFalse(VersionType.FORCE.isVersionConflictForWrites(Versions.NOT_SET, 10));
-        // MATCH_ANY must throw an exception in the case of external version, as the version must be set! it used as the new value
-        assertTrue(VersionType.FORCE.isVersionConflictForWrites(10, Versions.MATCH_ANY));
+        assertFalse(VersionType.FORCE.isVersionConflictForWrites(Versions.NOT_FOUND, 10, randomBoolean()));
+        assertFalse(VersionType.FORCE.isVersionConflictForWrites(Versions.NOT_SET, 10, randomBoolean()));
+
+        // MATCH_ANY must throw an exception in the case of force version, as the version must be set! it is used as the new value
+        try {
+            VersionType.FORCE.isVersionConflictForWrites(10, Versions.MATCH_ANY, randomBoolean());
+            fail();
+        } catch (IllegalStateException e) {
+            //yes!!
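+            // presumably: FORCE needs an explicit version to force, so MATCH_ANY is rejected outright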
+ } // if we didn't find a version (but the index does support it), we always accept - assertFalse(VersionType.FORCE.isVersionConflictForWrites(Versions.NOT_FOUND, Versions.NOT_FOUND)); - assertFalse(VersionType.FORCE.isVersionConflictForWrites(Versions.NOT_FOUND, 10)); + assertFalse(VersionType.FORCE.isVersionConflictForWrites(Versions.NOT_FOUND, Versions.NOT_FOUND, randomBoolean())); + assertFalse(VersionType.FORCE.isVersionConflictForWrites(Versions.NOT_FOUND, 10, randomBoolean())); assertFalse(VersionType.FORCE.isVersionConflictForReads(Versions.NOT_FOUND, Versions.NOT_FOUND)); assertFalse(VersionType.FORCE.isVersionConflictForReads(Versions.NOT_FOUND, 10)); @@ -181,9 +192,9 @@ public class VersionTypeTests extends ESTestCase { // and the standard behavior - assertFalse(VersionType.FORCE.isVersionConflictForWrites(10, 10)); - assertFalse(VersionType.FORCE.isVersionConflictForWrites(9, 10)); - assertFalse(VersionType.FORCE.isVersionConflictForWrites(10, 9)); + assertFalse(VersionType.FORCE.isVersionConflictForWrites(10, 10, randomBoolean())); + assertFalse(VersionType.FORCE.isVersionConflictForWrites(9, 10, randomBoolean())); + assertFalse(VersionType.FORCE.isVersionConflictForWrites(10, 9, randomBoolean())); assertFalse(VersionType.FORCE.isVersionConflictForReads(10, 10)); assertFalse(VersionType.FORCE.isVersionConflictForReads(9, 10)); assertFalse(VersionType.FORCE.isVersionConflictForReads(10, 9)); diff --git a/core/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTests.java b/core/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTests.java index 6a9608619ff..c781a58b905 100644 --- a/core/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTests.java +++ b/core/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTests.java @@ -96,7 +96,7 @@ public class BitSetFilterCacheTests extends ESTestCase { // now cached assertThat(matchCount(filter, reader), equalTo(3)); // There are 3 segments - assertThat(cache.getLoadedFilters().size(), equalTo(3l)); + assertThat(cache.getLoadedFilters().weight(), equalTo(3L)); writer.forceMerge(1); reader.close(); @@ -108,12 +108,12 @@ public class BitSetFilterCacheTests extends ESTestCase { // now cached assertThat(matchCount(filter, reader), equalTo(3)); // Only one segment now, so the size must be 1 - assertThat(cache.getLoadedFilters().size(), equalTo(1l)); + assertThat(cache.getLoadedFilters().weight(), equalTo(1L)); reader.close(); writer.close(); // There is no reference from readers and writer to any segment in the test index, so the size in the fbs cache must be 0 - assertThat(cache.getLoadedFilters().size(), equalTo(0l)); + assertThat(cache.getLoadedFilters().weight(), equalTo(0L)); } public void testListener() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/index/codec/postingformat/Elasticsearch090RWPostingsFormat.java b/core/src/test/java/org/elasticsearch/index/codec/postingformat/Elasticsearch090RWPostingsFormat.java deleted file mode 100644 index a4285e6f81e..00000000000 --- a/core/src/test/java/org/elasticsearch/index/codec/postingformat/Elasticsearch090RWPostingsFormat.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.codec.postingformat; - -import com.google.common.collect.Iterators; -import org.apache.lucene.codecs.FieldsConsumer; -import org.apache.lucene.codecs.PostingsFormat; -import org.apache.lucene.index.Fields; -import org.apache.lucene.index.FilterLeafReader; -import org.apache.lucene.index.SegmentWriteState; -import org.elasticsearch.common.util.BloomFilter; -import org.elasticsearch.index.codec.postingsformat.BloomFilterPostingsFormat; -import org.elasticsearch.index.codec.postingsformat.BloomFilterPostingsFormat.BloomFilteredFieldsConsumer; -import org.elasticsearch.index.codec.postingsformat.Elasticsearch090PostingsFormat; -import org.elasticsearch.index.mapper.internal.UidFieldMapper; - -import java.io.IOException; -import java.util.Iterator; -import java.util.stream.StreamSupport; - -/** read-write version with blooms for testing */ -public class Elasticsearch090RWPostingsFormat extends Elasticsearch090PostingsFormat { - @Override - public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException { - final PostingsFormat delegate = getDefaultWrapped(); - final BloomFilteredFieldsConsumer fieldsConsumer = new BloomFilterPostingsFormat(delegate, BloomFilter.Factory.DEFAULT) { - @Override - public BloomFilteredFieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException { - return new BloomFilteredFieldsConsumer(delegate.fieldsConsumer(state), state,delegate); - } - }.fieldsConsumer(state); - return new FieldsConsumer() { - - @Override - public void write(Fields fields) throws IOException { - - Fields maskedFields = new FilterLeafReader.FilterFields(fields) { - @Override - public Iterator iterator() { - return StreamSupport.stream(this.in.spliterator(), false).filter(UID_FIELD_FILTER.negate()).iterator(); - } - }; - fieldsConsumer.getDelegate().write(maskedFields); - maskedFields = new FilterLeafReader.FilterFields(fields) { - @Override - public Iterator iterator() { - return Iterators.singletonIterator(UidFieldMapper.NAME); - } - }; - // only go through bloom for the UID field - fieldsConsumer.write(maskedFields); - } - - @Override - public void close() throws IOException { - fieldsConsumer.close(); - } - }; - } -} diff --git a/core/src/test/java/org/elasticsearch/index/codec/postingformat/PostingsFormatTests.java b/core/src/test/java/org/elasticsearch/index/codec/postingformat/PostingsFormatTests.java deleted file mode 100644 index f9884452c62..00000000000 --- a/core/src/test/java/org/elasticsearch/index/codec/postingformat/PostingsFormatTests.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.codec.postingformat; - -import com.carrotsearch.randomizedtesting.annotations.Listeners; -import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; -import org.apache.lucene.codecs.Codec; -import org.apache.lucene.index.BasePostingsFormatTestCase; -import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.TestUtil; -import org.apache.lucene.util.TimeUnits; -import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter; - -/** Runs elasticsearch postings format against lucene's standard postings format tests */ -@Listeners({ - ReproduceInfoPrinter.class -}) -@TimeoutSuite(millis = TimeUnits.HOUR) -@LuceneTestCase.SuppressSysoutChecks(bugUrl = "we log a lot on purpose") -public class PostingsFormatTests extends BasePostingsFormatTestCase { - - @Override - protected Codec getCodec() { - return TestUtil.alwaysPostingsFormat(new Elasticsearch090RWPostingsFormat()); - } - -} diff --git a/core/src/test/java/org/elasticsearch/index/engine/EngineSearcherTotalHitsMatcher.java b/core/src/test/java/org/elasticsearch/index/engine/EngineSearcherTotalHitsMatcher.java index 583b0f5d551..362ee9c5332 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/EngineSearcherTotalHitsMatcher.java +++ b/core/src/test/java/org/elasticsearch/index/engine/EngineSearcherTotalHitsMatcher.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.engine; import org.apache.lucene.search.Query; -import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.search.Queries; import org.hamcrest.Description; import org.hamcrest.Matcher; @@ -46,7 +45,7 @@ public final class EngineSearcherTotalHitsMatcher extends TypeSafeMatcher latestGetResult = new AtomicReference<>(); latestGetResult.set(engine.get(new Engine.Get(true, newUid("1")))); @@ -597,7 +594,7 @@ public class InternalEngineTests extends ESTestCase { Document document = testDocumentWithTextField(); document.add(new Field(SourceFieldMapper.NAME, B_1.toBytes(), SourceFieldMapper.Defaults.FIELD_TYPE)); ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, document, B_1, null); - engine.create(new Engine.Create(newUid("1"), doc)); + engine.index(new Engine.Index(newUid("1"), doc)); // its not there... searchResult = engine.acquireSearcher("test"); @@ -689,7 +686,7 @@ public class InternalEngineTests extends ESTestCase { document = testDocumentWithTextField(); document.add(new Field(SourceFieldMapper.NAME, B_1.toBytes(), SourceFieldMapper.Defaults.FIELD_TYPE)); doc = testParsedDocument("1", "1", "test", null, -1, -1, document, B_1, null); - engine.create(new Engine.Create(newUid("1"), doc)); + engine.index(new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED)); // its not there... 
searchResult = engine.acquireSearcher("test"); @@ -750,7 +747,7 @@ public class InternalEngineTests extends ESTestCase { // create a document ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null); - engine.create(new Engine.Create(newUid("1"), doc)); + engine.index(new Engine.Index(newUid("1"), doc)); // its not there... searchResult = engine.acquireSearcher("test"); @@ -786,7 +783,7 @@ public class InternalEngineTests extends ESTestCase { new LogByteSizeMergePolicy()), false)) { final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20); ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null); - engine.create(new Engine.Create(newUid("1"), doc)); + engine.index(new Engine.Index(newUid("1"), doc)); Engine.CommitId commitID = engine.flush(); assertThat(commitID, equalTo(new Engine.CommitId(store.readLastCommittedSegmentsInfo().getId()))); byte[] wrongBytes = Base64.decode(commitID.toString()); @@ -794,7 +791,7 @@ public class InternalEngineTests extends ESTestCase { Engine.CommitId wrongId = new Engine.CommitId(wrongBytes); assertEquals("should fail to sync flush with wrong id (but no docs)", engine.syncFlush(syncId + "1", wrongId), Engine.SyncedFlushResult.COMMIT_MISMATCH); - engine.create(new Engine.Create(newUid("2"), doc)); + engine.index(new Engine.Index(newUid("2"), doc)); assertEquals("should fail to sync flush with right id but pending doc", engine.syncFlush(syncId + "2", commitID), Engine.SyncedFlushResult.PENDING_OPERATIONS); commitID = engine.flush(); @@ -808,7 +805,7 @@ public class InternalEngineTests extends ESTestCase { public void testSycnedFlushSurvivesEngineRestart() throws IOException { final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20); ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null); - engine.create(new Engine.Create(newUid("1"), doc)); + engine.index(new Engine.Index(newUid("1"), doc)); final Engine.CommitId commitID = engine.flush(); assertEquals("should succeed to flush commit with right id and no pending doc", engine.syncFlush(syncId, commitID), Engine.SyncedFlushResult.SUCCESS); @@ -827,14 +824,14 @@ public class InternalEngineTests extends ESTestCase { public void testSycnedFlushVanishesOnReplay() throws IOException { final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20); ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null); - engine.create(new Engine.Create(newUid("1"), doc)); + engine.index(new Engine.Index(newUid("1"), doc)); final Engine.CommitId commitID = engine.flush(); assertEquals("should succeed to flush commit with right id and no pending doc", engine.syncFlush(syncId, commitID), Engine.SyncedFlushResult.SUCCESS); assertEquals(store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID), syncId); assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId); doc = testParsedDocument("2", "2", "test", null, -1, -1, testDocumentWithTextField(), new BytesArray("{}"), null); - engine.create(new Engine.Create(newUid("2"), doc)); + engine.index(new Engine.Index(newUid("2"), doc)); EngineConfig config = engine.config(); engine.close(); final MockDirectoryWrapper directory = DirectoryUtils.getLeaf(store.directory(), MockDirectoryWrapper.class); @@ -851,27 +848,15 @@ public class InternalEngineTests extends ESTestCase { @Test public 
void testVersioningNewCreate() { ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null); - Engine.Create create = new Engine.Create(newUid("1"), doc); - engine.create(create); + Engine.Index create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED); + engine.index(create); assertThat(create.version(), equalTo(1l)); - create = new Engine.Create(newUid("1"), doc, create.version(), create.versionType().versionTypeForReplicationAndRecovery(), REPLICA, 0); - replicaEngine.create(create); + create = new Engine.Index(newUid("1"), doc, create.version(), create.versionType().versionTypeForReplicationAndRecovery(), REPLICA, 0); + replicaEngine.index(create); assertThat(create.version(), equalTo(1l)); } - @Test - public void testExternalVersioningNewCreate() { - ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null); - Engine.Create create = new Engine.Create(newUid("1"), doc, 12, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, 0); - engine.create(create); - assertThat(create.version(), equalTo(12l)); - - create = new Engine.Create(newUid("1"), doc, create.version(), create.versionType().versionTypeForReplicationAndRecovery(), REPLICA, 0); - replicaEngine.create(create); - assertThat(create.version(), equalTo(12l)); - } - @Test public void testVersioningNewIndex() { ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null); @@ -1136,9 +1121,9 @@ public class InternalEngineTests extends ESTestCase { } // we shouldn't be able to create as well - Engine.Create create = new Engine.Create(newUid("1"), doc, 2l, VersionType.INTERNAL, PRIMARY, 0); + Engine.Index create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0); try { - engine.create(create); + engine.index(create); } catch (VersionConflictEngineException e) { // all is well } @@ -1193,9 +1178,9 @@ public class InternalEngineTests extends ESTestCase { } // we shouldn't be able to create as well - Engine.Create create = new Engine.Create(newUid("1"), doc, 2l, VersionType.INTERNAL, PRIMARY, 0); + Engine.Index create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0); try { - engine.create(create); + engine.index(create); } catch (VersionConflictEngineException e) { // all is well } @@ -1204,15 +1189,15 @@ public class InternalEngineTests extends ESTestCase { @Test public void testVersioningCreateExistsException() { ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null); - Engine.Create create = new Engine.Create(newUid("1"), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, 0); - engine.create(create); + Engine.Index create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0); + engine.index(create); assertThat(create.version(), equalTo(1l)); - create = new Engine.Create(newUid("1"), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, 0); + create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0); try { - engine.create(create); + engine.index(create); fail(); - } catch (DocumentAlreadyExistsException e) { + } catch (VersionConflictEngineException e) { // all is well } } @@ -1220,17 +1205,17 @@ public class InternalEngineTests extends ESTestCase { @Test public void testVersioningCreateExistsExceptionWithFlush() { ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, 
testDocument(), B_1, null); - Engine.Create create = new Engine.Create(newUid("1"), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, 0); - engine.create(create); + Engine.Index create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0); + engine.index(create); assertThat(create.version(), equalTo(1l)); engine.flush(); - create = new Engine.Create(newUid("1"), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, 0); + create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0); try { - engine.create(create); + engine.index(create); fail(); - } catch (DocumentAlreadyExistsException e) { + } catch (VersionConflictEngineException e) { // all is well } } @@ -1394,13 +1379,13 @@ public class InternalEngineTests extends ESTestCase { try { // First, with DEBUG, which should NOT log IndexWriter output: ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null); - engine.create(new Engine.Create(newUid("1"), doc)); + engine.index(new Engine.Index(newUid("1"), doc)); engine.flush(); assertFalse(mockAppender.sawIndexWriterMessage); // Again, with TRACE, which should log IndexWriter output: rootLogger.setLevel(Level.TRACE); - engine.create(new Engine.Create(newUid("2"), doc)); + engine.index(new Engine.Index(newUid("2"), doc)); engine.flush(); assertTrue(mockAppender.sawIndexWriterMessage); @@ -1429,14 +1414,14 @@ public class InternalEngineTests extends ESTestCase { try { // First, with DEBUG, which should NOT log IndexWriter output: ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null); - engine.create(new Engine.Create(newUid("1"), doc)); + engine.index(new Engine.Index(newUid("1"), doc)); engine.flush(); assertFalse(mockAppender.sawIndexWriterMessage); assertFalse(mockAppender.sawIndexWriterIFDMessage); // Again, with TRACE, which should only log IndexWriter IFD output: iwIFDLogger.setLevel(Level.TRACE); - engine.create(new Engine.Create(newUid("2"), doc)); + engine.index(new Engine.Index(newUid("2"), doc)); engine.flush(); assertFalse(mockAppender.sawIndexWriterMessage); assertTrue(mockAppender.sawIndexWriterIFDMessage); @@ -1636,8 +1621,8 @@ public class InternalEngineTests extends ESTestCase { final int numDocs = randomIntBetween(1, 10); for (int i = 0; i < numDocs; i++) { ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null); - Engine.Create firstIndexRequest = new Engine.Create(newUid(Integer.toString(i)), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime()); - engine.create(firstIndexRequest); + Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime()); + engine.index(firstIndexRequest); assertThat(firstIndexRequest.version(), equalTo(1l)); } engine.refresh("test"); @@ -1689,8 +1674,8 @@ public class InternalEngineTests extends ESTestCase { final int numDocs = randomIntBetween(1, 10); for (int i = 0; i < numDocs; i++) { ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null); - Engine.Create firstIndexRequest = new Engine.Create(newUid(Integer.toString(i)), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime()); - engine.create(firstIndexRequest); + Engine.Index 
firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime()); + engine.index(firstIndexRequest); assertThat(firstIndexRequest.version(), equalTo(1l)); } engine.refresh("test"); @@ -1730,10 +1715,6 @@ public class InternalEngineTests extends ESTestCase { Collections.shuffle(indexes, random()); for (Path indexFile : indexes.subList(0, scaledRandomIntBetween(1, indexes.size() / 2))) { final String indexName = indexFile.getFileName().toString().replace(".zip", "").toLowerCase(Locale.ROOT); - Version version = Version.fromString(indexName.replace("index-", "")); - if (version.onOrAfter(Version.V_2_0_0_beta1)) { - continue; - } Path unzipDir = createTempDir(); Path unzipDataDir = unzipDir.resolve("data"); // decompress the index @@ -1753,11 +1734,9 @@ public class InternalEngineTests extends ESTestCase { assertTrue("[" + indexFile + "] missing index dir: " + src.toString(), Files.exists(src)); assertTrue("[" + indexFile + "] missing translog dir: " + translog.toString(), Files.exists(translog)); Path[] tlogFiles = filterExtraFSFiles(FileSystemUtils.files(translog)); - assertEquals(Arrays.toString(tlogFiles), tlogFiles.length, 1); + assertEquals(Arrays.toString(tlogFiles), tlogFiles.length, 2); // ckp & tlog + Path tlogFile = tlogFiles[0].getFileName().toString().endsWith("tlog") ? tlogFiles[0] : tlogFiles[1]; final long size = Files.size(tlogFiles[0]); - - final long generation = TranslogTests.parseLegacyTranslogFile(tlogFiles[0]); - assertTrue(generation >= 1); logger.debug("upgrading index {} file: {} size: {}", indexName, tlogFiles[0].getFileName(), size); Directory directory = newFSDirectory(src.resolve("0").resolve("index")); Store store = createStore(directory); @@ -1790,8 +1769,8 @@ public class InternalEngineTests extends ESTestCase { final int numExtraDocs = randomIntBetween(1, 10); for (int i = 0; i < numExtraDocs; i++) { ParsedDocument doc = testParsedDocument("extra" + Integer.toString(i), "extra" + Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null); - Engine.Create firstIndexRequest = new Engine.Create(newUid(Integer.toString(i)), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime()); - engine.create(firstIndexRequest); + Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime()); + engine.index(firstIndexRequest); assertThat(firstIndexRequest.version(), equalTo(1l)); } engine.refresh("test"); @@ -1819,8 +1798,8 @@ public class InternalEngineTests extends ESTestCase { final int numDocs = randomIntBetween(1, 10); for (int i = 0; i < numDocs; i++) { ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null); - Engine.Create firstIndexRequest = new Engine.Create(newUid(Integer.toString(i)), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime()); - engine.create(firstIndexRequest); + Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime()); + engine.index(firstIndexRequest); assertThat(firstIndexRequest.version(), equalTo(1l)); } engine.refresh("test"); @@ -1868,8 +1847,8 @@ public class InternalEngineTests extends ESTestCase { int randomId = randomIntBetween(numDocs + 1, numDocs + 10); String uuidValue = "test#" + Integer.toString(randomId); 
ParsedDocument doc = testParsedDocument(uuidValue, Integer.toString(randomId), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null); - Engine.Create firstIndexRequest = new Engine.Create(newUid(uuidValue), doc, 1, VersionType.EXTERNAL, PRIMARY, System.nanoTime()); - engine.create(firstIndexRequest); + Engine.Index firstIndexRequest = new Engine.Index(newUid(uuidValue), doc, 1, VersionType.EXTERNAL, PRIMARY, System.nanoTime()); + engine.index(firstIndexRequest); assertThat(firstIndexRequest.version(), equalTo(1l)); if (flush) { engine.flush(); @@ -1914,15 +1893,15 @@ public class InternalEngineTests extends ESTestCase { public final AtomicInteger recoveredOps = new AtomicInteger(0); public TranslogHandler(String indexName, ESLogger logger) { - super(new ShardId("test", 0), null, null, null, null, logger); + super(new ShardId("test", 0), null, logger); Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); RootObjectMapper.Builder rootBuilder = new RootObjectMapper.Builder("test"); Index index = new Index(indexName); AnalysisService analysisService = new AnalysisService(index, settings); - SimilarityLookupService similarityLookupService = new SimilarityLookupService(index, settings); - MapperService mapperService = new MapperService(index, settings, analysisService, similarityLookupService, null); + SimilarityService similarityService = new SimilarityService(index, settings); + MapperService mapperService = new MapperService(index, settings, analysisService, similarityService, null); DocumentMapper.Builder b = new DocumentMapper.Builder(settings, rootBuilder, mapperService); - DocumentMapperParser parser = new DocumentMapperParser(settings, mapperService, analysisService, similarityLookupService, null); + DocumentMapperParser parser = new DocumentMapperParser(settings, mapperService, analysisService, similarityService, null); this.docMapper = b.build(mapperService, parser); } @@ -1936,21 +1915,14 @@ public class InternalEngineTests extends ESTestCase { protected void operationProcessed() { recoveredOps.incrementAndGet(); } - - @Override - public void performRecoveryOperation(Engine engine, Translog.Operation operation, boolean allowMappingUpdates) { - if (operation.opType() != Translog.Operation.Type.DELETE_BY_QUERY) { // we don't support del by query in this test - super.performRecoveryOperation(engine, operation, allowMappingUpdates); - } - } } public void testRecoverFromForeignTranslog() throws IOException { final int numDocs = randomIntBetween(1, 10); for (int i = 0; i < numDocs; i++) { ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null); - Engine.Create firstIndexRequest = new Engine.Create(newUid(Integer.toString(i)), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime()); - engine.create(firstIndexRequest); + Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime()); + engine.index(firstIndexRequest); assertThat(firstIndexRequest.version(), equalTo(1l)); } engine.refresh("test"); @@ -1968,7 +1940,7 @@ public class InternalEngineTests extends ESTestCase { engine.close(); Translog translog = new Translog(new TranslogConfig(shardId, createTempDir(), Settings.EMPTY, Translog.Durabilty.REQUEST, BigArrays.NON_RECYCLING_INSTANCE, threadPool)); - translog.add(new Translog.Create("test", "SomeBogusId", 
"{}".getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "SomeBogusId", "{}".getBytes(Charset.forName("UTF-8")))); assertEquals(generation.translogFileGeneration, translog.currentFileGeneration()); translog.close(); diff --git a/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java index b5987a92623..2c6ee40b86e 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java @@ -236,7 +236,7 @@ public class ShadowEngineTests extends ESTestCase { public void testCommitStats() { // create a doc and refresh ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null); - primaryEngine.create(new Engine.Create(newUid("1"), doc)); + primaryEngine.index(new Engine.Index(newUid("1"), doc)); CommitStats stats1 = replicaEngine.commitStats(); assertThat(stats1.getGeneration(), greaterThan(0l)); @@ -271,10 +271,10 @@ public class ShadowEngineTests extends ESTestCase { // create a doc and refresh ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null); - primaryEngine.create(new Engine.Create(newUid("1"), doc)); + primaryEngine.index(new Engine.Index(newUid("1"), doc)); ParsedDocument doc2 = testParsedDocument("2", "2", "test", null, -1, -1, testDocumentWithTextField(), B_2, null); - primaryEngine.create(new Engine.Create(newUid("2"), doc2)); + primaryEngine.index(new Engine.Index(newUid("2"), doc2)); primaryEngine.refresh("test"); segments = primaryEngine.segments(false); @@ -334,7 +334,7 @@ public class ShadowEngineTests extends ESTestCase { primaryEngine.onSettingsChanged(); ParsedDocument doc3 = testParsedDocument("3", "3", "test", null, -1, -1, testDocumentWithTextField(), B_3, null); - primaryEngine.create(new Engine.Create(newUid("3"), doc3)); + primaryEngine.index(new Engine.Index(newUid("3"), doc3)); primaryEngine.refresh("test"); segments = primaryEngine.segments(false); @@ -407,7 +407,7 @@ public class ShadowEngineTests extends ESTestCase { primaryEngine.onSettingsChanged(); ParsedDocument doc4 = testParsedDocument("4", "4", "test", null, -1, -1, testDocumentWithTextField(), B_3, null); - primaryEngine.create(new Engine.Create(newUid("4"), doc4)); + primaryEngine.index(new Engine.Index(newUid("4"), doc4)); primaryEngine.refresh("test"); segments = primaryEngine.segments(false); @@ -441,7 +441,7 @@ public class ShadowEngineTests extends ESTestCase { assertThat(segments.isEmpty(), equalTo(true)); ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null); - primaryEngine.create(new Engine.Create(newUid("1"), doc)); + primaryEngine.index(new Engine.Index(newUid("1"), doc)); primaryEngine.refresh("test"); segments = primaryEngine.segments(true); @@ -449,10 +449,10 @@ public class ShadowEngineTests extends ESTestCase { assertThat(segments.get(0).ramTree, notNullValue()); ParsedDocument doc2 = testParsedDocument("2", "2", "test", null, -1, -1, testDocumentWithTextField(), B_2, null); - primaryEngine.create(new Engine.Create(newUid("2"), doc2)); + primaryEngine.index(new Engine.Index(newUid("2"), doc2)); primaryEngine.refresh("test"); ParsedDocument doc3 = testParsedDocument("3", "3", "test", null, -1, -1, testDocumentWithTextField(), B_3, null); - primaryEngine.create(new Engine.Create(newUid("3"), doc3)); + 
primaryEngine.index(new Engine.Index(newUid("3"), doc3)); primaryEngine.refresh("test"); segments = primaryEngine.segments(true); @@ -480,7 +480,7 @@ public class ShadowEngineTests extends ESTestCase { document.add(new Field(SourceFieldMapper.NAME, B_1.toBytes(), SourceFieldMapper.Defaults.FIELD_TYPE)); ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, document, B_1, null); try { - replicaEngine.create(new Engine.Create(newUid("1"), doc)); + replicaEngine.index(new Engine.Index(newUid("1"), doc)); fail("should have thrown an exception"); } catch (UnsupportedOperationException e) {} replicaEngine.refresh("test"); @@ -517,7 +517,7 @@ public class ShadowEngineTests extends ESTestCase { document = testDocumentWithTextField(); document.add(new Field(SourceFieldMapper.NAME, B_1.toBytes(), SourceFieldMapper.Defaults.FIELD_TYPE)); doc = testParsedDocument("1", "1", "test", null, -1, -1, document, B_1, null); - primaryEngine.create(new Engine.Create(newUid("1"), doc)); + primaryEngine.index(new Engine.Index(newUid("1"), doc)); primaryEngine.flush(); replicaEngine.refresh("test"); @@ -573,7 +573,7 @@ public class ShadowEngineTests extends ESTestCase { ParseContext.Document document = testDocumentWithTextField(); document.add(new Field(SourceFieldMapper.NAME, B_1.toBytes(), SourceFieldMapper.Defaults.FIELD_TYPE)); ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, document, B_1, null); - primaryEngine.create(new Engine.Create(newUid("1"), doc)); + primaryEngine.index(new Engine.Index(newUid("1"), doc)); // its not there... searchResult = primaryEngine.acquireSearcher("test"); @@ -700,7 +700,7 @@ public class ShadowEngineTests extends ESTestCase { document = testDocumentWithTextField(); document.add(new Field(SourceFieldMapper.NAME, B_1.toBytes(), SourceFieldMapper.Defaults.FIELD_TYPE)); doc = testParsedDocument("1", "1", "test", null, -1, -1, document, B_1, null); - primaryEngine.create(new Engine.Create(newUid("1"), doc)); + primaryEngine.index(new Engine.Index(newUid("1"), doc)); // its not there... searchResult = primaryEngine.acquireSearcher("test"); @@ -784,7 +784,7 @@ public class ShadowEngineTests extends ESTestCase { // create a document ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null); - primaryEngine.create(new Engine.Create(newUid("1"), doc)); + primaryEngine.index(new Engine.Index(newUid("1"), doc)); // its not there... 
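The ShadowEngineTests hunks all lean on the same contract, worth stating once: the primary is the only writer, while the shadow replica re-opens readers against the shared store and rejects writes outright (the new testNoTranslog further down checks the same for getTranslog()). A sketch of that division of labor, using the fixtures from this file (doc, doc2, newUid):

```java
// Shadow engine contract (sketch): primary writes, replica only reads.
primaryEngine.index(new Engine.Index(newUid("1"), doc));
primaryEngine.flush();                 // make segments visible on the shared store
replicaEngine.refresh("test");         // shadow replica just re-opens its readers
try {
    replicaEngine.index(new Engine.Index(newUid("2"), doc2));
    fail("shadow engines are read-only");
} catch (UnsupportedOperationException expected) {
    // writes on the shadow side are unsupported by design
}
```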
searchResult = primaryEngine.acquireSearcher("test"); @@ -830,7 +830,7 @@ public class ShadowEngineTests extends ESTestCase { @Test public void testFailEngineOnCorruption() { ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null); - primaryEngine.create(new Engine.Create(newUid("1"), doc)); + primaryEngine.index(new Engine.Index(newUid("1"), doc)); primaryEngine.flush(); MockDirectoryWrapper leaf = DirectoryUtils.getLeaf(replicaEngine.config().getStore().directory(), MockDirectoryWrapper.class); leaf.setRandomIOExceptionRate(1.0); @@ -869,7 +869,7 @@ public class ShadowEngineTests extends ESTestCase { public void testFailStart() throws IOException { // Need a commit point for this ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null); - primaryEngine.create(new Engine.Create(newUid("1"), doc)); + primaryEngine.index(new Engine.Index(newUid("1"), doc)); primaryEngine.flush(); // this test fails if any reader, searcher or directory is not closed - MDW FTW @@ -957,7 +957,7 @@ public class ShadowEngineTests extends ESTestCase { ParseContext.Document document = testDocumentWithTextField(); document.add(new Field(SourceFieldMapper.NAME, B_1.toBytes(), SourceFieldMapper.Defaults.FIELD_TYPE)); ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, document, B_1, null); - pEngine.create(new Engine.Create(newUid("1"), doc)); + pEngine.index(new Engine.Index(newUid("1"), doc)); pEngine.flush(true, true); t.join(); @@ -965,4 +965,13 @@ public class ShadowEngineTests extends ESTestCase { // (shadow engine is already shut down in the try-with-resources) IOUtils.close(srStore, pEngine, pStore); } + + public void testNoTranslog() { + try { + replicaEngine.getTranslog(); + fail("shadow engine has no translog"); + } catch (UnsupportedOperationException ex) { + // all good + } + } } diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java index 6c3054f379f..117ef2f4993 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java @@ -601,10 +601,10 @@ public abstract class AbstractStringFieldDataTestCase extends AbstractFieldDataI assertThat(ifd.loadGlobal(topLevelReader), sameInstance(globalOrdinals)); // 3 b/c 1 segment level caches and 1 top level cache // in case of doc values, we don't cache atomic FD, so only the top-level cache is there - assertThat(indicesFieldDataCache.getCache().size(), equalTo(hasDocValues() ? 1L : 4L)); + assertThat(indicesFieldDataCache.getCache().weight(), equalTo(hasDocValues() ? 1L : 4L)); IndexOrdinalsFieldData cachedInstance = null; - for (Accountable ramUsage : indicesFieldDataCache.getCache().asMap().values()) { + for (Accountable ramUsage : indicesFieldDataCache.getCache().values()) { if (ramUsage instanceof IndexOrdinalsFieldData) { cachedInstance = (IndexOrdinalsFieldData) ramUsage; break; @@ -613,12 +613,12 @@ public abstract class AbstractStringFieldDataTestCase extends AbstractFieldDataI assertThat(cachedInstance, sameInstance(globalOrdinals)); topLevelReader.close(); // Now only 3 segment level entries, only the toplevel reader has been closed, but the segment readers are still used by IW - assertThat(indicesFieldDataCache.getCache().size(), equalTo(hasDocValues() ? 
0L : 3L)); + assertThat(indicesFieldDataCache.getCache().weight(), equalTo(hasDocValues() ? 0L : 3L)); refreshReader(); assertThat(ifd.loadGlobal(topLevelReader), not(sameInstance(globalOrdinals))); ifdService.clear(); - assertThat(indicesFieldDataCache.getCache().size(), equalTo(0l)); + assertThat(indicesFieldDataCache.getCache().weight(), equalTo(0l)); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java index 8326287dc52..f01df630ea7 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java @@ -201,7 +201,7 @@ public class DynamicMappingTests extends ESSingleNodeTestCase { ctx.reset(XContentHelper.createParser(source.source()), new ParseContext.Document(), source); assertEquals(XContentParser.Token.START_OBJECT, ctx.parser().nextToken()); ctx.parser().nextToken(); - return DocumentParser.parseObject(ctx, mapper.root()); + return DocumentParser.parseObject(ctx, mapper.root(), true); } public void testDynamicMappingsNotNeeded() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java b/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java index 5e3b61a7da1..6ab4ca38d40 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.mapper; -import com.google.common.collect.Iterators; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; @@ -30,7 +29,6 @@ import java.util.Arrays; import java.util.Collection; import java.util.Iterator; import java.util.List; -import java.util.stream.StreamSupport; public class FieldTypeLookupTests extends ESTestCase { @@ -61,7 +59,7 @@ public class FieldTypeLookupTests extends ESTestCase { assertNull(lookup.get("bar")); assertEquals(f.fieldType(), lookup2.getByIndexName("bar")); assertNull(lookup.getByIndexName("foo")); - assertEquals(1, Iterators.size(lookup2.iterator())); + assertEquals(1, size(lookup2.iterator())); } public void testAddExistingField() { @@ -76,7 +74,7 @@ public class FieldTypeLookupTests extends ESTestCase { assertSame(f.fieldType(), f2.fieldType()); assertSame(f.fieldType(), lookup2.get("foo")); assertSame(f.fieldType(), lookup2.getByIndexName("foo")); - assertEquals(1, Iterators.size(lookup2.iterator())); + assertEquals(1, size(lookup2.iterator())); } public void testAddExistingIndexName() { @@ -92,7 +90,7 @@ public class FieldTypeLookupTests extends ESTestCase { assertSame(f.fieldType(), lookup2.get("foo")); assertSame(f.fieldType(), lookup2.get("bar")); assertSame(f.fieldType(), lookup2.getByIndexName("foo")); - assertEquals(2, Iterators.size(lookup2.iterator())); + assertEquals(2, size(lookup2.iterator())); } public void testAddExistingFullName() { @@ -108,7 +106,7 @@ public class FieldTypeLookupTests extends ESTestCase { assertSame(f.fieldType(), lookup2.get("foo")); assertSame(f.fieldType(), lookup2.getByIndexName("foo")); assertSame(f.fieldType(), lookup2.getByIndexName("bar")); - assertEquals(1, Iterators.size(lookup2.iterator())); + assertEquals(1, size(lookup2.iterator())); } public void testAddExistingBridgeName() { @@ -286,4 +284,16 @@ public class FieldTypeLookupTests extends ESTestCase { 
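The fielddata-cache assertions above swap Guava's Cache.size()/asMap().values() for the in-house cache's weight()/values(). The expected numbers are unchanged because each cached entry is charged a weight of one in these tests, so total weight doubles as an entry count. A self-contained toy illustrating that equivalence under the per-entry-weight-of-one assumption (hypothetical, not the ES cache itself):

```java
import java.util.HashMap;
import java.util.Map;
import java.util.function.ToLongFunction;

// Toy weight-tracking cache: with a weigher of `v -> 1L`, weight() reports
// exactly the entry count that size() used to.
final class CountingCache<K, V> {
    private final Map<K, V> map = new HashMap<>();
    private final ToLongFunction<V> weigher;
    private long weight;

    CountingCache(ToLongFunction<V> weigher) { this.weigher = weigher; }

    void put(K key, V value) {
        V previous = map.put(key, value);
        if (previous != null) {
            weight -= weigher.applyAsLong(previous); // replaced entries give back their cost
        }
        weight += weigher.applyAsLong(value);
    }

    long weight() { return weight; }
}
```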
@Override protected void parseCreateField(ParseContext context, List list) throws IOException {} } + + private int size(Iterator iterator) { + if (iterator == null) { + throw new NullPointerException("iterator"); + } + int count = 0; + while (iterator.hasNext()) { + count++; + iterator.next(); + } + return count; + } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java index a7314c2d27f..0e3a04aa699 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java @@ -43,6 +43,7 @@ import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.ParseContext.Document; import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.mapper.internal.AllFieldMapper; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.index.mapper.internal.TimestampFieldMapper; @@ -453,4 +454,17 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase { // the backcompat behavior is actually ignoring directly specifying _all assertFalse(field.getAllEntries().fields().iterator().hasNext()); } + + public void testIncludeInObjectNotAllowed() throws Exception { + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string(); + DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping); + + try { + docMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + .startObject().field("_all", "foo").endObject().bytes()); + fail("Expected failure to parse metadata field"); + } catch (MapperParsingException e) { + assertTrue(e.getMessage(), e.getMessage().contains("Field [_all] is a metadata field and cannot be added inside a document")); + } + } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/binary/BinaryMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/binary/BinaryMappingTests.java index b5a002d331e..0bc56b0c30b 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/binary/BinaryMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/binary/BinaryMappingTests.java @@ -93,35 +93,4 @@ public class BinaryMappingTests extends ESSingleNodeTestCase { assertEquals(new BytesArray(value), originalValue); } } - - public void testCompressedBackCompat() throws Exception { - String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("properties") - .startObject("field") - .field("type", "binary") - .field("store", "yes") - .endObject() - .endObject() - .endObject().endObject().string(); - - Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_5_0).build(); - DocumentMapper mapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping); - - final byte[] original = new byte[100]; - original[56] = 1; - BytesStreamOutput out = new BytesStreamOutput(); - try (StreamOutput compressed = CompressorFactory.defaultCompressor().streamOutput(out)) { - new BytesArray(original).writeTo(compressed); - } - final byte[] binaryValue = out.bytes().toBytes(); - assertTrue(CompressorFactory.isCompressed(new BytesArray(binaryValue))); - - ParsedDocument 
doc = mapper.parse("test", "type", "id", XContentFactory.jsonBuilder().startObject().field("field", binaryValue).endObject().bytes()); - BytesRef indexedValue = doc.rootDoc().getBinaryValue("field"); - assertEquals(new BytesRef(binaryValue), indexedValue); - FieldMapper fieldMapper = mapper.mappers().smartNameFieldMapper("field"); - Object originalValue = fieldMapper.fieldType().valueForSearch(indexedValue); - assertEquals(new BytesArray(original), originalValue); - } - } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/BinaryFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/BinaryFieldTypeTests.java index f241d555e12..7ab78864fb7 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/core/BinaryFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/BinaryFieldTypeTests.java @@ -28,15 +28,4 @@ public class BinaryFieldTypeTests extends FieldTypeTestCase { protected MappedFieldType createDefaultFieldType() { return new BinaryFieldMapper.BinaryFieldType(); } - - @Before - public void setupProperties() { - addModifier(new Modifier("try_uncompressing", false, true) { - @Override - public void modify(MappedFieldType ft) { - BinaryFieldMapper.BinaryFieldType bft = (BinaryFieldMapper.BinaryFieldType)ft; - bft.setTryUncompressing(!bft.tryUncompressing()); - } - }); - } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java index e0c7a30673a..3d2134f3664 100755 --- a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java @@ -19,10 +19,10 @@ package org.elasticsearch.index.mapper.externalvalues; -import com.google.common.collect.Iterators; import com.spatial4j.core.shape.Point; import org.apache.lucene.document.Field; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.common.settings.Settings; diff --git a/core/src/test/java/org/elasticsearch/index/mapper/id/IdMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/id/IdMappingTests.java index 2688674f859..679b49e7be5 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/id/IdMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/id/IdMappingTests.java @@ -114,4 +114,17 @@ public class IdMappingTests extends ESSingleNodeTestCase { // _id is not indexed so we need to check _uid assertEquals(Uid.createUid("type", "1"), doc.rootDoc().get(UidFieldMapper.NAME)); } + + public void testIncludeInObjectNotAllowed() throws Exception { + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string(); + DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping); + + try { + docMapper.parse(SourceToParse.source(XContentFactory.jsonBuilder() + .startObject().field("_id", "1").endObject().bytes()).type("type")); + fail("Expected failure to parse metadata field"); + } catch (MapperParsingException e) { + assertTrue(e.getMessage(), e.getMessage().contains("Field [_id] is a metadata field and cannot be added inside a document")); + } + } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/parent/ParentMappingTests.java 
b/core/src/test/java/org/elasticsearch/index/mapper/parent/ParentMappingTests.java index bdfb0e475b4..3719500669c 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/parent/ParentMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/parent/ParentMappingTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.mapper.DocumentMapper; +import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.mapper.Uid; @@ -32,21 +33,18 @@ import static org.hamcrest.Matchers.nullValue; public class ParentMappingTests extends ESSingleNodeTestCase { - public void testParentNotSet() throws Exception { + public void testParentSetInDocNotAllowed() throws Exception { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .endObject().endObject().string(); DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping); - ParsedDocument doc = docMapper.parse(SourceToParse.source(XContentFactory.jsonBuilder() - .startObject() - .field("_parent", "1122") - .field("x_field", "x_value") - .endObject() - .bytes()).type("type").id("1")); - - // no _parent mapping, dynamically used as a string field - assertNull(doc.parent()); - assertNotNull(doc.rootDoc().get("_parent")); + try { + docMapper.parse(SourceToParse.source(XContentFactory.jsonBuilder() + .startObject().field("_parent", "1122").endObject().bytes()).type("type").id("1")); + fail("Expected failure to parse metadata field"); + } catch (MapperParsingException e) { + assertTrue(e.getMessage(), e.getMessage().contains("Field [_parent] is a metadata field and cannot be added inside a document")); + } } public void testParentSetInDocBackcompat() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/routing/RoutingTypeMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/routing/RoutingTypeMapperTests.java index 30fcb5f3d6d..7d0afdb0724 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/routing/RoutingTypeMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/routing/RoutingTypeMapperTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.mapper.DocumentMapper; +import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -113,7 +114,7 @@ public class RoutingTypeMapperTests extends ESSingleNodeTestCase { Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build(); DocumentMapper docMapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping); - XContentBuilder doc = XContentFactory.jsonBuilder().startObject().field("_timestamp", 2000000).endObject(); + XContentBuilder doc = XContentFactory.jsonBuilder().startObject().field("_routing", "foo").endObject(); MappingMetaData mappingMetaData = new MappingMetaData(docMapper); IndexRequest request = new IndexRequest("test", "type", 
"1").source(doc); request.process(MetaData.builder().build(), mappingMetaData, true, "test"); @@ -122,4 +123,17 @@ public class RoutingTypeMapperTests extends ESSingleNodeTestCase { assertNull(request.routing()); assertNull(docMapper.parse("test", "type", "1", doc.bytes()).rootDoc().get("_routing")); } + + public void testIncludeInObjectNotAllowed() throws Exception { + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string(); + DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping); + + try { + docMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + .startObject().field("_routing", "foo").endObject().bytes()); + fail("Expected failure to parse metadata field"); + } catch (MapperParsingException e) { + assertTrue(e.getMessage(), e.getMessage().contains("Field [_routing] is a metadata field and cannot be added inside a document")); + } + } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java index 057dc41f0f9..e51b6a61d50 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java @@ -769,6 +769,21 @@ public class TimestampMappingTests extends ESSingleNodeTestCase { assertNull(docMapper.parse("test", "type", "1", doc.bytes()).rootDoc().get("_timestamp")); } + public void testIncludeInObjectNotAllowed() throws Exception { + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("_timestamp").field("enabled", true).field("default", "1970").field("format", "YYYY").endObject() + .endObject().endObject().string(); + DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping); + + try { + docMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + .startObject().field("_timestamp", 2000000).endObject().bytes()); + fail("Expected failure to parse metadata field"); + } catch (MapperParsingException e) { + assertTrue(e.getMessage(), e.getMessage().contains("Field [_timestamp] is a metadata field and cannot be added inside a document")); + } + } + public void testThatEpochCanBeIgnoredWithCustomFormat() throws Exception { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("_timestamp").field("enabled", true).field("format", "yyyyMMddHH").endObject() diff --git a/core/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java index c9b6131900f..b9f7a988788 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java @@ -310,6 +310,21 @@ public class TTLMappingTests extends ESSingleNodeTestCase { assertNull(docMapper.parse("test", "type", "1", doc.bytes()).rootDoc().get("_ttl")); } + public void testIncludeInObjectNotAllowed() throws Exception { + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("_ttl").field("enabled", true).endObject() + .endObject().endObject().string(); + DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping); + + try { + docMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() 
+ .startObject().field("_ttl", "2d").endObject().bytes()); + fail("Expected failure to parse metadata field"); + } catch (MapperParsingException e) { + assertTrue(e.getMessage(), e.getMessage().contains("Field [_ttl] is a metadata field and cannot be added inside a document")); + } + } + private org.elasticsearch.common.xcontent.XContentBuilder getMappingWithTtlEnabled() throws IOException { return getMappingWithTtlEnabled(null); } diff --git a/core/src/test/java/org/elasticsearch/index/query/AbstractQueryTestCase.java b/core/src/test/java/org/elasticsearch/index/query/AbstractQueryTestCase.java index 5837d5b16ff..fce0a9478b8 100644 --- a/core/src/test/java/org/elasticsearch/index/query/AbstractQueryTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/query/AbstractQueryTestCase.java @@ -215,7 +215,7 @@ public abstract class AbstractQueryTestCase> new IndexSettingsModule(index, indexSettings), new IndexCacheModule(indexSettings), new AnalysisModule(indexSettings, new IndicesAnalysisService(indexSettings)), - new SimilarityModule(indexSettings), + new SimilarityModule(index, indexSettings), new IndexNameModule(index), new AbstractModule() { @Override diff --git a/core/src/test/java/org/elasticsearch/index/query/GeoDistanceRangeQueryTests.java b/core/src/test/java/org/elasticsearch/index/query/GeoDistanceRangeQueryTests.java index 9dd1a55f98c..19e48aa41a7 100644 --- a/core/src/test/java/org/elasticsearch/index/query/GeoDistanceRangeQueryTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/GeoDistanceRangeQueryTests.java @@ -22,6 +22,7 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Query; import org.elasticsearch.common.geo.GeoDistance; import org.elasticsearch.common.geo.GeoPoint; +import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.index.search.geo.GeoDistanceRangeQuery; import org.junit.Test; @@ -108,8 +109,12 @@ public class GeoDistanceRangeQueryTests extends AbstractQueryTestCase { + @Override + public MappedFieldType.Names getFieldNames() { + return new MappedFieldType.Names("test"); + } + + @Override + public FieldDataType getFieldDataType() { + throw new UnsupportedOperationException(UNSUPPORTED); + } + + @Override + public AtomicFieldData load(LeafReaderContext context) { + return new AtomicFieldData() { + + @Override + public ScriptDocValues getScriptValues() { + throw new UnsupportedOperationException(UNSUPPORTED); + } + + @Override + public SortedBinaryDocValues getBytesValues() { + return new SortedBinaryDocValues() { + @Override + public void setDocument(int docId) { + } + + @Override + public int count() { + return 1; + } + + @Override + public BytesRef valueAt(int index) { + return new BytesRef("0"); + } + }; + } + + @Override + public long ramBytesUsed() { + throw new UnsupportedOperationException(UNSUPPORTED); + } + + @Override + public Collection getChildResources() { + throw new UnsupportedOperationException(UNSUPPORTED); + } + + @Override + public void close() { + } + }; + } + + @Override + public AtomicFieldData loadDirect(LeafReaderContext context) throws Exception { + throw new UnsupportedOperationException(UNSUPPORTED); + } + + @Override + public IndexFieldData.XFieldComparatorSource comparatorSource(@Nullable Object missingValue, MultiValueMode sortMode, IndexFieldData.XFieldComparatorSource.Nested nested) { + throw new UnsupportedOperationException(UNSUPPORTED); + } + + @Override + public void clear() { + throw new 
UnsupportedOperationException(UNSUPPORTED); + } + + @Override + public void clear(IndexReader reader) { + throw new UnsupportedOperationException(UNSUPPORTED); + } + + @Override + public Index index() { + throw new UnsupportedOperationException(UNSUPPORTED); + } + } + + /** + * Stub for IndexNumericFieldData needed by some score functions. Returns 1 as value always. + */ + private static class IndexNumericFieldDataStub implements IndexNumericFieldData { + + @Override + public NumericType getNumericType() { + throw new UnsupportedOperationException(UNSUPPORTED); + } + + @Override + public MappedFieldType.Names getFieldNames() { + return new MappedFieldType.Names("test"); + } + + @Override + public FieldDataType getFieldDataType() { + throw new UnsupportedOperationException(UNSUPPORTED); + } + + @Override + public AtomicNumericFieldData load(LeafReaderContext context) { + return new AtomicNumericFieldData() { + @Override + public SortedNumericDocValues getLongValues() { + throw new UnsupportedOperationException(UNSUPPORTED); + } + + @Override + public SortedNumericDoubleValues getDoubleValues() { + return new SortedNumericDoubleValues() { + @Override + public void setDocument(int doc) { + } + + @Override + public double valueAt(int index) { + return 1; + } + + @Override + public int count() { + return 1; + } + }; + } + + @Override + public ScriptDocValues getScriptValues() { + throw new UnsupportedOperationException(UNSUPPORTED); + } + + @Override + public SortedBinaryDocValues getBytesValues() { + throw new UnsupportedOperationException(UNSUPPORTED); + } + + @Override + public long ramBytesUsed() { + throw new UnsupportedOperationException(UNSUPPORTED); + } + + @Override + public Collection getChildResources() { + throw new UnsupportedOperationException(UNSUPPORTED); + } + + @Override + public void close() { + } + }; + } + + @Override + public AtomicNumericFieldData loadDirect(LeafReaderContext context) throws Exception { + throw new UnsupportedOperationException(UNSUPPORTED); + } + + @Override + public XFieldComparatorSource comparatorSource(@Nullable Object missingValue, MultiValueMode sortMode, XFieldComparatorSource.Nested nested) { + throw new UnsupportedOperationException(UNSUPPORTED); + } + + @Override + public void clear() { + throw new UnsupportedOperationException(UNSUPPORTED); + } + + @Override + public void clear(IndexReader reader) { + throw new UnsupportedOperationException(UNSUPPORTED); + } + + @Override + public Index index() { + throw new UnsupportedOperationException(UNSUPPORTED); + } + } + + private static final ScoreFunction RANDOM_SCORE_FUNCTION = new RandomScoreFunction(0, 0, new IndexFieldDataStub()); + private static final ScoreFunction FIELD_VALUE_FACTOR_FUNCTION = new FieldValueFactorFunction("test", 1, FieldValueFactorFunction.Modifier.LN, new Double(1), null); + private static final ScoreFunction GAUSS_DECAY_FUNCTION = new DecayFunctionBuilder.NumericFieldDataScoreFunction(0, 1, 0.1, 0, GaussDecayFunctionBuilder.GAUSS_DECAY_FUNCTION, new IndexNumericFieldDataStub(), MultiValueMode.MAX); + private static final ScoreFunction EXP_DECAY_FUNCTION = new DecayFunctionBuilder.NumericFieldDataScoreFunction(0, 1, 0.1, 0, ExponentialDecayFunctionBuilder.EXP_DECAY_FUNCTION, new IndexNumericFieldDataStub(), MultiValueMode.MAX); + private static final ScoreFunction LIN_DECAY_FUNCTION = new DecayFunctionBuilder.NumericFieldDataScoreFunction(0, 1, 0.1, 0, LinearDecayFunctionBuilder.LINEAR_DECAY_FUNCTION, new IndexNumericFieldDataStub(), MultiValueMode.MAX); + private static 
final ScoreFunction WEIGHT_FACTOR_FUNCTION = new WeightFactorFunction(4); + private static final String TEXT = "The way out is through."; + private static final String FIELD = "test"; + private static final Term TERM = new Term(FIELD, "through"); + private Directory dir; + private IndexWriter w; + private DirectoryReader reader; + private IndexSearcher searcher; + + @Before + public void initSearcher() throws IOException { + dir = newDirectory(); + w = new IndexWriter(dir, newIndexWriterConfig(new StandardAnalyzer())); + Document d = new Document(); + d.add(new TextField(FIELD, TEXT, Field.Store.YES)); + d.add(new TextField("_uid", "1", Field.Store.YES)); + w.addDocument(d); + w.commit(); + reader = DirectoryReader.open(w, true); + searcher = newSearcher(reader); + } + + @After + public void closeAllTheReaders() throws IOException { + reader.close(); + w.close(); + dir.close(); + } + + @Test + public void testExplainFunctionScoreQuery() throws IOException { + + Explanation functionExplanation = getFunctionScoreExplanation(searcher, RANDOM_SCORE_FUNCTION); + checkFunctionScoreExplanation(functionExplanation, "random score function (seed: 0)"); + assertThat(functionExplanation.getDetails()[0].getDetails().length, equalTo(0)); + + functionExplanation = getFunctionScoreExplanation(searcher, FIELD_VALUE_FACTOR_FUNCTION); + checkFunctionScoreExplanation(functionExplanation, "field value function: ln(doc['test'].value?:1.0 * factor=1.0)"); + assertThat(functionExplanation.getDetails()[0].getDetails().length, equalTo(0)); + + functionExplanation = getFunctionScoreExplanation(searcher, GAUSS_DECAY_FUNCTION); + checkFunctionScoreExplanation(functionExplanation, "Function for field test:"); + assertThat(functionExplanation.getDetails()[0].getDetails()[0].toString(), equalTo("0.1 = exp(-0.5*pow(MAX[Math.max(Math.abs(1.0(=doc value) - 0.0(=origin))) - 0.0(=offset), 0)],2.0)/0.21714724095162594)\n")); + assertThat(functionExplanation.getDetails()[0].getDetails()[0].getDetails().length, equalTo(0)); + + functionExplanation = getFunctionScoreExplanation(searcher, EXP_DECAY_FUNCTION); + checkFunctionScoreExplanation(functionExplanation, "Function for field test:"); + assertThat(functionExplanation.getDetails()[0].getDetails()[0].toString(), equalTo("0.1 = exp(- MAX[Math.max(Math.abs(1.0(=doc value) - 0.0(=origin))) - 0.0(=offset), 0)] * 2.3025850929940455)\n")); + assertThat(functionExplanation.getDetails()[0].getDetails()[0].getDetails().length, equalTo(0)); + + functionExplanation = getFunctionScoreExplanation(searcher, LIN_DECAY_FUNCTION); + checkFunctionScoreExplanation(functionExplanation, "Function for field test:"); + assertThat(functionExplanation.getDetails()[0].getDetails()[0].toString(), equalTo("0.1 = max(0.0, ((1.1111111111111112 - MAX[Math.max(Math.abs(1.0(=doc value) - 0.0(=origin))) - 0.0(=offset), 0)])/1.1111111111111112)\n")); + assertThat(functionExplanation.getDetails()[0].getDetails()[0].getDetails().length, equalTo(0)); + + functionExplanation = getFunctionScoreExplanation(searcher, WEIGHT_FACTOR_FUNCTION); + checkFunctionScoreExplanation(functionExplanation, "product of:"); + assertThat(functionExplanation.getDetails()[0].getDetails()[0].toString(), equalTo("1.0 = constant score 1.0 - no function provided\n")); + assertThat(functionExplanation.getDetails()[0].getDetails()[1].toString(), equalTo("4.0 = weight\n")); + assertThat(functionExplanation.getDetails()[0].getDetails()[0].getDetails().length, equalTo(0)); + 
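The long expected strings in these explain tests are deterministic because the stub field data always yields a doc value of 1.0 and, reading the NumericFieldDataScoreFunction arguments above as origin=0, scale=1, decay=0.1, offset=0, the three magic constants fall out of the standard decay algebra. A quick check (values approximate to the printed doubles):

```java
// Deriving the constants embedded in the expected explanation strings,
// assuming origin=0, scale=1, offset=0, decay=0.1 as constructed above.
double scale = 1.0, decay = 0.1;
double gaussDenom = scale * scale / (2.0 * Math.log(1.0 / decay)); // ~0.21714724095162594
double expLambda  = Math.log(1.0 / decay) / scale;                 // ~2.3025850929940455
double linScale   = scale / (1.0 - decay);                         // ~1.1111111111111112
// Sanity: each function decays to exactly 0.1 at distance == scale.
assert Math.abs(Math.exp(-0.5 * scale * scale / gaussDenom) - decay) < 1e-12;
assert Math.abs(Math.exp(-scale * expLambda) - decay) < 1e-12;
assert Math.abs((linScale - scale) / linScale - decay) < 1e-12;
```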
assertThat(functionExplanation.getDetails()[0].getDetails()[1].getDetails().length, equalTo(0)); + } + + public Explanation getFunctionScoreExplanation(IndexSearcher searcher, ScoreFunction scoreFunction) throws IOException { + FunctionScoreQuery functionScoreQuery = new FunctionScoreQuery(new TermQuery(TERM), scoreFunction, 0.0f, CombineFunction.AVG, 100); + Weight weight = searcher.createNormalizedWeight(functionScoreQuery, true); + Explanation explanation = weight.explain(searcher.getIndexReader().leaves().get(0), 0); + return explanation.getDetails()[1]; + } + + public void checkFunctionScoreExplanation(Explanation randomExplanation, String functionExpl) { + assertThat(randomExplanation.getDescription(), equalTo("min of:")); + assertThat(randomExplanation.getDetails()[0].getDescription(), equalTo(functionExpl)); + } + + @Test + public void testExplainFiltersFunctionScoreQuery() throws IOException { + Explanation functionExplanation = getFiltersFunctionScoreExplanation(searcher, RANDOM_SCORE_FUNCTION); + checkFiltersFunctionScoreExplanation(functionExplanation, "random score function (seed: 0)", 0); + assertThat(functionExplanation.getDetails()[0].getDetails()[0].getDetails()[1].getDetails().length, equalTo(0)); + + functionExplanation = getFiltersFunctionScoreExplanation(searcher, FIELD_VALUE_FACTOR_FUNCTION); + checkFiltersFunctionScoreExplanation(functionExplanation, "field value function: ln(doc['test'].value?:1.0 * factor=1.0)", 0); + assertThat(functionExplanation.getDetails()[0].getDetails()[0].getDetails()[1].getDetails().length, equalTo(0)); + + functionExplanation = getFiltersFunctionScoreExplanation(searcher, GAUSS_DECAY_FUNCTION); + checkFiltersFunctionScoreExplanation(functionExplanation, "Function for field test:", 0); + assertThat(functionExplanation.getDetails()[0].getDetails()[0].getDetails()[1].getDetails()[0].toString(), equalTo("0.1 = exp(-0.5*pow(MAX[Math.max(Math.abs(1.0(=doc value) - 0.0(=origin))) - 0.0(=offset), 0)],2.0)/0.21714724095162594)\n")); + assertThat(functionExplanation.getDetails()[0].getDetails()[0].getDetails()[1].getDetails()[0].getDetails().length, equalTo(0)); + + functionExplanation = getFiltersFunctionScoreExplanation(searcher, EXP_DECAY_FUNCTION); + checkFiltersFunctionScoreExplanation(functionExplanation, "Function for field test:", 0); + assertThat(functionExplanation.getDetails()[0].getDetails()[0].getDetails()[1].getDetails()[0].toString(), equalTo("0.1 = exp(- MAX[Math.max(Math.abs(1.0(=doc value) - 0.0(=origin))) - 0.0(=offset), 0)] * 2.3025850929940455)\n")); + assertThat(functionExplanation.getDetails()[0].getDetails()[0].getDetails()[1].getDetails()[0].getDetails().length, equalTo(0)); + + functionExplanation = getFiltersFunctionScoreExplanation(searcher, LIN_DECAY_FUNCTION); + checkFiltersFunctionScoreExplanation(functionExplanation, "Function for field test:", 0); + assertThat(functionExplanation.getDetails()[0].getDetails()[0].getDetails()[1].getDetails()[0].toString(), equalTo("0.1 = max(0.0, ((1.1111111111111112 - MAX[Math.max(Math.abs(1.0(=doc value) - 0.0(=origin))) - 0.0(=offset), 0)])/1.1111111111111112)\n")); + assertThat(functionExplanation.getDetails()[0].getDetails()[0].getDetails()[1].getDetails()[0].getDetails().length, equalTo(0)); + + // now test all together + functionExplanation = getFiltersFunctionScoreExplanation(searcher + , RANDOM_SCORE_FUNCTION + , FIELD_VALUE_FACTOR_FUNCTION + , GAUSS_DECAY_FUNCTION + , EXP_DECAY_FUNCTION + , LIN_DECAY_FUNCTION + ); + + checkFiltersFunctionScoreExplanation(functionExplanation, 
"random score function (seed: 0)", 0); + assertThat(functionExplanation.getDetails()[0].getDetails()[0].getDetails()[1].getDetails().length, equalTo(0)); + + checkFiltersFunctionScoreExplanation(functionExplanation, "field value function: ln(doc['test'].value?:1.0 * factor=1.0)", 1); + assertThat(functionExplanation.getDetails()[0].getDetails()[1].getDetails()[1].getDetails().length, equalTo(0)); + + checkFiltersFunctionScoreExplanation(functionExplanation, "Function for field test:", 2); + assertThat(functionExplanation.getDetails()[0].getDetails()[2].getDetails()[1].getDetails()[0].toString(), equalTo("0.1 = exp(-0.5*pow(MAX[Math.max(Math.abs(1.0(=doc value) - 0.0(=origin))) - 0.0(=offset), 0)],2.0)/0.21714724095162594)\n")); + assertThat(functionExplanation.getDetails()[0].getDetails()[2].getDetails()[1].getDetails()[0].getDetails().length, equalTo(0)); + + checkFiltersFunctionScoreExplanation(functionExplanation, "Function for field test:", 3); + assertThat(functionExplanation.getDetails()[0].getDetails()[3].getDetails()[1].getDetails()[0].toString(), equalTo("0.1 = exp(- MAX[Math.max(Math.abs(1.0(=doc value) - 0.0(=origin))) - 0.0(=offset), 0)] * 2.3025850929940455)\n")); + assertThat(functionExplanation.getDetails()[0].getDetails()[3].getDetails()[1].getDetails()[0].getDetails().length, equalTo(0)); + + checkFiltersFunctionScoreExplanation(functionExplanation, "Function for field test:", 4); + assertThat(functionExplanation.getDetails()[0].getDetails()[4].getDetails()[1].getDetails()[0].toString(), equalTo("0.1 = max(0.0, ((1.1111111111111112 - MAX[Math.max(Math.abs(1.0(=doc value) - 0.0(=origin))) - 0.0(=offset), 0)])/1.1111111111111112)\n")); + assertThat(functionExplanation.getDetails()[0].getDetails()[4].getDetails()[1].getDetails()[0].getDetails().length, equalTo(0)); + } + + public Explanation getFiltersFunctionScoreExplanation(IndexSearcher searcher, ScoreFunction... scoreFunctions) throws IOException { + FiltersFunctionScoreQuery filtersFunctionScoreQuery = getFiltersFunctionScoreQuery(FiltersFunctionScoreQuery.ScoreMode.AVG, CombineFunction.AVG, scoreFunctions); + Weight weight = searcher.createNormalizedWeight(filtersFunctionScoreQuery, true); + Explanation explanation = weight.explain(searcher.getIndexReader().leaves().get(0), 0); + return explanation.getDetails()[1]; + } + + public FiltersFunctionScoreQuery getFiltersFunctionScoreQuery(FiltersFunctionScoreQuery.ScoreMode scoreMode, CombineFunction combineFunction, ScoreFunction... 
scoreFunctions) { + FiltersFunctionScoreQuery.FilterFunction[] filterFunctions = new FiltersFunctionScoreQuery.FilterFunction[scoreFunctions.length]; + for (int i = 0; i < scoreFunctions.length; i++) { + filterFunctions[i] = new FiltersFunctionScoreQuery.FilterFunction( + new TermQuery(TERM), scoreFunctions[i]); + } + return new FiltersFunctionScoreQuery(new TermQuery(TERM), scoreMode, filterFunctions, Float.MAX_VALUE, Float.MAX_VALUE * -1, combineFunction); + } + + public void checkFiltersFunctionScoreExplanation(Explanation randomExplanation, String functionExpl, int whichFunction) { + assertThat(randomExplanation.getDescription(), equalTo("min of:")); + assertThat(randomExplanation.getDetails()[0].getDescription(), equalTo("function score, score mode [avg]")); + assertThat(randomExplanation.getDetails()[0].getDetails()[whichFunction].getDescription(), equalTo("function score, product of:")); + assertThat(randomExplanation.getDetails()[0].getDetails()[whichFunction].getDetails()[0].getDescription(), equalTo("match filter: " + FIELD + ":" + TERM.text())); + assertThat(randomExplanation.getDetails()[0].getDetails()[whichFunction].getDetails()[1].getDescription(), equalTo(functionExpl)); + } + + private static float[] randomFloats(int size) { + float[] weights = new float[size]; + for (int i = 0; i < weights.length; i++) { + weights[i] = randomFloat() * (randomBoolean() ? 1.0f : -1.0f) * randomInt(100) + 1.e-5f; + } + return weights; + } + + private static class ScoreFunctionStub extends ScoreFunction { + private float score; + + ScoreFunctionStub(float score) { + super(CombineFunction.REPLACE); + this.score = score; + } + + @Override + public LeafScoreFunction getLeafScoreFunction(LeafReaderContext ctx) throws IOException { + return new LeafScoreFunction() { + @Override + public double score(int docId, float subQueryScore) { + return score; + } + + @Override + public Explanation explainScore(int docId, Explanation subQueryScore) throws IOException { + throw new UnsupportedOperationException(UNSUPPORTED); + } + }; + } + + @Override + public boolean needsScores() { + return false; + } + + @Override + protected boolean doEquals(ScoreFunction other) { + return false; + } + } + + @Test + public void simpleWeightedFunctionsTest() throws IOException, ExecutionException, InterruptedException { + int numFunctions = randomIntBetween(1, 3); + float[] weights = randomFloats(numFunctions); + float[] scores = randomFloats(numFunctions); + ScoreFunctionStub[] scoreFunctionStubs = new ScoreFunctionStub[numFunctions]; + for (int i = 0; i < numFunctions; i++) { + scoreFunctionStubs[i] = new ScoreFunctionStub(scores[i]); + } + WeightFactorFunction[] weightFunctionStubs = new WeightFactorFunction[numFunctions]; + for (int i = 0; i < numFunctions; i++) { + weightFunctionStubs[i] = new WeightFactorFunction(weights[i], scoreFunctionStubs[i]); + } + + FiltersFunctionScoreQuery filtersFunctionScoreQueryWithWeights = getFiltersFunctionScoreQuery( + FiltersFunctionScoreQuery.ScoreMode.MULTIPLY + , CombineFunction.REPLACE + , weightFunctionStubs + ); + + TopDocs topDocsWithWeights = searcher.search(filtersFunctionScoreQueryWithWeights, 1); + float scoreWithWeight = topDocsWithWeights.scoreDocs[0].score; + double score = 1; + for (int i = 0; i < weights.length; i++) { + score *= weights[i] * scores[i]; + } + assertThat(scoreWithWeight / score, closeTo(1, 1.e-5d)); + + filtersFunctionScoreQueryWithWeights = getFiltersFunctionScoreQuery( + FiltersFunctionScoreQuery.ScoreMode.SUM + , CombineFunction.REPLACE + , 
weightFunctionStubs + ); + + topDocsWithWeights = searcher.search(filtersFunctionScoreQueryWithWeights, 1); + scoreWithWeight = topDocsWithWeights.scoreDocs[0].score; + double sum = 0; + for (int i = 0; i < weights.length; i++) { + sum += weights[i] * scores[i]; + } + assertThat(scoreWithWeight / sum, closeTo(1, 1.e-5d)); + + filtersFunctionScoreQueryWithWeights = getFiltersFunctionScoreQuery( + FiltersFunctionScoreQuery.ScoreMode.AVG + , CombineFunction.REPLACE + , weightFunctionStubs + ); + + topDocsWithWeights = searcher.search(filtersFunctionScoreQueryWithWeights, 1); + scoreWithWeight = topDocsWithWeights.scoreDocs[0].score; + double norm = 0; + sum = 0; + for (int i = 0; i < weights.length; i++) { + norm += weights[i]; + sum += weights[i] * scores[i]; + } + assertThat(scoreWithWeight * norm / sum, closeTo(1, 1.e-5d)); + + filtersFunctionScoreQueryWithWeights = getFiltersFunctionScoreQuery( + FiltersFunctionScoreQuery.ScoreMode.MIN + , CombineFunction.REPLACE + , weightFunctionStubs + ); + + topDocsWithWeights = searcher.search(filtersFunctionScoreQueryWithWeights, 1); + scoreWithWeight = topDocsWithWeights.scoreDocs[0].score; + double min = Double.POSITIVE_INFINITY; + for (int i = 0; i < weights.length; i++) { + min = Math.min(min, weights[i] * scores[i]); + } + assertThat(scoreWithWeight / min, closeTo(1, 1.e-5d)); + + filtersFunctionScoreQueryWithWeights = getFiltersFunctionScoreQuery( + FiltersFunctionScoreQuery.ScoreMode.MAX + , CombineFunction.REPLACE + , weightFunctionStubs + ); + + topDocsWithWeights = searcher.search(filtersFunctionScoreQueryWithWeights, 1); + scoreWithWeight = topDocsWithWeights.scoreDocs[0].score; + double max = Double.NEGATIVE_INFINITY; + for (int i = 0; i < weights.length; i++) { + max = Math.max(max, weights[i] * scores[i]); + } + assertThat(scoreWithWeight / max, closeTo(1, 1.e-5d)); + } + + @Test + public void checkWeightOnlyCreatesBoostFunction() throws IOException { + FunctionScoreQuery filtersFunctionScoreQueryWithWeights = new FunctionScoreQuery(new MatchAllDocsQuery(), new WeightFactorFunction(2), 0.0f, CombineFunction.MULTIPLY, 100); + TopDocs topDocsWithWeights = searcher.search(filtersFunctionScoreQueryWithWeights, 1); + float score = topDocsWithWeights.scoreDocs[0].score; + assertThat(score, equalTo(2.0f)); + } +} \ No newline at end of file diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index c1bdd9d2e7e..7d4b10a41ca 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -41,6 +41,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.SnapshotId; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.*; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -69,7 +70,6 @@ import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.mapper.internal.UidFieldMapper; -import org.elasticsearch.common.ParsingException; import org.elasticsearch.index.settings.IndexSettingsService; import org.elasticsearch.index.snapshots.IndexShardRepository; import 
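simpleWeightedFunctionsTest closes the loop on the arithmetic: each filter function contributes weights[i] * scores[i], and the score mode folds those contributions. The reference values its assertions normalize against, computed in one pass (weights and scores are the test's random arrays):

```java
// Reference math for the five score modes exercised above.
double multiply = 1, sum = 0, norm = 0;
double min = Double.POSITIVE_INFINITY, max = Double.NEGATIVE_INFINITY;
for (int i = 0; i < weights.length; i++) {
    double ws = weights[i] * scores[i]; // one weighted function's contribution
    multiply *= ws;                     // ScoreMode.MULTIPLY
    sum += ws;                          // ScoreMode.SUM
    norm += weights[i];                 // AVG normalizes by the raw weights
    min = Math.min(min, ws);            // ScoreMode.MIN
    max = Math.max(max, ws);            // ScoreMode.MAX
}
double avg = sum / norm;                // ScoreMode.AVG
```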
org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; @@ -95,10 +95,7 @@ import java.util.concurrent.CyclicBarrier; import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicBoolean; -import static org.elasticsearch.cluster.metadata.IndexMetaData.EMPTY_PARAMS; -import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; -import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; -import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_CREATED; +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*; @@ -342,7 +339,8 @@ public class IndexShardTests extends ESSingleNodeTestCase { client().prepareIndex("test", "test").setSource("{}").get(); ensureGreen("test"); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - indicesService.indexService("test").getShardOrNull(0).markAsInactive(); + Boolean result = indicesService.indexService("test").getShardOrNull(0).checkIdle(0); + assertEquals(Boolean.TRUE, result); assertBusy(new Runnable() { // should be very very quick @Override public void run() { @@ -401,35 +399,6 @@ public class IndexShardTests extends ESSingleNodeTestCase { assertEquals(durabilty, shard.getTranslogDurability()); } - public void testDeleteByQueryBWC() { - Version version = VersionUtils.randomVersion(random()); - assertAcked(client().admin().indices().prepareCreate("test") - .setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0, IndexMetaData.SETTING_VERSION_CREATED, version.id)); - ensureGreen("test"); - client().prepareIndex("test", "person").setSource("{ \"user\" : \"kimchy\" }").get(); - - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); - IndexShard shard = test.getShardOrNull(0); - int numDocs = 1; - shard.state = IndexShardState.RECOVERING; - try { - shard.recoveryState().getTranslog().totalOperations(1); - shard.getEngine().config().getTranslogRecoveryPerformer().performRecoveryOperation(shard.getEngine(), new Translog.DeleteByQuery(new Engine.DeleteByQuery(null, new BytesArray("{\"term\" : { \"user\" : \"kimchy\" }}"), null, null, null, Engine.Operation.Origin.RECOVERY, 0, "person")), false); - assertTrue(version.onOrBefore(Version.V_1_0_0_Beta2)); - numDocs = 0; - } catch (ParsingException ex) { - assertTrue(version.after(Version.V_1_0_0_Beta2)); - } finally { - shard.state = IndexShardState.STARTED; - } - shard.getEngine().refresh("foo"); - - try (Engine.Searcher searcher = shard.getEngine().acquireSearcher("foo")) { - assertEquals(numDocs, searcher.reader().numDocs()); - } - } - public void testMinimumCompatVersion() { Version versionCreated = VersionUtils.randomVersion(random()); assertAcked(client().admin().indices().prepareCreate("test") @@ -628,9 +597,9 @@ public class IndexShardTests extends ESSingleNodeTestCase { shardIndexingService.addListener(new IndexingOperationListener() { @Override - public Engine.Index preIndex(Engine.Index index) { + public Engine.Index preIndex(Engine.Index operation) { preIndexCalled.set(true); - return super.preIndex(index); + return super.preIndex(operation); } }); @@ -957,7 +926,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { } }; - 
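One behavioral change in IndexShardTests worth flagging: the unconditional markAsInactive() is replaced by checkIdle(0). As the updated assertion reads, a zero threshold forces the idle check, and the Boolean.TRUE return appears to report that the shard was actually transitioned to inactive by this call. The calling pattern, restated as a sketch (semantics inferred from the assertion, not from documentation):

```java
// New idle-detection entry point (sketch): checkIdle(0) forces the check and
// reports whether the shard just went inactive.
IndexShard shard = indicesService.indexService("test").getShardOrNull(0);
Boolean becameInactive = shard.checkIdle(0);
assertEquals(Boolean.TRUE, becameInactive);
```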
IndexServicesProvider newProvider = new IndexServicesProvider(indexServices.getIndicesLifecycle(), indexServices.getThreadPool(), indexServices.getMapperService(), indexServices.getQueryParserService(), indexServices.getIndexCache(), indexServices.getIndexAliasesService(), indexServices.getIndicesQueryCache(), indexServices.getCodecService(), indexServices.getTermVectorsService(), indexServices.getIndexFieldDataService(), indexServices.getWarmer(), indexServices.getSimilarityService(), indexServices.getFactory(), indexServices.getBigArrays(), wrapper); + IndexServicesProvider newProvider = new IndexServicesProvider(indexServices.getIndicesLifecycle(), indexServices.getThreadPool(), indexServices.getMapperService(), indexServices.getQueryParserService(), indexServices.getIndexCache(), indexServices.getIndexAliasesService(), indexServices.getIndicesQueryCache(), indexServices.getCodecService(), indexServices.getTermVectorsService(), indexServices.getIndexFieldDataService(), indexServices.getWarmer(), indexServices.getSimilarityService(), indexServices.getFactory(), indexServices.getBigArrays(), wrapper, indexServices.getIndexingMemoryController()); IndexShard newShard = new IndexShard(shard.shardId(), shard.indexSettings, shard.shardPath(), shard.store(), newProvider); ShardRoutingHelper.reinit(routing); diff --git a/core/src/test/java/org/elasticsearch/index/similarity/SimilarityModuleTests.java b/core/src/test/java/org/elasticsearch/index/similarity/SimilarityModuleTests.java new file mode 100644 index 00000000000..a73d2a5dac4 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/similarity/SimilarityModuleTests.java @@ -0,0 +1,117 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.index.similarity; + +import org.apache.lucene.index.FieldInvertState; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.CollectionStatistics; +import org.apache.lucene.search.TermStatistics; +import org.apache.lucene.search.similarities.BM25Similarity; +import org.apache.lucene.search.similarities.Similarity; +import org.elasticsearch.common.inject.ModuleTestCase; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; + +import java.io.IOException; + +public class SimilarityModuleTests extends ModuleTestCase { + + public void testAddSimilarity() { + Settings indexSettings = Settings.settingsBuilder() + .put("index.similarity.my_similarity.type", "test_similarity") + .put("index.similarity.my_similarity.key", "there is a key") + .build(); + SimilarityModule module = new SimilarityModule(new Index("foo"), indexSettings); + // register a custom provider under the type name referenced by the settings above + module.addSimilarity("test_similarity", (string, settings) -> new SimilarityProvider() { + @Override + public String name() { + return string; + } + + @Override + public Similarity get() { + return new TestSimilarity(settings.get("key")); + } + }); + assertInstanceBinding(module, SimilarityService.class, (inst) -> { + if (inst instanceof SimilarityService) { + assertNotNull(inst.getSimilarity("my_similarity")); + assertTrue(inst.getSimilarity("my_similarity").get() instanceof TestSimilarity); + assertEquals("my_similarity", inst.getSimilarity("my_similarity").name()); + assertEquals("there is a key", ((TestSimilarity) inst.getSimilarity("my_similarity").get()).key); + return true; + } + return false; + }); + } + + public void testSetupUnknownSimilarity() { + Settings indexSettings = Settings.settingsBuilder() + .put("index.similarity.my_similarity.type", "test_similarity") + .build(); + SimilarityModule module = new SimilarityModule(new Index("foo"), indexSettings); + try { + assertInstanceBinding(module, SimilarityService.class, (inst) -> inst instanceof SimilarityService); + fail("setup must fail for an unregistered similarity type"); + } catch (IllegalArgumentException ex) { + assertEquals("Unknown Similarity type [test_similarity] for [my_similarity]", ex.getMessage()); + } + } + + public void testSetupWithoutType() { + Settings indexSettings = Settings.settingsBuilder() + .put("index.similarity.my_similarity.foo", "bar") + .build(); + SimilarityModule module = new SimilarityModule(new Index("foo"), indexSettings); + try { + assertInstanceBinding(module, SimilarityService.class, (inst) -> inst instanceof SimilarityService); + fail("setup must fail when a similarity is configured without a type"); + } catch (IllegalArgumentException ex) { + assertEquals("Similarity [my_similarity] must have an associated type", ex.getMessage()); + } + } + + private static class TestSimilarity extends Similarity { + private final Similarity delegate = new BM25Similarity(); + private final String key; + + public TestSimilarity(String key) { + if (key == null) { + throw new AssertionError("key is null"); + } + this.key = key; + } + + @Override + public long computeNorm(FieldInvertState state) { + return delegate.computeNorm(state); + } + + @Override + public SimWeight computeWeight(CollectionStatistics collectionStats, TermStatistics... 
termStats) { + return delegate.computeWeight(collectionStats, termStats); + } + + @Override + public SimScorer simScorer(SimWeight weight, LeafReaderContext context) throws IOException { + return delegate.simScorer(weight, context); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java b/core/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java index 6a42ba7e8ce..28f5e5c62f6 100644 --- a/core/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java +++ b/core/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java @@ -22,6 +22,7 @@ package org.elasticsearch.index.similarity; import org.apache.lucene.search.similarities.*; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.test.ESSingleNodeTestCase; import org.junit.Test; @@ -35,11 +36,9 @@ public class SimilarityTests extends ESSingleNodeTestCase { @Test public void testResolveDefaultSimilarities() { - SimilarityLookupService similarityLookupService = createIndex("foo").similarityService().similarityLookupService(); - assertThat(similarityLookupService.similarity("default"), instanceOf(PreBuiltSimilarityProvider.class)); - assertThat(similarityLookupService.similarity("default").get(), instanceOf(DefaultSimilarity.class)); - assertThat(similarityLookupService.similarity("BM25"), instanceOf(PreBuiltSimilarityProvider.class)); - assertThat(similarityLookupService.similarity("BM25").get(), instanceOf(BM25Similarity.class)); + SimilarityService similarityService = createIndex("foo").similarityService(); + assertThat(similarityService.getSimilarity("default").get(), instanceOf(DefaultSimilarity.class)); + assertThat(similarityService.getSimilarity("BM25").get(), instanceOf(BM25Similarity.class)); } @Test @@ -54,8 +53,8 @@ public class SimilarityTests extends ESSingleNodeTestCase { .put("index.similarity.my_similarity.type", "default") .put("index.similarity.my_similarity.discount_overlaps", false) .build(); - SimilarityService similarityService = createIndex("foo", indexSettings).similarityService(); - DocumentMapper documentMapper = similarityService.mapperService().documentMapperParser().parse(mapping); + IndexService indexService = createIndex("foo", indexSettings); + DocumentMapper documentMapper = indexService.mapperService().documentMapperParser().parse(mapping); assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity(), instanceOf(DefaultSimilarityProvider.class)); DefaultSimilarity similarity = (DefaultSimilarity) documentMapper.mappers().getMapper("field1").fieldType().similarity().get(); @@ -76,8 +75,8 @@ public class SimilarityTests extends ESSingleNodeTestCase { .put("index.similarity.my_similarity.b", 1.5f) .put("index.similarity.my_similarity.discount_overlaps", false) .build(); - SimilarityService similarityService = createIndex("foo", indexSettings).similarityService(); - DocumentMapper documentMapper = similarityService.mapperService().documentMapperParser().parse(mapping); + IndexService indexService = createIndex("foo", indexSettings); + DocumentMapper documentMapper = indexService.mapperService().documentMapperParser().parse(mapping); assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity(), instanceOf(BM25SimilarityProvider.class)); BM25Similarity similarity = (BM25Similarity) 
documentMapper.mappers().getMapper("field1").fieldType().similarity().get(); @@ -101,8 +100,8 @@ public class SimilarityTests extends ESSingleNodeTestCase { .put("index.similarity.my_similarity.normalization", "h2") .put("index.similarity.my_similarity.normalization.h2.c", 3f) .build(); - SimilarityService similarityService = createIndex("foo", indexSettings).similarityService(); - DocumentMapper documentMapper = similarityService.mapperService().documentMapperParser().parse(mapping); + IndexService indexService = createIndex("foo", indexSettings); + DocumentMapper documentMapper = indexService.mapperService().documentMapperParser().parse(mapping); assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity(), instanceOf(DFRSimilarityProvider.class)); DFRSimilarity similarity = (DFRSimilarity) documentMapper.mappers().getMapper("field1").fieldType().similarity().get(); @@ -127,8 +126,8 @@ public class SimilarityTests extends ESSingleNodeTestCase { .put("index.similarity.my_similarity.normalization", "h2") .put("index.similarity.my_similarity.normalization.h2.c", 3f) .build(); - SimilarityService similarityService = createIndex("foo", indexSettings).similarityService(); - DocumentMapper documentMapper = similarityService.mapperService().documentMapperParser().parse(mapping); + IndexService indexService = createIndex("foo", indexSettings); + DocumentMapper documentMapper = indexService.mapperService().documentMapperParser().parse(mapping); assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity(), instanceOf(IBSimilarityProvider.class)); IBSimilarity similarity = (IBSimilarity) documentMapper.mappers().getMapper("field1").fieldType().similarity().get(); @@ -150,8 +149,8 @@ public class SimilarityTests extends ESSingleNodeTestCase { .put("index.similarity.my_similarity.type", "LMDirichlet") .put("index.similarity.my_similarity.mu", 3000f) .build(); - SimilarityService similarityService = createIndex("foo", indexSettings).similarityService(); - DocumentMapper documentMapper = similarityService.mapperService().documentMapperParser().parse(mapping); + IndexService indexService = createIndex("foo", indexSettings); + DocumentMapper documentMapper = indexService.mapperService().documentMapperParser().parse(mapping); assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity(), instanceOf(LMDirichletSimilarityProvider.class)); LMDirichletSimilarity similarity = (LMDirichletSimilarity) documentMapper.mappers().getMapper("field1").fieldType().similarity().get(); @@ -170,8 +169,8 @@ public class SimilarityTests extends ESSingleNodeTestCase { .put("index.similarity.my_similarity.type", "LMJelinekMercer") .put("index.similarity.my_similarity.lambda", 0.7f) .build(); - SimilarityService similarityService = createIndex("foo", indexSettings).similarityService(); - DocumentMapper documentMapper = similarityService.mapperService().documentMapperParser().parse(mapping); + IndexService indexService = createIndex("foo", indexSettings); + DocumentMapper documentMapper = indexService.mapperService().documentMapperParser().parse(mapping); assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity(), instanceOf(LMJelinekMercerSimilarityProvider.class)); LMJelinekMercerSimilarity similarity = (LMJelinekMercerSimilarity) documentMapper.mappers().getMapper("field1").fieldType().similarity().get(); diff --git a/core/src/test/java/org/elasticsearch/index/store/IndexStoreBWCTests.java 
b/core/src/test/java/org/elasticsearch/index/store/IndexStoreBWCTests.java deleted file mode 100644 index e53358c6631..00000000000 --- a/core/src/test/java/org/elasticsearch/index/store/IndexStoreBWCTests.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.index.store; - -import com.carrotsearch.randomizedtesting.generators.RandomPicks; -import org.apache.lucene.store.*; -import org.apache.lucene.util.Constants; -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.test.ESSingleNodeTestCase; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; -import java.nio.file.Path; -import java.util.Arrays; -import java.util.Locale; - -/** - */ -public class IndexStoreBWCTests extends ESSingleNodeTestCase { - - - public void testOldCoreTypesFail() { - try { - createIndex("test", Settings.builder().put(IndexStoreModule.STORE_TYPE, "nio_fs").build()); - fail(); - } catch (Exception ex) { - } - try { - createIndex("test", Settings.builder().put(IndexStoreModule.STORE_TYPE, "mmap_fs").build()); - fail(); - } catch (Exception ex) { - } - try { - createIndex("test", Settings.builder().put(IndexStoreModule.STORE_TYPE, "simple_fs").build()); - fail(); - } catch (Exception ex) { - } - } - - public void testUpgradeCoreTypes() throws IOException { - String type = RandomPicks.randomFrom(random(), Arrays.asList("nio", "mmap", "simple")); - createIndex("test", Settings.builder() - .put(IndexStoreModule.STORE_TYPE, type+"fs") - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_7_0) - .build()); - - client().admin().indices().prepareClose("test").get(); - client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder() - .put(IndexStoreModule.STORE_TYPE, type + "_fs").build()).get(); - GetSettingsResponse getSettingsResponse = client().admin().indices().prepareGetSettings("test").get(); - String actualType = getSettingsResponse.getSetting("test", IndexStoreModule.STORE_TYPE); - assertEquals(type + "_fs", actualType); - - // now reopen and upgrade - client().admin().indices().prepareOpen("test").get(); - - getSettingsResponse = client().admin().indices().prepareGetSettings("test").get(); - actualType = getSettingsResponse.getSetting("test", IndexStoreModule.STORE_TYPE); - assertEquals(type+"fs", actualType); - } - -} diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 
8764d1a0af4..0b3e12dd9cd 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -34,6 +34,7 @@ import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; @@ -183,14 +184,14 @@ public class TranslogTests extends ESTestCase { @Test public void testRead() throws IOException { - Translog.Location loc1 = translog.add(new Translog.Create("test", "1", new byte[]{1})); - Translog.Location loc2 = translog.add(new Translog.Create("test", "2", new byte[]{2})); + Translog.Location loc1 = translog.add(new Translog.Index("test", "1", new byte[]{1})); + Translog.Location loc2 = translog.add(new Translog.Index("test", "2", new byte[]{2})); assertThat(translog.read(loc1).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{1}))); assertThat(translog.read(loc2).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{2}))); translog.sync(); assertThat(translog.read(loc1).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{1}))); assertThat(translog.read(loc2).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{2}))); - Translog.Location loc3 = translog.add(new Translog.Create("test", "2", new byte[]{3})); + Translog.Location loc3 = translog.add(new Translog.Index("test", "2", new byte[]{3})); assertThat(translog.read(loc3).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{3}))); translog.sync(); assertThat(translog.read(loc3).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{3}))); @@ -215,19 +216,13 @@ public class TranslogTests extends ESTestCase { assertThat(snapshot, SnapshotMatchers.size(0)); snapshot.close(); - addToTranslogAndList(translog, ops, new Translog.Create("test", "1", new byte[]{1})); - snapshot = translog.newSnapshot(); - assertThat(snapshot, SnapshotMatchers.equalsTo(ops)); - assertThat(snapshot.estimatedTotalOperations(), equalTo(1)); - snapshot.close(); - - addToTranslogAndList(translog, ops, new Translog.Index("test", "2", new byte[]{2})); + addToTranslogAndList(translog, ops, new Translog.Index("test", "1", new byte[]{1})); snapshot = translog.newSnapshot(); assertThat(snapshot, SnapshotMatchers.equalsTo(ops)); assertThat(snapshot.estimatedTotalOperations(), equalTo(ops.size())); snapshot.close(); - addToTranslogAndList(translog, ops, new Translog.Delete(newUid("3"))); + addToTranslogAndList(translog, ops, new Translog.Delete(newUid("2"))); snapshot = translog.newSnapshot(); assertThat(snapshot, SnapshotMatchers.equalsTo(ops)); assertThat(snapshot.estimatedTotalOperations(), equalTo(ops.size())); @@ -235,17 +230,13 @@ public class TranslogTests extends ESTestCase { snapshot = translog.newSnapshot(); - Translog.Create create = (Translog.Create) snapshot.next(); - assertThat(create != null, equalTo(true)); - assertThat(create.source().toBytes(), equalTo(new byte[]{1})); - Translog.Index index = (Translog.Index) snapshot.next(); assertThat(index != null, equalTo(true)); - assertThat(index.source().toBytes(), equalTo(new byte[]{2})); + assertThat(index.source().toBytes(), equalTo(new 
byte[]{1})); Translog.Delete delete = (Translog.Delete) snapshot.next(); assertThat(delete != null, equalTo(true)); - assertThat(delete.uid(), equalTo(newUid("3"))); + assertThat(delete.uid(), equalTo(newUid("2"))); assertThat(snapshot.next(), equalTo(null)); @@ -286,38 +277,63 @@ public class TranslogTests extends ESTestCase { final long firstOperationPosition = translog.getFirstOperationPosition(); TranslogStats stats = stats(); assertThat(stats.estimatedNumberOfOperations(), equalTo(0l)); - long lastSize = stats.translogSizeInBytes().bytes(); + long lastSize = stats.getTranslogSizeInBytes(); assertThat((int) firstOperationPosition, greaterThan(CodecUtil.headerLength(TranslogWriter.TRANSLOG_CODEC))); assertThat(lastSize, equalTo(firstOperationPosition)); - - translog.add(new Translog.Create("test", "1", new byte[]{1})); + TranslogStats total = new TranslogStats(); + translog.add(new Translog.Index("test", "1", new byte[]{1})); stats = stats(); + total.add(stats); assertThat(stats.estimatedNumberOfOperations(), equalTo(1l)); - assertThat(stats.translogSizeInBytes().bytes(), greaterThan(lastSize)); - lastSize = stats.translogSizeInBytes().bytes(); + assertThat(stats.getTranslogSizeInBytes(), greaterThan(lastSize)); + lastSize = stats.getTranslogSizeInBytes(); - translog.add(new Translog.Index("test", "2", new byte[]{2})); + translog.add(new Translog.Delete(newUid("2"))); stats = stats(); + total.add(stats); assertThat(stats.estimatedNumberOfOperations(), equalTo(2l)); - assertThat(stats.translogSizeInBytes().bytes(), greaterThan(lastSize)); - lastSize = stats.translogSizeInBytes().bytes(); + assertThat(stats.getTranslogSizeInBytes(), greaterThan(lastSize)); + lastSize = stats.getTranslogSizeInBytes(); translog.add(new Translog.Delete(newUid("3"))); - stats = stats(); - assertThat(stats.estimatedNumberOfOperations(), equalTo(3l)); - assertThat(stats.translogSizeInBytes().bytes(), greaterThan(lastSize)); - lastSize = stats.translogSizeInBytes().bytes(); - - translog.add(new Translog.Delete(newUid("4"))); translog.prepareCommit(); stats = stats(); - assertThat(stats.estimatedNumberOfOperations(), equalTo(4l)); - assertThat(stats.translogSizeInBytes().bytes(), greaterThan(lastSize)); + total.add(stats); + assertThat(stats.estimatedNumberOfOperations(), equalTo(3l)); + assertThat(stats.getTranslogSizeInBytes(), greaterThan(lastSize)); translog.commit(); stats = stats(); + total.add(stats); assertThat(stats.estimatedNumberOfOperations(), equalTo(0l)); - assertThat(stats.translogSizeInBytes().bytes(), equalTo(firstOperationPosition)); + assertThat(stats.getTranslogSizeInBytes(), equalTo(firstOperationPosition)); + assertEquals(6, total.estimatedNumberOfOperations()); + assertEquals(431, total.getTranslogSizeInBytes()); + + BytesStreamOutput out = new BytesStreamOutput(); + total.writeTo(out); + TranslogStats copy = new TranslogStats(); + copy.readFrom(StreamInput.wrap(out.bytes())); + + assertEquals(6, copy.estimatedNumberOfOperations()); + assertEquals(431, copy.getTranslogSizeInBytes()); + assertEquals("\"translog\"{\n" + + " \"operations\" : 6,\n" + + " \"size_in_bytes\" : 431\n" + + "}", copy.toString().trim()); + + try { + new TranslogStats(1, -1); + fail("must be positive"); + } catch (IllegalArgumentException ex) { + //all well + } + try { + new TranslogStats(-1, 1); + fail("must be positive"); + } catch (IllegalArgumentException ex) { + //all well + } } @Test @@ -327,7 +343,7 @@ public class TranslogTests extends ESTestCase { assertThat(snapshot, SnapshotMatchers.size(0)); 
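// (note for the hunks below: Translog.Create has been folded into Translog.Index in this change, so the snapshot tests now add plain Translog.Index ops and expect them back in order)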
snapshot.close(); - addToTranslogAndList(translog, ops, new Translog.Create("test", "1", new byte[]{1})); + addToTranslogAndList(translog, ops, new Translog.Index("test", "1", new byte[]{1})); snapshot = translog.newSnapshot(); assertThat(snapshot, SnapshotMatchers.equalsTo(ops)); @@ -354,7 +370,7 @@ public class TranslogTests extends ESTestCase { assertThat(snapshot, SnapshotMatchers.size(0)); snapshot.close(); - addToTranslogAndList(translog, ops, new Translog.Create("test", "1", new byte[]{1})); + addToTranslogAndList(translog, ops, new Translog.Index("test", "1", new byte[]{1})); Translog.Snapshot snapshot1 = translog.newSnapshot(); addToTranslogAndList(translog, ops, new Translog.Index("test", "2", new byte[]{2})); @@ -375,7 +391,7 @@ public class TranslogTests extends ESTestCase { public void testSnapshotOnClosedTranslog() throws IOException { assertTrue(Files.exists(translogDir.resolve(Translog.getFilename(1)))); - translog.add(new Translog.Create("test", "1", new byte[]{1})); + translog.add(new Translog.Index("test", "1", new byte[]{1})); translog.close(); try { Translog.Snapshot snapshot = translog.newSnapshot(); @@ -388,7 +404,7 @@ public class TranslogTests extends ESTestCase { @Test public void deleteOnSnapshotRelease() throws Exception { ArrayList firstOps = new ArrayList<>(); - addToTranslogAndList(translog, firstOps, new Translog.Create("test", "1", new byte[]{1})); + addToTranslogAndList(translog, firstOps, new Translog.Index("test", "1", new byte[]{1})); Translog.Snapshot firstSnapshot = translog.newSnapshot(); assertThat(firstSnapshot.estimatedTotalOperations(), equalTo(1)); @@ -463,10 +479,7 @@ public class TranslogTests extends ESTestCase { Translog.Operation op; switch (randomFrom(Translog.Operation.Type.values())) { case CREATE: - op = new Translog.Create("test", threadId + "_" + opCount, - randomUnicodeOfLengthBetween(1, 20 * 1024).getBytes("UTF-8")); - break; - case SAVE: + case INDEX: op = new Translog.Index("test", threadId + "_" + opCount, randomUnicodeOfLengthBetween(1, 20 * 1024).getBytes("UTF-8")); break; @@ -475,9 +488,6 @@ public class TranslogTests extends ESTestCase { 1 + randomInt(100000), randomFrom(VersionType.values())); break; - case DELETE_BY_QUERY: - // deprecated - continue; default: throw new ElasticsearchException("not supported op type"); } @@ -508,7 +518,7 @@ public class TranslogTests extends ESTestCase { Translog.Operation expectedOp = locationOperation.operation; assertEquals(expectedOp.opType(), op.opType()); switch (op.opType()) { - case SAVE: + case INDEX: Translog.Index indexOp = (Translog.Index) op; Translog.Index expIndexOp = (Translog.Index) expectedOp; assertEquals(expIndexOp.id(), indexOp.id()); @@ -518,16 +528,6 @@ public class TranslogTests extends ESTestCase { assertEquals(expIndexOp.version(), indexOp.version()); assertEquals(expIndexOp.versionType(), indexOp.versionType()); break; - case CREATE: - Translog.Create createOp = (Translog.Create) op; - Translog.Create expCreateOp = (Translog.Create) expectedOp; - assertEquals(expCreateOp.id(), createOp.id()); - assertEquals(expCreateOp.routing(), createOp.routing()); - assertEquals(expCreateOp.type(), createOp.type()); - assertEquals(expCreateOp.source(), createOp.source()); - assertEquals(expCreateOp.version(), createOp.version()); - assertEquals(expCreateOp.versionType(), createOp.versionType()); - break; case DELETE: Translog.Delete delOp = (Translog.Delete) op; Translog.Delete expDelOp = (Translog.Delete) expectedOp; @@ -550,7 +550,7 @@ public class TranslogTests extends 
ESTestCase { int translogOperations = randomIntBetween(10, 100); for (int op = 0; op < translogOperations; op++) { String ascii = randomAsciiOfLengthBetween(1, 50); - locations.add(translog.add(new Translog.Create("test", "" + op, ascii.getBytes("UTF-8")))); + locations.add(translog.add(new Translog.Index("test", "" + op, ascii.getBytes("UTF-8")))); } translog.sync(); @@ -574,7 +574,7 @@ public class TranslogTests extends ESTestCase { int translogOperations = randomIntBetween(10, 100); for (int op = 0; op < translogOperations; op++) { String ascii = randomAsciiOfLengthBetween(1, 50); - locations.add(translog.add(new Translog.Create("test", "" + op, ascii.getBytes("UTF-8")))); + locations.add(translog.add(new Translog.Index("test", "" + op, ascii.getBytes("UTF-8")))); } translog.sync(); @@ -638,7 +638,7 @@ public class TranslogTests extends ESTestCase { @Test public void testVerifyTranslogIsNotDeleted() throws IOException { assertFileIsPresent(translog, 1); - translog.add(new Translog.Create("test", "1", new byte[]{1})); + translog.add(new Translog.Index("test", "1", new byte[]{1})); Translog.Snapshot snapshot = translog.newSnapshot(); assertThat(snapshot, SnapshotMatchers.size(1)); assertFileIsPresent(translog, 1); @@ -686,17 +686,12 @@ public class TranslogTests extends ESTestCase { final Translog.Operation op; switch (Translog.Operation.Type.values()[((int) (id % Translog.Operation.Type.values().length))]) { case CREATE: - op = new Translog.Create("type", "" + id, new byte[]{(byte) id}); - break; - case SAVE: + case INDEX: op = new Translog.Index("type", "" + id, new byte[]{(byte) id}); break; case DELETE: op = new Translog.Delete(newUid("" + id)); break; - case DELETE_BY_QUERY: - // deprecated - continue; default: throw new ElasticsearchException("unknown type"); } @@ -830,12 +825,12 @@ public class TranslogTests extends ESTestCase { int translogOperations = randomIntBetween(10, 100); int count = 0; for (int op = 0; op < translogOperations; op++) { - final Translog.Location location = translog.add(new Translog.Create("test", "" + op, Integer.toString(++count).getBytes(Charset.forName("UTF-8")))); + final Translog.Location location = translog.add(new Translog.Index("test", "" + op, Integer.toString(++count).getBytes(Charset.forName("UTF-8")))); if (randomBoolean()) { assertTrue("at least one operation pending", translog.syncNeeded()); assertTrue("this operation has not been synced", translog.ensureSynced(location)); assertFalse("the last call to ensureSycned synced all previous ops", translog.syncNeeded()); // we are the last location so everything should be synced - translog.add(new Translog.Create("test", "" + op, Integer.toString(++count).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "" + op, Integer.toString(++count).getBytes(Charset.forName("UTF-8")))); assertTrue("one pending operation", translog.syncNeeded()); assertFalse("this op has been synced before", translog.ensureSynced(location)); // not syncing now assertTrue("we only synced a previous operation yet", translog.syncNeeded()); @@ -858,7 +853,7 @@ public class TranslogTests extends ESTestCase { int translogOperations = randomIntBetween(10, 100); int count = 0; for (int op = 0; op < translogOperations; op++) { - locations.add(translog.add(new Translog.Create("test", "" + op, Integer.toString(++count).getBytes(Charset.forName("UTF-8"))))); + locations.add(translog.add(new Translog.Index("test", "" + op, Integer.toString(++count).getBytes(Charset.forName("UTF-8"))))); if (rarely() && 
translogOperations > op+1) { translog.commit(); } @@ -887,14 +882,14 @@ public class TranslogTests extends ESTestCase { int translogOperations = randomIntBetween(10, 100); int lastSynced = -1; for (int op = 0; op < translogOperations; op++) { - locations.add(translog.add(new Translog.Create("test", "" + op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); + locations.add(translog.add(new Translog.Index("test", "" + op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); if (frequently()) { translog.sync(); lastSynced = op; } } assertEquals(translogOperations, translog.totalOperations()); - final Translog.Location lastLocation = translog.add(new Translog.Create("test", "" + translogOperations, Integer.toString(translogOperations).getBytes(Charset.forName("UTF-8")))); + final Translog.Location lastLocation = translog.add(new Translog.Index("test", "" + translogOperations, Integer.toString(translogOperations).getBytes(Charset.forName("UTF-8")))); final Checkpoint checkpoint = Checkpoint.read(translog.location().resolve(Translog.CHECKPOINT_FILE_NAME)); try (final ImmutableTranslogReader reader = translog.openReader(translog.location().resolve(Translog.getFilename(translog.currentFileGeneration())), checkpoint)) { @@ -975,7 +970,7 @@ public class TranslogTests extends ESTestCase { int minUncommittedOp = -1; final boolean commitOften = randomBoolean(); for (int op = 0; op < translogOperations; op++) { - locations.add(translog.add(new Translog.Create("test", "" + op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); + locations.add(translog.add(new Translog.Index("test", "" + op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); final boolean commit = commitOften ? frequently() : rarely(); if (commit && op < translogOperations-1) { translog.commit(); @@ -1017,7 +1012,7 @@ public class TranslogTests extends ESTestCase { Translog.TranslogGeneration translogGeneration = null; final boolean sync = randomBoolean(); for (int op = 0; op < translogOperations; op++) { - locations.add(translog.add(new Translog.Create("test", "" + op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); + locations.add(translog.add(new Translog.Index("test", "" + op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); if (op == prepareOp) { translogGeneration = translog.getGeneration(); translog.prepareCommit(); @@ -1068,7 +1063,7 @@ public class TranslogTests extends ESTestCase { List ops = new ArrayList<>(); int translogOperations = randomIntBetween(10, 100); for (int op = 0; op < translogOperations; op++) { - Translog.Create test = new Translog.Create("test", "" + op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))); + Translog.Index test = new Translog.Index("test", "" + op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))); ops.add(test); } Translog.writeOperations(out, ops); @@ -1083,8 +1078,8 @@ public class TranslogTests extends ESTestCase { int translogOperations = randomIntBetween(10, 100); try(Translog translog2 = create(createTempDir())) { for (int op = 0; op < translogOperations; op++) { - locations.add(translog.add(new Translog.Create("test", "" + op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); - locations2.add(translog2.add(new Translog.Create("test", "" + op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); + locations.add(translog.add(new Translog.Index("test", "" + op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); + locations2.add(translog2.add(new Translog.Index("test", "" + op, 
Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); } int iters = randomIntBetween(10, 100); for (int i = 0; i < iters; i++) { @@ -1110,7 +1105,7 @@ public class TranslogTests extends ESTestCase { int translogOperations = randomIntBetween(1, 10); int firstUncommitted = 0; for (int op = 0; op < translogOperations; op++) { - locations.add(translog.add(new Translog.Create("test", "" + op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); + locations.add(translog.add(new Translog.Index("test", "" + op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); if (randomBoolean()) { translog.commit(); firstUncommitted = op + 1; @@ -1138,147 +1133,4 @@ public class TranslogTests extends ESTestCase { assertNull(snapshot.next()); } } - - public void testUpgradeOldTranslogFiles() throws IOException { - List indexes = new ArrayList<>(); - try (DirectoryStream stream = Files.newDirectoryStream(getBwcIndicesPath(), "index-*.zip")) { - for (Path path : stream) { - indexes.add(path); - } - } - TranslogConfig config = this.translog.getConfig(); - Translog.TranslogGeneration gen = translog.getGeneration(); - this.translog.close(); - try { - Translog.upgradeLegacyTranslog(logger, translog.getConfig()); - fail("no generation set"); - } catch (IllegalArgumentException ex) { - - } - translog.getConfig().setTranslogGeneration(gen); - try { - Translog.upgradeLegacyTranslog(logger, translog.getConfig()); - fail("already upgraded generation set"); - } catch (IllegalArgumentException ex) { - - } - - for (Path indexFile : indexes) { - final String indexName = indexFile.getFileName().toString().replace(".zip", "").toLowerCase(Locale.ROOT); - Version version = Version.fromString(indexName.replace("index-", "")); - if (version.onOrAfter(Version.V_2_0_0_beta1)) { - continue; - } - Path unzipDir = createTempDir(); - Path unzipDataDir = unzipDir.resolve("data"); - // decompress the index - try (InputStream stream = Files.newInputStream(indexFile)) { - TestUtil.unzip(stream, unzipDir); - } - // check it is unique - assertTrue(Files.exists(unzipDataDir)); - Path[] list = FileSystemUtils.files(unzipDataDir); - if (list.length != 1) { - throw new IllegalStateException("Backwards index must contain exactly one cluster but was " + list.length); - } - // the bwc scripts packs the indices under this path - Path src = list[0].resolve("nodes/0/indices/" + indexName); - Path translog = list[0].resolve("nodes/0/indices/" + indexName).resolve("0").resolve("translog"); - - assertTrue("[" + indexFile + "] missing index dir: " + src.toString(), Files.exists(src)); - assertTrue("[" + indexFile + "] missing translog dir: " + translog.toString(), Files.exists(translog)); - Path[] tlogFiles = FileSystemUtils.files(translog); - assertEquals(tlogFiles.length, 1); - final long size = Files.size(tlogFiles[0]); - - final long generation = parseLegacyTranslogFile(tlogFiles[0]); - assertTrue(generation >= 1); - logger.info("upgrading index {} file: {} size: {}", indexName, tlogFiles[0].getFileName(), size); - TranslogConfig upgradeConfig = new TranslogConfig(config.getShardId(), translog, config.getIndexSettings(), config.getDurabilty(), config.getBigArrays(), config.getThreadPool()); - upgradeConfig.setTranslogGeneration(new Translog.TranslogGeneration(null, generation)); - Translog.upgradeLegacyTranslog(logger, upgradeConfig); - try (Translog upgraded = new Translog(upgradeConfig)) { - assertEquals(generation + 1, upgraded.getGeneration().translogFileGeneration); - assertEquals(upgraded.getRecoveredReaders().size(), 1); - 
final long headerSize; - if (version.before(Version.V_1_4_0_Beta1)) { - assertTrue(upgraded.getRecoveredReaders().get(0).getClass().toString(), upgraded.getRecoveredReaders().get(0).getClass() == LegacyTranslogReader.class); - headerSize = 0; - } else { - assertTrue(upgraded.getRecoveredReaders().get(0).getClass().toString(), upgraded.getRecoveredReaders().get(0).getClass() == LegacyTranslogReaderBase.class); - headerSize = CodecUtil.headerLength(TranslogWriter.TRANSLOG_CODEC); - } - List operations = new ArrayList<>(); - try (Translog.Snapshot snapshot = upgraded.newSnapshot()) { - Translog.Operation op = null; - while ((op = snapshot.next()) != null) { - operations.add(op); - } - } - if (size > headerSize) { - assertFalse(operations.toString(), operations.isEmpty()); - } else { - assertTrue(operations.toString(), operations.isEmpty()); - } - } - } - } - - /** - * this tests a set of files that has some of the operations flushed with a buffered translog such that tlogs are truncated. - * 3 of the 6 files are created with ES 1.3 and the rest is created wiht ES 1.4 such that both the checksummed as well as the - * super old version of the translog without a header is tested. - */ - public void testOpenAndReadTruncatedLegacyTranslogs() throws IOException { - Path zip = getDataPath("/org/elasticsearch/index/translog/legacy_translogs.zip"); - Path unzipDir = createTempDir(); - try (InputStream stream = Files.newInputStream(zip)) { - TestUtil.unzip(stream, unzipDir); - } - TranslogConfig config = this.translog.getConfig(); - int count = 0; - try (DirectoryStream stream = Files.newDirectoryStream(unzipDir)) { - - for (Path legacyTranslog : stream) { - logger.debug("upgrading {} ", legacyTranslog.getFileName()); - Path directory = legacyTranslog.resolveSibling("translog_" + count++); - Files.createDirectories(directory); - Files.copy(legacyTranslog, directory.resolve(legacyTranslog.getFileName())); - TranslogConfig upgradeConfig = new TranslogConfig(config.getShardId(), directory, config.getIndexSettings(), config.getDurabilty(), config.getBigArrays(), config.getThreadPool()); - try { - Translog.upgradeLegacyTranslog(logger, upgradeConfig); - fail("no generation set"); - } catch (IllegalArgumentException ex) { - // expected - } - long generation = parseLegacyTranslogFile(legacyTranslog); - upgradeConfig.setTranslogGeneration(new Translog.TranslogGeneration(null, generation)); - Translog.upgradeLegacyTranslog(logger, upgradeConfig); - try (Translog tlog = new Translog(upgradeConfig)) { - List operations = new ArrayList<>(); - try (Translog.Snapshot snapshot = tlog.newSnapshot()) { - Translog.Operation op = null; - while ((op = snapshot.next()) != null) { - operations.add(op); - } - } - logger.debug("num ops recovered: {} for file {} ", operations.size(), legacyTranslog.getFileName()); - assertFalse(operations.isEmpty()); - } - } - } - } - - public static long parseLegacyTranslogFile(Path translogFile) { - final String fileName = translogFile.getFileName().toString(); - final Matcher matcher = PARSE_LEGACY_ID_PATTERN.matcher(fileName); - if (matcher.matches()) { - try { - return Long.parseLong(matcher.group(1)); - } catch (NumberFormatException e) { - throw new IllegalStateException("number formatting issue in a file that passed PARSE_STRICT_ID_PATTERN: " + fileName + "]", e); - } - } - throw new IllegalArgumentException("can't parse id from file: " + fileName); - } } diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogVersionTests.java 
b/core/src/test/java/org/elasticsearch/index/translog/TranslogVersionTests.java index 451fdf34025..283124d09ed 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogVersionTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogVersionTests.java @@ -45,7 +45,7 @@ public class TranslogVersionTests extends ESTestCase { assertThat("a version0 stream is returned", reader instanceof LegacyTranslogReader, equalTo(true)); try (final Translog.Snapshot snapshot = reader.newSnapshot()) { final Translog.Operation operation = snapshot.next(); - assertThat("operation is the correct type correctly", operation.opType() == Translog.Operation.Type.SAVE, equalTo(true)); + assertThat("operation is the correct type correctly", operation.opType() == Translog.Operation.Type.INDEX, equalTo(true)); Translog.Index op = (Translog.Index) operation; assertThat(op.id(), equalTo("1")); assertThat(op.type(), equalTo("doc")); @@ -73,8 +73,8 @@ public class TranslogVersionTests extends ESTestCase { Translog.Operation operation = snapshot.next(); - assertThat("operation is the correct type correctly", operation.opType() == Translog.Operation.Type.CREATE, equalTo(true)); - Translog.Create op = (Translog.Create) operation; + assertThat("operation is the correct type correctly", operation.opType() == Translog.Operation.Type.INDEX, equalTo(true)); + Translog.Index op = (Translog.Index) operation; assertThat(op.id(), equalTo("Bwiq98KFSb6YjJQGeSpeiw")); assertThat(op.type(), equalTo("doc")); assertThat(op.source().toUtf8(), equalTo("{\"body\": \"foo\"}")); diff --git a/core/src/test/java/org/elasticsearch/indices/memory/IndexingMemoryControllerIT.java b/core/src/test/java/org/elasticsearch/indices/memory/IndexingMemoryControllerIT.java index aaca771853b..e14cc226646 100644 --- a/core/src/test/java/org/elasticsearch/indices/memory/IndexingMemoryControllerIT.java +++ b/core/src/test/java/org/elasticsearch/indices/memory/IndexingMemoryControllerIT.java @@ -82,7 +82,7 @@ public class IndexingMemoryControllerIT extends ESIntegTestCase { index("test1", "type", "1", "f", 1); // make sure the shard buffer was set to the inactive size - final ByteSizeValue inactiveBuffer = EngineConfig.INACTIVE_SHARD_INDEXING_BUFFER; + final ByteSizeValue inactiveBuffer = IndexingMemoryController.INACTIVE_SHARD_INDEXING_BUFFER; if (awaitBusy(() -> getIWBufferSize("test1") == inactiveBuffer.bytes()) == false) { fail("failed to update shard indexing buffer size for test1 index to [" + inactiveBuffer + "]; got: " + getIWBufferSize("test1")); } diff --git a/core/src/test/java/org/elasticsearch/indices/memory/IndexingMemoryControllerTests.java b/core/src/test/java/org/elasticsearch/indices/memory/IndexingMemoryControllerTests.java index f6e21db396a..d3d9e961a61 100644 --- a/core/src/test/java/org/elasticsearch/indices/memory/IndexingMemoryControllerTests.java +++ b/core/src/test/java/org/elasticsearch/indices/memory/IndexingMemoryControllerTests.java @@ -22,13 +22,17 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.translog.TranslogConfig; import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set;
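// (context for the MockController changes below: the old per-shard translog-id/op-count bookkeeping is removed; shard activity is now tracked through lastIndexTimeNanos and the checkIdle/getShardActive hooks of the reworked IndexingMemoryController)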
import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; @@ -39,44 +43,28 @@ public class IndexingMemoryControllerTests extends ESTestCase { final static ByteSizeValue INACTIVE = new ByteSizeValue(-1); - final Map<ShardId, Long> translogIds = new HashMap<>(); - final Map<ShardId, Long> translogOps = new HashMap<>(); - final Map<ShardId, ByteSizeValue> indexingBuffers = new HashMap<>(); final Map<ShardId, ByteSizeValue> translogBuffers = new HashMap<>(); + final Map<ShardId, Long> lastIndexTimeNanos = new HashMap<>(); + final Set<ShardId> activeShards = new HashSet<>(); + long currentTimeSec = TimeValue.timeValueNanos(System.nanoTime()).seconds(); public MockController(Settings settings) { super(Settings.builder() .put(SHARD_INACTIVE_INTERVAL_TIME_SETTING, "200h") // disable it - .put(SHARD_INACTIVE_TIME_SETTING, "0s") // immediate + .put(SHARD_INACTIVE_TIME_SETTING, "1ms") // nearly immediate .put(settings) .build(), null, null, 100 * 1024 * 1024); // fix jvm mem size to 100mb } - public void incTranslog(ShardId shard1, int id, int ops) { - setTranslog(shard1, translogIds.get(shard1) + id, translogOps.get(shard1) + ops); - } - - public void setTranslog(ShardId id, long translogId, long ops) { - translogIds.put(id, translogId); - translogOps.put(id, ops); - } - public void deleteShard(ShardId id) { - translogIds.remove(id); - translogOps.remove(id); indexingBuffers.remove(id); translogBuffers.remove(id); } - public void assertActive(ShardId id) { - assertThat(indexingBuffers.get(id), not(equalTo(INACTIVE))); - assertThat(translogBuffers.get(id), not(equalTo(INACTIVE))); - } - public void assertBuffers(ShardId id, ByteSizeValue indexing, ByteSizeValue translog) { assertThat(indexingBuffers.get(id), equalTo(indexing)); assertThat(translogBuffers.get(id), equalTo(translog)); @@ -94,29 +82,17 @@ public class IndexingMemoryControllerTests extends ESTestCase { @Override protected List<ShardId> availableShards() { - return new ArrayList<>(translogIds.keySet()); + return new ArrayList<>(indexingBuffers.keySet()); } @Override protected boolean shardAvailable(ShardId shardId) { - return translogIds.containsKey(shardId); + return indexingBuffers.containsKey(shardId); } @Override - protected void markShardAsInactive(ShardId shardId) { - indexingBuffers.put(shardId, INACTIVE); - translogBuffers.put(shardId, INACTIVE); - } - - @Override - protected ShardIndexingStatus getTranslogStatus(ShardId shardId) { - if (!shardAvailable(shardId)) { - return null; - } - ShardIndexingStatus status = new ShardIndexingStatus(); - status.translogId = translogIds.get(shardId); - status.translogNumberOfOperations = translogOps.get(shardId); - return status; + protected Boolean getShardActive(ShardId shardId) { + return activeShards.contains(shardId); } @Override @@ -125,12 +101,34 @@ public class IndexingMemoryControllerTests extends ESTestCase { translogBuffers.put(shardId, shardTranslogBufferSize); } + // mirrors IndexShard#checkIdle: once a shard has not indexed for inactiveTimeNS, flip its buffers to the inactive size + @Override + protected Boolean checkIdle(ShardId shardId, long inactiveTimeNS) { + Long ns = lastIndexTimeNanos.get(shardId); + if (ns == null) { + return null; + } else if (currentTimeInNanos() - ns >= inactiveTimeNS) { + indexingBuffers.put(shardId, INACTIVE); + translogBuffers.put(shardId, INACTIVE); + activeShards.remove(shardId); + return true; + } else { + return false; + } + } + public void incrementTimeSec(int sec) { currentTimeSec += sec; } - public void simulateFlush(ShardId shard) { - setTranslog(shard, translogIds.get(shard) + 1, 0); + public void simulateIndexing(ShardId shardId) { + lastIndexTimeNanos.put(shardId, currentTimeInNanos()); + if (indexingBuffers.containsKey(shardId) == false) { + // First 
time we are seeing this shard; start it off with inactive buffers as IndexShard does: + indexingBuffers.put(shardId, IndexingMemoryController.INACTIVE_SHARD_INDEXING_BUFFER); + translogBuffers.put(shardId, IndexingMemoryController.INACTIVE_SHARD_TRANSLOG_BUFFER); + } + activeShards.add(shardId); + forceCheck(); } } @@ -139,14 +137,12 @@ public class IndexingMemoryControllerTests extends ESTestCase { .put(IndexingMemoryController.INDEX_BUFFER_SIZE_SETTING, "10mb") .put(IndexingMemoryController.TRANSLOG_BUFFER_SIZE_SETTING, "100kb").build()); final ShardId shard1 = new ShardId("test", 1); - controller.setTranslog(shard1, randomInt(10), randomInt(10)); - controller.forceCheck(); + controller.simulateIndexing(shard1); controller.assertBuffers(shard1, new ByteSizeValue(10, ByteSizeUnit.MB), new ByteSizeValue(64, ByteSizeUnit.KB)); // translog is maxed at 64K // add another shard final ShardId shard2 = new ShardId("test", 2); - controller.setTranslog(shard2, randomInt(10), randomInt(10)); - controller.forceCheck(); + controller.simulateIndexing(shard2); controller.assertBuffers(shard1, new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(50, ByteSizeUnit.KB)); controller.assertBuffers(shard2, new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(50, ByteSizeUnit.KB)); @@ -161,8 +157,7 @@ public class IndexingMemoryControllerTests extends ESTestCase { // add a new one final ShardId shard3 = new ShardId("test", 3); - controller.setTranslog(shard3, randomInt(10), randomInt(10)); - controller.forceCheck(); + controller.simulateIndexing(shard3); controller.assertBuffers(shard3, new ByteSizeValue(10, ByteSizeUnit.MB), new ByteSizeValue(64, ByteSizeUnit.KB)); // translog is maxed at 64K } @@ -174,48 +169,42 @@ public class IndexingMemoryControllerTests extends ESTestCase { .build()); final ShardId shard1 = new ShardId("test", 1); - controller.setTranslog(shard1, 0, 0); + controller.simulateIndexing(shard1); final ShardId shard2 = new ShardId("test", 2); - controller.setTranslog(shard2, 0, 0); - controller.forceCheck(); + controller.simulateIndexing(shard2); controller.assertBuffers(shard1, new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(50, ByteSizeUnit.KB)); controller.assertBuffers(shard2, new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(50, ByteSizeUnit.KB)); // index into both shards, move the clock and see that they are still active - controller.setTranslog(shard1, randomInt(2), randomInt(2) + 1); - controller.setTranslog(shard2, randomInt(2) + 1, randomInt(2)); - // the controller doesn't know when the ops happened, so even if this is more - // than the inactive time the shard is still marked as active + controller.simulateIndexing(shard1); + controller.simulateIndexing(shard2); + controller.incrementTimeSec(10); controller.forceCheck(); - controller.assertBuffers(shard1, new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(50, ByteSizeUnit.KB)); - controller.assertBuffers(shard2, new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(50, ByteSizeUnit.KB)); - // index into one shard only, see other shard is made inactive correctly - controller.incTranslog(shard1, randomInt(2), randomInt(2) + 1); - controller.forceCheck(); // register what happened with the controller (shard is still active) - controller.incrementTimeSec(3); // increment but not enough - controller.forceCheck(); - controller.assertBuffers(shard1, new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(50, ByteSizeUnit.KB)); - controller.assertBuffers(shard2, new ByteSizeValue(5, ByteSizeUnit.MB), new 
ByteSizeValue(50, ByteSizeUnit.KB)); + // both shards now inactive + controller.assertInActive(shard1); + controller.assertInActive(shard2); - controller.incrementTimeSec(3); // increment some more + // index into one shard only, see it becomes active + controller.simulateIndexing(shard1); + controller.assertBuffers(shard1, new ByteSizeValue(10, ByteSizeUnit.MB), new ByteSizeValue(64, ByteSizeUnit.KB)); + controller.assertInActive(shard2); + + controller.incrementTimeSec(3); // increment but not enough to become inactive controller.forceCheck(); controller.assertBuffers(shard1, new ByteSizeValue(10, ByteSizeUnit.MB), new ByteSizeValue(64, ByteSizeUnit.KB)); controller.assertInActive(shard2); - if (randomBoolean()) { - // once a shard gets inactive it will be synced flushed and a new translog generation will be made - controller.simulateFlush(shard2); - controller.forceCheck(); - controller.assertInActive(shard2); - } + controller.incrementTimeSec(3); // increment some more + controller.forceCheck(); + controller.assertInActive(shard1); + controller.assertInActive(shard2); // index some and shard becomes immediately active - controller.incTranslog(shard2, randomInt(2), 1 + randomInt(2)); // we must make sure translog ops is never 0 - controller.forceCheck(); - controller.assertBuffers(shard1, new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(50, ByteSizeUnit.KB)); - controller.assertBuffers(shard2, new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(50, ByteSizeUnit.KB)); + controller.simulateIndexing(shard2); + controller.assertInActive(shard1); + controller.assertBuffers(shard2, new ByteSizeValue(10, ByteSizeUnit.MB), new ByteSizeValue(64, ByteSizeUnit.KB)); } public void testMinShardBufferSizes() { @@ -273,10 +262,9 @@ public class IndexingMemoryControllerTests extends ESTestCase { protected void assertTwoActiveShards(MockController controller, ByteSizeValue indexBufferSize, ByteSizeValue translogBufferSize) { final ShardId shard1 = new ShardId("test", 1); - controller.setTranslog(shard1, 0, 0); + controller.simulateIndexing(shard1); final ShardId shard2 = new ShardId("test", 2); - controller.setTranslog(shard2, 0, 0); - controller.forceCheck(); + controller.simulateIndexing(shard2); controller.assertBuffers(shard1, indexBufferSize, translogBufferSize); controller.assertBuffers(shard2, indexBufferSize, translogBufferSize); diff --git a/core/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java b/core/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java index 234f3eada9a..4f0bd600be7 100644 --- a/core/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java +++ b/core/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java @@ -188,7 +188,7 @@ public class RandomExceptionCircuitBreakerIT extends ESIntegTestCase { for (String node : internalCluster().getNodeNames()) { final IndicesFieldDataCache fdCache = internalCluster().getInstance(IndicesFieldDataCache.class, node); // Clean up the cache, ensuring that entries' listeners have been called - fdCache.getCache().cleanUp(); + fdCache.getCache().refresh(); } NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats() .clear().setBreaker(true).execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/plugins/PluginManagerIT.java b/core/src/test/java/org/elasticsearch/plugins/PluginManagerIT.java index 1ffce8d71c2..e98794cfc99 100644 --- 
a/core/src/test/java/org/elasticsearch/plugins/PluginManagerIT.java +++ b/core/src/test/java/org/elasticsearch/plugins/PluginManagerIT.java @@ -587,7 +587,6 @@ public class PluginManagerIT extends ESIntegTestCase { PluginManager.checkForOfficialPlugins("analysis-phonetic"); PluginManager.checkForOfficialPlugins("analysis-smartcn"); PluginManager.checkForOfficialPlugins("analysis-stempel"); - PluginManager.checkForOfficialPlugins("cloud-gce"); PluginManager.checkForOfficialPlugins("delete-by-query"); PluginManager.checkForOfficialPlugins("lang-expression"); PluginManager.checkForOfficialPlugins("lang-groovy"); @@ -598,6 +597,7 @@ public class PluginManagerIT extends ESIntegTestCase { PluginManager.checkForOfficialPlugins("discovery-multicast"); PluginManager.checkForOfficialPlugins("discovery-azure"); PluginManager.checkForOfficialPlugins("discovery-ec2"); + PluginManager.checkForOfficialPlugins("discovery-gce"); PluginManager.checkForOfficialPlugins("repository-azure"); PluginManager.checkForOfficialPlugins("repository-s3"); PluginManager.checkForOfficialPlugins("store-smb"); diff --git a/core/src/test/java/org/elasticsearch/plugins/PluginManagerPermissionTests.java b/core/src/test/java/org/elasticsearch/plugins/PluginManagerPermissionTests.java new file mode 100644 index 00000000000..374c77616c4 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/plugins/PluginManagerPermissionTests.java @@ -0,0 +1,300 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.plugins; + +import org.apache.lucene.util.LuceneTestCase; +import org.elasticsearch.Version; +import org.elasticsearch.common.cli.CliToolTestCase.CaptureOutputTerminal; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.env.Environment; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.net.URL; +import java.nio.charset.Charset; +import java.nio.file.*; +import java.nio.file.attribute.BasicFileAttributes; +import java.nio.file.attribute.PosixFilePermissions; +import java.util.zip.ZipEntry; +import java.util.zip.ZipOutputStream; + +import static org.elasticsearch.common.settings.Settings.settingsBuilder; +import static org.elasticsearch.plugins.PluginInfoTests.writeProperties; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*; +import static org.hamcrest.Matchers.*; + +// there are some lucene file systems that seem to cause problems (deleted files, dirs instead of files) +@LuceneTestCase.SuppressFileSystems("*") +public class PluginManagerPermissionTests extends ESTestCase { + + private String pluginName = "my-plugin"; + private CaptureOutputTerminal terminal = new CaptureOutputTerminal(); + private Environment environment; + private boolean supportsPermissions; + + @Before + public void setup() { + Path tempDir = createTempDir(); + Settings.Builder settingsBuilder = settingsBuilder().put("path.home", tempDir); + if (randomBoolean()) { + settingsBuilder.put("path.plugins", createTempDir()); + } + + if (randomBoolean()) { + settingsBuilder.put("path.conf", createTempDir()); + } + + environment = new Environment(settingsBuilder.build()); + + supportsPermissions = tempDir.getFileSystem().supportedFileAttributeViews().contains("posix"); + } + + public void testThatUnaccessibleBinDirectoryAbortsPluginInstallation() throws Exception { + assumeTrue("File system does not support permissions, skipping", supportsPermissions); + + URL pluginUrl = createPlugin(true, randomBoolean()); + + Path binPath = environment.binFile().resolve(pluginName); + Files.createDirectories(binPath); + try { + Files.setPosixFilePermissions(binPath, PosixFilePermissions.fromString("---------")); + + PluginManager pluginManager = new PluginManager(environment, pluginUrl, PluginManager.OutputMode.VERBOSE, TimeValue.timeValueSeconds(10)); + pluginManager.downloadAndExtract(pluginName, terminal); + + fail("Expected IOException but did not happen"); + } catch (IOException e) { + assertFileNotExists(environment.pluginsFile().resolve(pluginName)); + assertFileNotExists(environment.configFile().resolve(pluginName)); + // exists, because of our weird permissions above + assertDirectoryExists(environment.binFile().resolve(pluginName)); + + assertThat(terminal.getTerminalOutput(), hasItem(containsString("Error copying bin directory "))); + } finally { + Files.setPosixFilePermissions(binPath, PosixFilePermissions.fromString("rwxrwxrwx")); + } + } + + public void testThatUnaccessiblePluginConfigDirectoryAbortsPluginInstallation() throws Exception { + assumeTrue("File system does not support permissions, skipping", supportsPermissions); + + URL pluginUrl = createPlugin(randomBoolean(), true); + + Path path = environment.configFile().resolve(pluginName); + Files.createDirectories(path); + Files.createFile(path.resolve("my-custom-config.yaml")); + Path binPath = environment.binFile().resolve(pluginName); + Files.createDirectories(binPath); + + try 
{ + Files.setPosixFilePermissions(path.resolve("my-custom-config.yaml"), PosixFilePermissions.fromString("---------")); + Files.setPosixFilePermissions(path, PosixFilePermissions.fromString("---------")); + + PluginManager pluginManager = new PluginManager(environment, pluginUrl, PluginManager.OutputMode.VERBOSE, TimeValue.timeValueSeconds(10)); + pluginManager.downloadAndExtract(pluginName, terminal); + + fail("Expected IOException but did not happen, terminal output was " + terminal.getTerminalOutput()); + } catch (IOException e) { + assertFileNotExists(environment.pluginsFile().resolve(pluginName)); + assertFileNotExists(environment.binFile().resolve(pluginName)); + // exists, because of our weird permissions above + assertDirectoryExists(environment.configFile().resolve(pluginName)); + + assertThat(terminal.getTerminalOutput(), hasItem(containsString("Error copying config directory "))); + } finally { + Files.setPosixFilePermissions(path, PosixFilePermissions.fromString("rwxrwxrwx")); + Files.setPosixFilePermissions(path.resolve("my-custom-config.yaml"), PosixFilePermissions.fromString("rwxrwxrwx")); + } + } + + // config/bin are not writable, but the plugin does not need to put anything into them + public void testThatPluginWithoutBinAndConfigWorksEvenIfPermissionsAreWrong() throws Exception { + assumeTrue("File system does not support permissions, skipping", supportsPermissions); + + URL pluginUrl = createPlugin(false, false); + Path path = environment.configFile().resolve(pluginName); + Files.createDirectories(path); + Files.createFile(path.resolve("my-custom-config.yaml")); + Path binPath = environment.binFile().resolve(pluginName); + Files.createDirectories(binPath); + + try { + Files.setPosixFilePermissions(path.resolve("my-custom-config.yaml"), PosixFilePermissions.fromString("---------")); + Files.setPosixFilePermissions(path, PosixFilePermissions.fromString("---------")); + Files.setPosixFilePermissions(binPath, PosixFilePermissions.fromString("---------")); + + PluginManager pluginManager = new PluginManager(environment, pluginUrl, PluginManager.OutputMode.VERBOSE, TimeValue.timeValueSeconds(10)); + pluginManager.downloadAndExtract(pluginName, terminal); + } finally { + Files.setPosixFilePermissions(binPath, PosixFilePermissions.fromString("rwxrwxrwx")); + Files.setPosixFilePermissions(path, PosixFilePermissions.fromString("rwxrwxrwx")); + Files.setPosixFilePermissions(path.resolve("my-custom-config.yaml"), PosixFilePermissions.fromString("rwxrwxrwx")); + } + + } + + // plugins directory not accessible; should leave no leftover directories + public void testThatNonWritablePluginsDirectoryLeavesNoLeftOver() throws Exception { + assumeTrue("File system does not support permissions, skipping", supportsPermissions); + + URL pluginUrl = createPlugin(true, true); + Files.createDirectories(environment.pluginsFile()); + + try { + Files.setPosixFilePermissions(environment.pluginsFile(), PosixFilePermissions.fromString("---------")); + PluginManager pluginManager = new PluginManager(environment, pluginUrl, PluginManager.OutputMode.VERBOSE, TimeValue.timeValueSeconds(10)); + try { + pluginManager.downloadAndExtract(pluginName, terminal); + fail("Expected IOException due to read-only plugins/ directory"); + } catch (IOException e) { + assertFileNotExists(environment.binFile().resolve(pluginName)); + assertFileNotExists(environment.configFile().resolve(pluginName)); + + Files.setPosixFilePermissions(environment.pluginsFile(), PosixFilePermissions.fromString("rwxrwxrwx")); +
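+ // permissions restored just above so the test can look inside plugins/ again: the directory itself must still exist, but nothing may have been extracted into it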
assertDirectoryExists(environment.pluginsFile()); + assertFileNotExists(environment.pluginsFile().resolve(pluginName)); + } + } finally { + Files.setPosixFilePermissions(environment.pluginsFile(), PosixFilePermissions.fromString("rwxrwxrwx")); + } + } + + public void testThatUnwriteableBackupFilesInConfigurationDirectoryAreReplaced() throws Exception { + assumeTrue("File system does not support permissions, skipping", supportsPermissions); + + boolean pluginContainsExecutables = randomBoolean(); + URL pluginUrl = createPlugin(pluginContainsExecutables, true); + Files.createDirectories(environment.configFile().resolve(pluginName)); + + Path configFile = environment.configFile().resolve(pluginName).resolve("my-custom-config.yaml"); + Files.createFile(configFile); + Path backupConfigFile = environment.configFile().resolve(pluginName).resolve("my-custom-config.yaml.new"); + Files.createFile(backupConfigFile); + Files.write(backupConfigFile, "foo".getBytes(Charset.forName("UTF-8"))); + + PluginManager pluginManager = new PluginManager(environment, pluginUrl, PluginManager.OutputMode.VERBOSE, TimeValue.timeValueSeconds(10)); + try { + Files.setPosixFilePermissions(backupConfigFile, PosixFilePermissions.fromString("---------")); + + pluginManager.downloadAndExtract(pluginName, terminal); + + if (pluginContainsExecutables) { + assertDirectoryExists(environment.binFile().resolve(pluginName)); + } + assertDirectoryExists(environment.pluginsFile().resolve(pluginName)); + assertDirectoryExists(environment.configFile().resolve(pluginName)); + + assertFileExists(backupConfigFile); + Files.setPosixFilePermissions(backupConfigFile, PosixFilePermissions.fromString("rw-rw-rw-")); + String content = new String(Files.readAllBytes(backupConfigFile), Charset.forName("UTF-8")); + assertThat(content, is(not("foo"))); + } finally { + Files.setPosixFilePermissions(backupConfigFile, PosixFilePermissions.fromString("rw-rw-rw-")); + } + } + + public void testThatConfigDirectoryBeingAFileAbortsInstallationAndDoesNotAccidentallyDeleteThisFile() throws Exception { + assumeTrue("File system does not support permissions, skipping", supportsPermissions); + + Files.createDirectories(environment.configFile()); + Files.createFile(environment.configFile().resolve(pluginName)); + URL pluginUrl = createPlugin(randomBoolean(), true); + + PluginManager pluginManager = new PluginManager(environment, pluginUrl, PluginManager.OutputMode.VERBOSE, TimeValue.timeValueSeconds(10)); + + try { + pluginManager.downloadAndExtract(pluginName, terminal); + fail("Expected plugin installation to fail, but didn't"); + } catch (IOException e) { + assertFileExists(environment.configFile().resolve(pluginName)); + assertFileNotExists(environment.binFile().resolve(pluginName)); + assertFileNotExists(environment.pluginsFile().resolve(pluginName)); + } + } + + public void testThatBinDirectoryBeingAFileAbortsInstallationAndDoesNotAccidentallyDeleteThisFile() throws Exception { + assumeTrue("File system does not support permissions, skipping", supportsPermissions); + + Files.createDirectories(environment.binFile()); + Files.createFile(environment.binFile().resolve(pluginName)); + URL pluginUrl = createPlugin(true, randomBoolean()); + + PluginManager pluginManager = new PluginManager(environment, pluginUrl, PluginManager.OutputMode.VERBOSE, TimeValue.timeValueSeconds(10)); + + try { + pluginManager.downloadAndExtract(pluginName, terminal); + fail("Expected plugin installation to fail, but didn't"); + } catch (IOException e) { +
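+ // the pre-existing file at bin/<plugin name> must survive the aborted installation, and no config or plugins entries may be left behind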
assertFileExists(environment.binFile().resolve(pluginName)); + assertFileNotExists(environment.configFile().resolve(pluginName)); + assertFileNotExists(environment.pluginsFile().resolve(pluginName)); + } + } + + + private URL createPlugin(boolean withBinDir, boolean withConfigDir) throws IOException { + final Path structure = createTempDir().resolve("fake-plugin"); + writeProperties(structure, "description", "fake desc", + "version", "1.0", + "elasticsearch.version", Version.CURRENT.toString(), + "jvm", "true", + "java.version", "1.7", + "name", pluginName, + "classname", pluginName); + if (withBinDir) { + // create bin dir + Path binDir = structure.resolve("bin"); + Files.createDirectory(binDir); + Files.setPosixFilePermissions(binDir, PosixFilePermissions.fromString("rwxr-xr-x")); + + // create executable + Path executable = binDir.resolve("my-binary"); + Files.createFile(executable); + Files.setPosixFilePermissions(executable, PosixFilePermissions.fromString("rwxr-xr-x")); + } + if (withConfigDir) { + // create config dir + Path configDir = structure.resolve("config"); + Files.createDirectory(configDir); + Files.setPosixFilePermissions(configDir, PosixFilePermissions.fromString("rwxr-xr-x")); + + // create config file + Path configFile = configDir.resolve("my-custom-config.yaml"); + Files.createFile(configFile); + Files.write(configFile, "my custom config content".getBytes(Charset.forName("UTF-8"))); + Files.setPosixFilePermissions(configFile, PosixFilePermissions.fromString("rw-r--r--")); + } + + Path zip = createTempDir().resolve(structure.getFileName() + ".zip"); + try (ZipOutputStream stream = new ZipOutputStream(Files.newOutputStream(zip))) { + Files.walkFileTree(structure, new SimpleFileVisitor<Path>() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + stream.putNextEntry(new ZipEntry(structure.relativize(file).toString())); + Files.copy(file, stream); + return FileVisitResult.CONTINUE; + } + }); + } + return zip.toUri().toURL(); + } +} diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java index fe942dc9a52..ac4fcf89aef 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java @@ -19,12 +19,10 @@ package org.elasticsearch.search.aggregations.pipeline.moving.avg; - -import com.google.common.collect.EvictingQueue; - import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.collect.EvictingQueue; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram.Bucket; @@ -42,11 +40,7 @@ import org.junit.Test; import java.util.*; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.search.aggregations.AggregationBuilders.avg; -import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; -import static org.elasticsearch.search.aggregations.AggregationBuilders.max; -import static org.elasticsearch.search.aggregations.AggregationBuilders.min; -import static
org.elasticsearch.search.aggregations.AggregationBuilders.range; +import static org.elasticsearch.search.aggregations.AggregationBuilders.*; import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.derivative; import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.movingAvg; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; @@ -170,7 +164,7 @@ public class MovAvgIT extends ESIntegTestCase { */ private void setupExpected(MovAvgType type, MetricTarget target, int windowSize) { ArrayList values = new ArrayList<>(numBuckets); - EvictingQueue window = EvictingQueue.create(windowSize); + EvictingQueue window = new EvictingQueue<>(windowSize); for (PipelineAggregationHelperTests.MockBucket mockBucket : mockHisto) { double metricValue; diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgUnitTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgUnitTests.java index 65e44b92a92..11c5e4035d6 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgUnitTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgUnitTests.java @@ -19,8 +19,8 @@ package org.elasticsearch.search.aggregations.pipeline.moving.avg; -import com.google.common.collect.EvictingQueue; import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.collect.EvictingQueue; import org.elasticsearch.search.aggregations.pipeline.movavg.models.*; import org.elasticsearch.test.ESTestCase; import org.junit.Test; @@ -39,7 +39,7 @@ public class MovAvgUnitTests extends ESTestCase { int numValues = randomIntBetween(1, 100); int windowSize = randomIntBetween(1, 50); - EvictingQueue window = EvictingQueue.create(windowSize); + EvictingQueue window = new EvictingQueue<>(windowSize); for (int i = 0; i < numValues; i++) { double randValue = randomDouble(); @@ -68,7 +68,7 @@ public class MovAvgUnitTests extends ESTestCase { int windowSize = randomIntBetween(1, 50); int numPredictions = randomIntBetween(1, 50); - EvictingQueue window = EvictingQueue.create(windowSize); + EvictingQueue window = new EvictingQueue<>(windowSize); for (int i = 0; i < windowSize; i++) { window.offer(randomDouble()); } @@ -94,7 +94,7 @@ public class MovAvgUnitTests extends ESTestCase { int numValues = randomIntBetween(1, 100); int windowSize = randomIntBetween(1, 50); - EvictingQueue window = EvictingQueue.create(windowSize); + EvictingQueue window = new EvictingQueue<>(windowSize); for (int i = 0; i < numValues; i++) { double randValue = randomDouble(); @@ -126,7 +126,7 @@ public class MovAvgUnitTests extends ESTestCase { int windowSize = randomIntBetween(1, 50); int numPredictions = randomIntBetween(1,50); - EvictingQueue window = EvictingQueue.create(windowSize); + EvictingQueue window = new EvictingQueue<>(windowSize); for (int i = 0; i < windowSize; i++) { window.offer(randomDouble()); } @@ -158,7 +158,7 @@ public class MovAvgUnitTests extends ESTestCase { int numValues = randomIntBetween(1, 100); int windowSize = randomIntBetween(1, 50); - EvictingQueue window = EvictingQueue.create(windowSize); + EvictingQueue window = new EvictingQueue<>(windowSize); for (int i = 0; i < numValues; i++) { double randValue = randomDouble(); @@ -193,7 +193,7 @@ public class MovAvgUnitTests extends ESTestCase { int windowSize = randomIntBetween(1, 50); int numPredictions = randomIntBetween(1,50); - 
EvictingQueue window = EvictingQueue.create(windowSize); + EvictingQueue window = new EvictingQueue<>(windowSize); for (int i = 0; i < windowSize; i++) { window.offer(randomDouble()); } @@ -227,7 +227,7 @@ public class MovAvgUnitTests extends ESTestCase { int numValues = randomIntBetween(1, 100); int windowSize = randomIntBetween(1, 50); - EvictingQueue window = EvictingQueue.create(windowSize); + EvictingQueue window = new EvictingQueue<>(windowSize); for (int i = 0; i < numValues; i++) { double randValue = randomDouble(); @@ -276,7 +276,7 @@ public class MovAvgUnitTests extends ESTestCase { int windowSize = randomIntBetween(1, 50); int numPredictions = randomIntBetween(1, 50); - EvictingQueue window = EvictingQueue.create(windowSize); + EvictingQueue window = new EvictingQueue<>(windowSize); for (int i = 0; i < windowSize; i++) { window.offer(randomDouble()); } @@ -323,7 +323,7 @@ public class MovAvgUnitTests extends ESTestCase { int windowSize = randomIntBetween(period * 2, 50); // HW requires at least two periods of data - EvictingQueue window = EvictingQueue.create(windowSize); + EvictingQueue window = new EvictingQueue<>(windowSize); for (int i = 0; i < windowSize; i++) { window.offer(randomDouble()); } @@ -392,7 +392,7 @@ public class MovAvgUnitTests extends ESTestCase { int windowSize = randomIntBetween(period * 2, 50); // HW requires at least two periods of data int numPredictions = randomIntBetween(1, 50); - EvictingQueue window = EvictingQueue.create(windowSize); + EvictingQueue window = new EvictingQueue<>(windowSize); for (int i = 0; i < windowSize; i++) { window.offer(randomDouble()); } @@ -465,7 +465,7 @@ public class MovAvgUnitTests extends ESTestCase { int windowSize = randomIntBetween(period * 2, 50); // HW requires at least two periods of data - EvictingQueue window = EvictingQueue.create(windowSize); + EvictingQueue window = new EvictingQueue<>(windowSize); for (int i = 0; i < windowSize; i++) { window.offer(randomDouble()); } @@ -533,7 +533,7 @@ public class MovAvgUnitTests extends ESTestCase { int windowSize = randomIntBetween(period * 2, 50); // HW requires at least two periods of data int numPredictions = randomIntBetween(1, 50); - EvictingQueue window = EvictingQueue.create(windowSize); + EvictingQueue window = new EvictingQueue<>(windowSize); for (int i = 0; i < windowSize; i++) { window.offer(randomDouble()); } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffIT.java index af686674c75..ccd4dcbc136 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffIT.java @@ -19,10 +19,10 @@ package org.elasticsearch.search.aggregations.pipeline.serialdiff; -import com.google.common.collect.EvictingQueue; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.collect.EvictingQueue; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; import org.elasticsearch.search.aggregations.metrics.ValuesSourceMetricsAggregationBuilder; @@ -160,7 +160,7 @@ public class SerialDiffIT extends ESIntegTestCase { */ private void 
setupExpected(MetricTarget target) { ArrayList values = new ArrayList<>(numBuckets); - EvictingQueue lagWindow = EvictingQueue.create(lag); + EvictingQueue lagWindow = new EvictingQueue<>(lag); int counter = 0; for (PipelineAggregationHelperTests.MockBucket mockBucket : mockHisto) { diff --git a/core/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java b/core/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java index c2c2782caa6..983fb52bba9 100644 --- a/core/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java +++ b/core/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java @@ -823,33 +823,4 @@ public class DecayFunctionScoreIT extends ESIntegTestCase { } } - - @Test - public void testExplainString() throws IOException, ExecutionException, InterruptedException { - assertAcked(prepareCreate("test").addMapping( - "type1", - jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("test").field("type", "string") - .endObject().startObject("num").field("type", "double").endObject().endObject().endObject().endObject())); - ensureYellow(); - - - client().prepareIndex().setType("type1").setId("1").setIndex("test") - .setSource(jsonBuilder().startObject().field("test", "value").array("num", 0.5, 0.7).endObject()).get(); - - refresh(); - - SearchResponse response = client().search( - searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source( - searchSource().explain(true) - .query(functionScoreQuery(termQuery("test", "value"), new FunctionScoreQueryBuilder.FilterFunctionBuilder[]{ - new FunctionScoreQueryBuilder.FilterFunctionBuilder(gaussDecayFunction("num", 1.0, 5.0, 1.0)), - new FunctionScoreQueryBuilder.FilterFunctionBuilder(linearDecayFunction("num", 1.0, 5.0, 1.0)), - new FunctionScoreQueryBuilder.FilterFunctionBuilder(exponentialDecayFunction("num", 1.0, 5.0, 1.0)) - }).boostMode(CombineFunction.REPLACE)))).get(); - String explanation = response.getHits().getAt(0).getExplanation().toString(); - assertThat(explanation, containsString(" 1.0 = exp(-0.5*pow(MIN[Math.max(Math.abs(0.5(=doc value) - 1.0(=origin))) - 1.0(=offset), 0), Math.max(Math.abs(0.7(=doc value) - 1.0(=origin))) - 1.0(=offset), 0)],2.0)/18.033688011112044)")); - assertThat(explanation, containsString("1.0 = max(0.0, ((10.0 - MIN[Math.max(Math.abs(0.5(=doc value) - 1.0(=origin))) - 1.0(=offset), 0), Math.max(Math.abs(0.7(=doc value) - 1.0(=origin))) - 1.0(=offset), 0)])/10.0)")); - assertThat(explanation, containsString("1.0 = exp(- MIN[Math.max(Math.abs(0.5(=doc value) - 1.0(=origin))) - 1.0(=offset), 0), Math.max(Math.abs(0.7(=doc value) - 1.0(=origin))) - 1.0(=offset), 0)] * 0.13862943611198905)")); - - } } diff --git a/core/src/test/java/org/elasticsearch/search/scroll/SearchScrollIT.java b/core/src/test/java/org/elasticsearch/search/scroll/SearchScrollIT.java index e4e7a69670b..4aeb4161fde 100644 --- a/core/src/test/java/org/elasticsearch/search/scroll/SearchScrollIT.java +++ b/core/src/test/java/org/elasticsearch/search/scroll/SearchScrollIT.java @@ -27,7 +27,10 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.UncategorizedExecutionException; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import 
org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.search.RestClearScrollAction; @@ -294,6 +297,7 @@ public class SearchScrollIT extends ESIntegTestCase { assertThat(clearResponse.isSucceeded(), is(true)); assertThat(clearResponse.getNumFreed(), greaterThan(0)); assertThat(clearResponse.status(), equalTo(RestStatus.OK)); + assertToXContentResponse(clearResponse, true, clearResponse.getNumFreed()); assertThrows(client().prepareSearchScroll(searchResponse1.getScrollId()).setScroll(TimeValue.timeValueMinutes(2)), RestStatus.NOT_FOUND); assertThrows(client().prepareSearchScroll(searchResponse2.getScrollId()).setScroll(TimeValue.timeValueMinutes(2)), RestStatus.NOT_FOUND); @@ -310,6 +314,7 @@ public class SearchScrollIT extends ESIntegTestCase { assertThat(response.isSucceeded(), is(true)); assertThat(response.getNumFreed(), equalTo(0)); assertThat(response.status(), equalTo(RestStatus.NOT_FOUND)); + assertToXContentResponse(response, true, response.getNumFreed()); } @Test @@ -404,6 +409,7 @@ public class SearchScrollIT extends ESIntegTestCase { assertThat(clearResponse.isSucceeded(), is(true)); assertThat(clearResponse.getNumFreed(), greaterThan(0)); assertThat(clearResponse.status(), equalTo(RestStatus.OK)); + assertToXContentResponse(clearResponse, true, clearResponse.getNumFreed()); assertThrows(internalCluster().transportClient().prepareSearchScroll(searchResponse1.getScrollId()).setScroll(TimeValue.timeValueMinutes(2)), RestStatus.NOT_FOUND); assertThrows(internalCluster().transportClient().prepareSearchScroll(searchResponse2.getScrollId()).setScroll(TimeValue.timeValueMinutes(2)), RestStatus.NOT_FOUND); @@ -593,4 +599,19 @@ public class SearchScrollIT extends ESIntegTestCase { } } + private void assertToXContentResponse(ClearScrollResponse response, boolean succeed, int numFreed) throws IOException { + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + response.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + + BytesReference bytesReference = builder.bytes(); + Map map; + try (XContentParser parser = XContentFactory.xContent(bytesReference).createParser(bytesReference)) { + map = parser.map(); + } + + assertThat(map.get("succeeded"), is(succeed)); + assertThat(map.get("num_freed"), equalTo(numFreed)); + } } diff --git a/core/src/test/java/org/elasticsearch/similarity/SimilarityIT.java b/core/src/test/java/org/elasticsearch/similarity/SimilarityIT.java index 229f2a8ed61..d486cdba220 100644 --- a/core/src/test/java/org/elasticsearch/similarity/SimilarityIT.java +++ b/core/src/test/java/org/elasticsearch/similarity/SimilarityIT.java @@ -30,7 +30,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; public class SimilarityIT extends ESIntegTestCase { - @Test public void testCustomBM25Similarity() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/test/CompositeTestCluster.java b/core/src/test/java/org/elasticsearch/test/CompositeTestCluster.java index 843fc2e1abd..8b14ef06b06 100644 --- a/core/src/test/java/org/elasticsearch/test/CompositeTestCluster.java +++ b/core/src/test/java/org/elasticsearch/test/CompositeTestCluster.java @@ -19,7 +19,6 @@ package org.elasticsearch.test; import com.carrotsearch.randomizedtesting.generators.RandomPicks; -import com.google.common.collect.Iterators; import org.apache.lucene.util.IOUtils; import 
org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -31,11 +30,7 @@ import org.elasticsearch.common.transport.TransportAddress; import java.io.IOException; import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Iterator; -import java.util.Random; +import java.util.*; import java.util.stream.Collectors; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; @@ -247,7 +242,7 @@ public class CompositeTestCluster extends TestCluster { @Override public synchronized Iterator iterator() { - return Iterators.singletonIterator(client()); + return Collections.singleton(client()).iterator(); } /** diff --git a/core/src/test/java/org/elasticsearch/test/ESTestCase.java b/core/src/test/java/org/elasticsearch/test/ESTestCase.java index 8bbd978f226..78d004f43e4 100644 --- a/core/src/test/java/org/elasticsearch/test/ESTestCase.java +++ b/core/src/test/java/org/elasticsearch/test/ESTestCase.java @@ -41,7 +41,6 @@ import org.elasticsearch.bootstrap.BootstrapForTesting; import org.elasticsearch.cache.recycler.MockPageCacheRecycler; import org.elasticsearch.client.Requests; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.routing.DjbHashFunction; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.io.PathUtilsForTesting; import org.elasticsearch.common.logging.ESLogger; @@ -549,9 +548,6 @@ public abstract class ESTestCase extends LuceneTestCase { /** Return consistent index settings for the provided index version. */ public static Settings.Builder settings(Version version) { Settings.Builder builder = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version); - if (version.before(Version.V_2_0_0_beta1)) { - builder.put(IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION, DjbHashFunction.class); - } return builder; } diff --git a/core/src/test/java/org/elasticsearch/test/InternalTestCluster.java b/core/src/test/java/org/elasticsearch/test/InternalTestCluster.java index f7c44a55ff7..22973d425a3 100644 --- a/core/src/test/java/org/elasticsearch/test/InternalTestCluster.java +++ b/core/src/test/java/org/elasticsearch/test/InternalTestCluster.java @@ -1852,7 +1852,7 @@ public final class InternalTestCluster extends TestCluster { for (NodeAndClient nodeAndClient : nodes.values()) { final IndicesFieldDataCache fdCache = getInstanceFromNode(IndicesFieldDataCache.class, nodeAndClient.node); // Clean up the cache, ensuring that entries' listeners have been called - fdCache.getCache().cleanUp(); + fdCache.getCache().refresh(); final String name = nodeAndClient.name; final CircuitBreakerService breakerService = getInstanceFromNode(CircuitBreakerService.class, nodeAndClient.node); diff --git a/plugins/lang-groovy/src/test/java/org/elasticsearch/messy/tests/UpdateTests.java b/core/src/test/java/org/elasticsearch/update/UpdateIT.java similarity index 68% rename from plugins/lang-groovy/src/test/java/org/elasticsearch/messy/tests/UpdateTests.java rename to core/src/test/java/org/elasticsearch/update/UpdateIT.java index 3c2b23038f7..8c62d97349a 100644 --- a/plugins/lang-groovy/src/test/java/org/elasticsearch/messy/tests/UpdateTests.java +++ b/core/src/test/java/org/elasticsearch/update/UpdateIT.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.messy.tests; +package org.elasticsearch.update; import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.action.ActionListener; @@ -38,18 +38,19 @@ import org.elasticsearch.index.engine.DocumentMissingException; import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.shard.MergePolicyConfig; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.script.CompiledScript; +import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptEngineService; +import org.elasticsearch.script.ScriptModule; import org.elasticsearch.script.ScriptService; -import org.elasticsearch.script.groovy.GroovyPlugin; +import org.elasticsearch.script.SearchScript; +import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.test.ESIntegTestCase; import org.junit.Test; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; +import java.io.IOException; +import java.util.*; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Semaphore; @@ -58,20 +59,391 @@ import java.util.concurrent.TimeUnit; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.lessThanOrEqualTo; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.*; -public class UpdateTests extends ESIntegTestCase { +public class UpdateIT extends ESIntegTestCase { + + public static class PutFieldValuesScriptPlugin extends Plugin { + + public PutFieldValuesScriptPlugin() { + } + + @Override + public String name() { + return PutFieldValuesScriptEngine.NAME; + } + + @Override + public String description() { + return "Mock script engine for " + UpdateIT.class; + } + + public void onModule(ScriptModule module) { + module.addScriptEngine(PutFieldValuesScriptEngine.class); + } + + } + + public static class PutFieldValuesScriptEngine implements ScriptEngineService { + + public static final String NAME = "put_values"; + + @Override + public void close() throws IOException { + } + + @Override + public String[] types() { + return new String[] { NAME }; + } + + @Override + public String[] extensions() { + return types(); + } + + @Override + public boolean sandboxed() { + return true; + } + + @Override + public Object compile(String script) { + return new Object(); // unused + } + + @Override + public ExecutableScript executable(CompiledScript compiledScript, Map originalParams) { + return new ExecutableScript() { + + Map vars = new HashMap<>(); + + @Override + public void setNextVar(String name, Object value) { + vars.put(name, value); + } + + @Override + public Object run() { + Map ctx = (Map) vars.get("ctx"); + assertNotNull(ctx); + + Map params = new HashMap<>(originalParams); + + Map newCtx = (Map) params.remove("_ctx"); + if (newCtx != null) { + assertFalse(newCtx.containsKey("_source")); + ctx.putAll(newCtx); + } + + Map source = (Map) 
ctx.get("_source"); + source.putAll(params); + + return ctx; + } + + @Override + public Object unwrap(Object value) { + return value; + } + + }; + } + + @Override + public SearchScript search(CompiledScript compiledScript, SearchLookup lookup, Map vars) { + throw new UnsupportedOperationException(); + } + + @Override + public void scriptRemoved(CompiledScript script) { + } + + } + + public static class FieldIncrementScriptPlugin extends Plugin { + + public FieldIncrementScriptPlugin() { + } + + @Override + public String name() { + return FieldIncrementScriptEngine.NAME; + } + + @Override + public String description() { + return "Mock script engine for " + UpdateIT.class; + } + + public void onModule(ScriptModule module) { + module.addScriptEngine(FieldIncrementScriptEngine.class); + } + + } + + public static class FieldIncrementScriptEngine implements ScriptEngineService { + + public static final String NAME = "field_inc"; + + @Override + public void close() throws IOException { + } + + @Override + public String[] types() { + return new String[] { NAME }; + } + + @Override + public String[] extensions() { + return types(); + } + + @Override + public boolean sandboxed() { + return true; + } + + @Override + public Object compile(String script) { + return script; + } + + @Override + public ExecutableScript executable(CompiledScript compiledScript, Map params) { + final String field = (String) compiledScript.compiled(); + return new ExecutableScript() { + + Map vars = new HashMap<>(); + + @Override + public void setNextVar(String name, Object value) { + vars.put(name, value); + } + + @Override + public Object run() { + Map ctx = (Map) vars.get("ctx"); + assertNotNull(ctx); + Map source = (Map) ctx.get("_source"); + Number currentValue = (Number) source.get(field); + Number inc = params == null ? 
1L : (Number) params.getOrDefault("inc", 1); + source.put(field, currentValue.longValue() + inc.longValue()); + return ctx; + } + + @Override + public Object unwrap(Object value) { + return value; + } + + }; + } + + @Override + public SearchScript search(CompiledScript compiledScript, SearchLookup lookup, Map vars) { + throw new UnsupportedOperationException(); + } + + @Override + public void scriptRemoved(CompiledScript script) { + } + + } + + public static class ScriptedUpsertScriptPlugin extends Plugin { + + public ScriptedUpsertScriptPlugin() { + } + + @Override + public String name() { + return ScriptedUpsertScriptEngine.NAME; + } + + @Override + public String description() { + return "Mock script engine for " + UpdateIT.class + ".testScriptedUpsert"; + } + + public void onModule(ScriptModule module) { + module.addScriptEngine(ScriptedUpsertScriptEngine.class); + } + + } + + public static class ScriptedUpsertScriptEngine implements ScriptEngineService { + + public static final String NAME = "scripted_upsert"; + + @Override + public void close() throws IOException { + } + + @Override + public String[] types() { + return new String[] { NAME }; + } + + @Override + public String[] extensions() { + return types(); + } + + @Override + public boolean sandboxed() { + return true; + } + + @Override + public Object compile(String script) { + return new Object(); // unused + } + + @Override + public ExecutableScript executable(CompiledScript compiledScript, Map params) { + return new ExecutableScript() { + + Map vars = new HashMap<>(); + + @Override + public void setNextVar(String name, Object value) { + vars.put(name, value); + } + + @Override + public Object run() { + Map ctx = (Map) vars.get("ctx"); + assertNotNull(ctx); + Map source = (Map) ctx.get("_source"); + Number payment = (Number) params.get("payment"); + Number oldBalance = (Number) source.get("balance"); + int deduction = "create".equals(ctx.get("op")) ? 
payment.intValue() / 2 : payment.intValue(); + source.put("balance", oldBalance.intValue() - deduction); + return ctx; + } + + @Override + public Object unwrap(Object value) { + return value; + } + + }; + } + + @Override + public SearchScript search(CompiledScript compiledScript, SearchLookup lookup, Map vars) { + throw new UnsupportedOperationException(); + } + + @Override + public void scriptRemoved(CompiledScript script) { + } + + } + + public static class ExtractContextInSourceScriptPlugin extends Plugin { + + public ExtractContextInSourceScriptPlugin() { + } + + @Override + public String name() { + return ExtractContextInSourceScriptEngine.NAME; + } + + @Override + public String description() { + return "Mock script engine for " + UpdateIT.class; + } + + public void onModule(ScriptModule module) { + module.addScriptEngine(ExtractContextInSourceScriptEngine.class); + } + + } + + public static class ExtractContextInSourceScriptEngine implements ScriptEngineService { + + public static final String NAME = "extract_ctx"; + + @Override + public void close() throws IOException { + } + + @Override + public String[] types() { + return new String[] { NAME }; + } + + @Override + public String[] extensions() { + return types(); + } + + @Override + public boolean sandboxed() { + return true; + } + + @Override + public Object compile(String script) { + return new Object(); // unused + } + + @Override + public ExecutableScript executable(CompiledScript compiledScript, Map params) { + return new ExecutableScript() { + + Map vars = new HashMap<>(); + + @Override + public void setNextVar(String name, Object value) { + vars.put(name, value); + } + + @Override + public Object run() { + Map ctx = (Map) vars.get("ctx"); + assertNotNull(ctx); + + Map source = (Map) ctx.get("_source"); + Map ctxWithoutSource = new HashMap<>(ctx); + ctxWithoutSource.remove("_source"); + source.put("update_context", ctxWithoutSource); + + return ctx; + } + + @Override + public Object unwrap(Object value) { + return value; + } + + }; + } + + @Override + public SearchScript search(CompiledScript compiledScript, SearchLookup lookup, Map vars) { + throw new UnsupportedOperationException(); + } + + @Override + public void scriptRemoved(CompiledScript script) { + } + + } @Override protected Collection<Class<? extends Plugin>> nodePlugins() { - return Collections.singleton(GroovyPlugin.class); + return Arrays.asList( + PutFieldValuesScriptPlugin.class, + FieldIncrementScriptPlugin.class, + ScriptedUpsertScriptPlugin.class, + ExtractContextInSourceScriptPlugin.class); } - + private void createTestIndex() throws Exception { logger.info("--> creating index test"); @@ -92,7 +464,7 @@ public class UpdateTests extends ESIntegTestCase { UpdateResponse updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1") .setUpsert(XContentFactory.jsonBuilder().startObject().field("field", 1).endObject()) - .setScript(new Script("ctx._source.field += 1", ScriptService.ScriptType.INLINE, null, null)) + .setScript(new Script("field", ScriptService.ScriptType.INLINE, "field_inc", null)) .execute().actionGet(); assertTrue(updateResponse.isCreated()); assertThat(updateResponse.getIndex(), equalTo("test")); @@ -104,7 +476,7 @@ public class UpdateTests extends ESIntegTestCase { updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1") .setUpsert(XContentFactory.jsonBuilder().startObject().field("field", 1).endObject()) - .setScript(new Script("ctx._source.field += 1", ScriptService.ScriptType.INLINE, null, null)) + .setScript(new Script("field",
ScriptService.ScriptType.INLINE, "field_inc", null)) .execute().actionGet(); assertFalse(updateResponse.isCreated()); assertThat(updateResponse.getIndex(), equalTo("test")); @@ -119,14 +491,11 @@ public class UpdateTests extends ESIntegTestCase { public void testScriptedUpsert() throws Exception { createTestIndex(); ensureGreen(); - - // Script logic is + + // Script logic is // 1) New accounts take balance from "balance" in upsert doc and first payment is charged at 50% // 2) Existing accounts subtract full payment from balance stored in elasticsearch - - String script="int oldBalance=ctx._source.balance;"+ - "int deduction=ctx.op == \"create\" ? (payment/2) : payment;"+ - "ctx._source.balance=oldBalance-deduction;"; + int openingBalance=10; Map params = new HashMap<>(); @@ -137,7 +506,7 @@ public class UpdateTests extends ESIntegTestCase { UpdateResponse updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1") .setUpsert(XContentFactory.jsonBuilder().startObject().field("balance", openingBalance).endObject()) .setScriptedUpsert(true) -.setScript(new Script(script, ScriptService.ScriptType.INLINE, null, params)) + .setScript(new Script("", ScriptService.ScriptType.INLINE, "scripted_upsert", params)) .execute().actionGet(); assertTrue(updateResponse.isCreated()); assertThat(updateResponse.getIndex(), equalTo("test")); @@ -147,11 +516,11 @@ public class UpdateTests extends ESIntegTestCase { assertThat(getResponse.getSourceAsMap().get("balance").toString(), equalTo("9")); } - // Now pay money for an existing account where balance is stored in es + // Now pay money for an existing account where balance is stored in es updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1") .setUpsert(XContentFactory.jsonBuilder().startObject().field("balance", openingBalance).endObject()) .setScriptedUpsert(true) -.setScript(new Script(script, ScriptService.ScriptType.INLINE, null, params)) + .setScript(new Script("", ScriptService.ScriptType.INLINE, "scripted_upsert", params)) .execute().actionGet(); assertFalse(updateResponse.isCreated()); assertThat(updateResponse.getIndex(), equalTo("test")); @@ -198,7 +567,7 @@ public class UpdateTests extends ESIntegTestCase { UpdateResponse updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1") .setUpsert(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject()) - .setScript(new Script("ctx._source.extra = \"foo\"", ScriptService.ScriptType.INLINE, null, null)) + .setScript(new Script("", ScriptService.ScriptType.INLINE, "put_values", Collections.singletonMap("extra", "foo"))) .setFields("_source") .execute().actionGet(); @@ -210,7 +579,7 @@ public class UpdateTests extends ESIntegTestCase { updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1") .setUpsert(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject()) - .setScript(new Script("ctx._source.extra = \"foo\"", ScriptService.ScriptType.INLINE, null, null)) + .setScript(new Script("", ScriptService.ScriptType.INLINE, "put_values", Collections.singletonMap("extra", "foo"))) .setFields("_source") .execute().actionGet(); @@ -229,24 +598,24 @@ public class UpdateTests extends ESIntegTestCase { index("test", "type", "1", "text", "value"); // version is now 1 assertThrows(client().prepareUpdate(indexOrAlias(), "type", "1") - .setScript(new Script("ctx._source.text = 'v2'", ScriptService.ScriptType.INLINE, null, null)).setVersion(2) + .setScript(new Script("", ScriptService.ScriptType.INLINE, "put_values", 
Collections.singletonMap("text", "v2"))).setVersion(2) .execute(), VersionConflictEngineException.class); client().prepareUpdate(indexOrAlias(), "type", "1") - .setScript(new Script("ctx._source.text = 'v2'", ScriptService.ScriptType.INLINE, null, null)).setVersion(1).get(); + .setScript(new Script("", ScriptService.ScriptType.INLINE, "put_values", Collections.singletonMap("text", "v2"))).setVersion(1).get(); assertThat(client().prepareGet("test", "type", "1").get().getVersion(), equalTo(2l)); // and again with a higher version.. client().prepareUpdate(indexOrAlias(), "type", "1") - .setScript(new Script("ctx._source.text = 'v3'", ScriptService.ScriptType.INLINE, null, null)).setVersion(2).get(); + .setScript(new Script("", ScriptService.ScriptType.INLINE, "put_values", Collections.singletonMap("text", "v3"))).setVersion(2).get(); assertThat(client().prepareGet("test", "type", "1").get().getVersion(), equalTo(3l)); // after delete client().prepareDelete("test", "type", "1").get(); assertThrows(client().prepareUpdate("test", "type", "1") - .setScript(new Script("ctx._source.text = 'v2'", ScriptService.ScriptType.INLINE, null, null)).setVersion(3) + .setScript(new Script("", ScriptService.ScriptType.INLINE, "put_values", Collections.singletonMap("text", "v2"))).setVersion(3) .execute(), DocumentMissingException.class); @@ -254,30 +623,30 @@ public class UpdateTests extends ESIntegTestCase { client().prepareIndex("test", "type", "2").setSource("text", "value").setVersion(10).setVersionType(VersionType.EXTERNAL).get(); assertThrows(client().prepareUpdate(indexOrAlias(), "type", "2") - .setScript(new Script("ctx._source.text = 'v2'", ScriptService.ScriptType.INLINE, null, null)).setVersion(2) + .setScript(new Script("", ScriptService.ScriptType.INLINE, "put_values", Collections.singletonMap("text", "v2"))).setVersion(2) .setVersionType(VersionType.EXTERNAL).execute(), ActionRequestValidationException.class); + + // With force version + client().prepareUpdate(indexOrAlias(), "type", "2") + .setScript(new Script("", ScriptService.ScriptType.INLINE, "put_values", Collections.singletonMap("text", "v10"))) + .setVersion(10).setVersionType(VersionType.FORCE).get(); + + GetResponse get = get("test", "type", "2"); + assertThat(get.getVersion(), equalTo(10l)); + assertThat((String) get.getSource().get("text"), equalTo("v10")); + + // upserts - the combination with versions is a bit weird. Tests are here to ensure we do not change our behavior unintentionally // With internal versions, it means "if object is there with version X, update it or explode. If it is not there, index."
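+ // no document with id "3" exists yet, so the upsert document is indexed as-is and gets version 1; the put_values script is not applied (the assertions below see "v0", not "v2")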
client().prepareUpdate(indexOrAlias(), "type", "3") - .setScript(new Script("ctx._source.text = 'v2'", ScriptService.ScriptType.INLINE, null, null)) + .setScript(new Script("", ScriptService.ScriptType.INLINE, "put_values", Collections.singletonMap("text", "v2"))) .setVersion(10).setUpsert("{ \"text\": \"v0\" }").get(); - GetResponse get = get("test", "type", "3"); + get = get("test", "type", "3"); assertThat(get.getVersion(), equalTo(1l)); assertThat((String) get.getSource().get("text"), equalTo("v0")); - // With force version - client().prepareUpdate(indexOrAlias(), "type", "4") - .setScript(new Script("ctx._source.text = 'v2'", ScriptService.ScriptType.INLINE, null, null)) - .setVersion(10).setVersionType(VersionType.FORCE).setUpsert("{ \"text\": \"v0\" }").get(); - - get = get("test", "type", "4"); - assertThat(get.getVersion(), equalTo(10l)); - assertThat((String) get.getSource().get("text"), equalTo("v0")); - - // retry on conflict is rejected: assertThrows(client().prepareUpdate(indexOrAlias(), "type", "1").setVersion(10).setRetryOnConflict(5), ActionRequestValidationException.class); } @@ -286,7 +655,7 @@ public class UpdateTests extends ESIntegTestCase { public void testIndexAutoCreation() throws Exception { UpdateResponse updateResponse = client().prepareUpdate("test", "type1", "1") .setUpsert(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject()) - .setScript(new Script("ctx._source.extra = \"foo\"", ScriptService.ScriptType.INLINE, null, null)) + .setScript(new Script("", ScriptService.ScriptType.INLINE, "put_values", Collections.singletonMap("extra", "foo"))) .setFields("_source") .execute().actionGet(); @@ -304,7 +673,7 @@ public class UpdateTests extends ESIntegTestCase { try { client().prepareUpdate(indexOrAlias(), "type1", "1") - .setScript(new Script("ctx._source.field++", ScriptService.ScriptType.INLINE, null, null)).execute().actionGet(); + .setScript(new Script("field", ScriptService.ScriptType.INLINE, "field_inc", null)).execute().actionGet(); fail(); } catch (DocumentMissingException e) { // all is well @@ -313,7 +682,7 @@ public class UpdateTests extends ESIntegTestCase { client().prepareIndex("test", "type1", "1").setSource("field", 1).execute().actionGet(); UpdateResponse updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1") - .setScript(new Script("ctx._source.field += 1", ScriptService.ScriptType.INLINE, null, null)).execute().actionGet(); + .setScript(new Script("field", ScriptService.ScriptType.INLINE, "field_inc", null)).execute().actionGet(); assertThat(updateResponse.getVersion(), equalTo(2L)); assertFalse(updateResponse.isCreated()); assertThat(updateResponse.getIndex(), equalTo("test")); @@ -324,9 +693,9 @@ public class UpdateTests extends ESIntegTestCase { } Map params = new HashMap<>(); - params.put("count", 3); + params.put("inc", 3); updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1") - .setScript(new Script("ctx._source.field += count", ScriptService.ScriptType.INLINE, null, params)).execute().actionGet(); + .setScript(new Script("field", ScriptService.ScriptType.INLINE, "field_inc", params)).execute().actionGet(); assertThat(updateResponse.getVersion(), equalTo(3L)); assertFalse(updateResponse.isCreated()); assertThat(updateResponse.getIndex(), equalTo("test")); @@ -338,7 +707,7 @@ public class UpdateTests extends ESIntegTestCase { // check noop updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1") - .setScript(new Script("ctx.op = 'none'", ScriptService.ScriptType.INLINE, null, 
null)).execute().actionGet(); + .setScript(new Script("", ScriptService.ScriptType.INLINE, "put_values", Collections.singletonMap("_ctx", Collections.singletonMap("op", "none")))).execute().actionGet(); assertThat(updateResponse.getVersion(), equalTo(3L)); assertFalse(updateResponse.isCreated()); assertThat(updateResponse.getIndex(), equalTo("test")); @@ -350,7 +719,7 @@ public class UpdateTests extends ESIntegTestCase { // check delete updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1") - .setScript(new Script("ctx.op = 'delete'", ScriptService.ScriptType.INLINE, null, null)).execute().actionGet(); + .setScript(new Script("", ScriptService.ScriptType.INLINE, "put_values", Collections.singletonMap("_ctx", Collections.singletonMap("op", "delete")))).execute().actionGet(); assertThat(updateResponse.getVersion(), equalTo(4L)); assertFalse(updateResponse.isCreated()); assertThat(updateResponse.getIndex(), equalTo("test")); @@ -366,14 +735,14 @@ public class UpdateTests extends ESIntegTestCase { long ttl = ((Number) getResponse.getField("_ttl").getValue()).longValue(); assertThat(ttl, greaterThan(0L)); client().prepareUpdate(indexOrAlias(), "type1", "2") - .setScript(new Script("ctx._source.field += 1", ScriptService.ScriptType.INLINE, null, null)).execute().actionGet(); + .setScript(new Script("field", ScriptService.ScriptType.INLINE, "field_inc", null)).execute().actionGet(); getResponse = client().prepareGet("test", "type1", "2").setFields("_ttl").execute().actionGet(); ttl = ((Number) getResponse.getField("_ttl").getValue()).longValue(); assertThat(ttl, greaterThan(0L)); // check TTL update client().prepareUpdate(indexOrAlias(), "type1", "2") - .setScript(new Script("ctx._ttl = 3600000", ScriptService.ScriptType.INLINE, null, null)).execute().actionGet(); + .setScript(new Script("", ScriptService.ScriptType.INLINE, "put_values", Collections.singletonMap("_ctx", Collections.singletonMap("_ttl", 3600000)))).execute().actionGet(); getResponse = client().prepareGet("test", "type1", "2").setFields("_ttl").execute().actionGet(); ttl = ((Number) getResponse.getField("_ttl").getValue()).longValue(); assertThat(ttl, greaterThan(0L)); @@ -382,7 +751,7 @@ public class UpdateTests extends ESIntegTestCase { // check timestamp update client().prepareIndex("test", "type1", "3").setSource("field", 1).setRefresh(true).execute().actionGet(); client().prepareUpdate(indexOrAlias(), "type1", "3") - .setScript(new Script("ctx._timestamp = \"2009-11-15T14:12:12\"", ScriptService.ScriptType.INLINE, null, null)).execute() + .setScript(new Script("", ScriptService.ScriptType.INLINE, "put_values", Collections.singletonMap("_ctx", Collections.singletonMap("_timestamp", "2009-11-15T14:12:12")))).execute() .actionGet(); getResponse = client().prepareGet("test", "type1", "3").setFields("_timestamp").execute().actionGet(); long timestamp = ((Number) getResponse.getField("_timestamp").getValue()).longValue(); @@ -391,7 +760,7 @@ public class UpdateTests extends ESIntegTestCase { // check fields parameter client().prepareIndex("test", "type1", "1").setSource("field", 1).execute().actionGet(); updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1") - .setScript(new Script("ctx._source.field += 1", ScriptService.ScriptType.INLINE, null, null)).setFields("_source", "field") + .setScript(new Script("field", ScriptService.ScriptType.INLINE, "field_inc", null)).setFields("_source", "field") .execute().actionGet(); assertThat(updateResponse.getIndex(), equalTo("test")); 
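// setFields("_source", "field") asks the update response to carry an embedded get result, which the assertions below inspect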
assertThat(updateResponse.getGetResult(), notNullValue()); @@ -452,7 +821,7 @@ public class UpdateTests extends ESIntegTestCase { try { client().prepareUpdate(indexOrAlias(), "type1", "1") .setDoc(XContentFactory.jsonBuilder().startObject().field("field", 1).endObject()) - .setScript(new Script("ctx._source.field += 1", ScriptService.ScriptType.INLINE, null, null)) + .setScript(new Script("field", ScriptService.ScriptType.INLINE, "field_inc", null)) .execute().actionGet(); fail("Should have thrown ActionRequestValidationException"); } catch (ActionRequestValidationException e) { @@ -468,7 +837,7 @@ public class UpdateTests extends ESIntegTestCase { ensureGreen(); try { client().prepareUpdate(indexOrAlias(), "type1", "1") - .setScript(new Script("ctx._source.field += 1", ScriptService.ScriptType.INLINE, null, null)) + .setScript(new Script("field", ScriptService.ScriptType.INLINE, "field_inc", null)) .setDocAsUpsert(true) .execute().actionGet(); fail("Should have thrown ActionRequestValidationException"); @@ -523,57 +892,39 @@ public class UpdateTests extends ESIntegTestCase { .execute().actionGet(); // Update the first object and note context variables values - Map scriptParams = new HashMap<>(); - scriptParams.put("delim", "_"); UpdateResponse updateResponse = client().prepareUpdate("test", "subtype1", "id1") .setRouting("routing1") - .setScript( - new Script("assert ctx._index == \"test\" : \"index should be \\\"test\\\"\"\n" - + - "assert ctx._type == \"subtype1\" : \"type should be \\\"subtype1\\\"\"\n" + - "assert ctx._id == \"id1\" : \"id should be \\\"id1\\\"\"\n" + - "assert ctx._version == 1 : \"version should be 1\"\n" + - "assert ctx._parent == \"parentId1\" : \"parent should be \\\"parentId1\\\"\"\n" + - "assert ctx._routing == \"routing1\" : \"routing should be \\\"routing1\\\"\"\n" + - "assert ctx._timestamp == " + timestamp + " : \"timestamp should be " + timestamp + "\"\n" + - // ttl has a 3-second leeway, because it's always counting down - "assert ctx._ttl <= " + ttl + " : \"ttl should be <= " + ttl + " but was \" + ctx._ttl\n" + - "assert ctx._ttl >= " + (ttl-3000) + " : \"ttl should be <= " + (ttl-3000) + " but was \" + ctx._ttl\n" + - "ctx._source.content = ctx._source.content + delim + ctx._source.content;\n" + - "ctx._source.field1 += 1;\n", - ScriptService.ScriptType.INLINE, null, scriptParams)) + .setScript(new Script("", ScriptService.ScriptType.INLINE, "extract_ctx", null)) .execute().actionGet(); assertEquals(2, updateResponse.getVersion()); GetResponse getResponse = client().prepareGet("test", "subtype1", "id1").setRouting("routing1").execute().actionGet(); - assertEquals(2, getResponse.getSourceAsMap().get("field1")); - assertEquals("foo_foo", getResponse.getSourceAsMap().get("content")); + Map updateContext = (Map) getResponse.getSourceAsMap().get("update_context"); + assertEquals("test", updateContext.get("_index")); + assertEquals("subtype1", updateContext.get("_type")); + assertEquals("id1", updateContext.get("_id")); + assertEquals(1, updateContext.get("_version")); + assertEquals("parentId1", updateContext.get("_parent")); + assertEquals("routing1", updateContext.get("_routing")); + assertThat(((Integer) updateContext.get("_ttl")).longValue(), allOf(greaterThanOrEqualTo(ttl-3000), lessThanOrEqualTo(ttl))); // Idem with the second object - scriptParams = new HashMap<>(); - scriptParams.put("delim", "_"); updateResponse = client().prepareUpdate("test", "type1", "parentId1") - .setScript( - new Script( - "assert ctx._index == \"test\" : \"index should be 
\\\"test\\\"\"\n" + - "assert ctx._type == \"type1\" : \"type should be \\\"type1\\\"\"\n" + - "assert ctx._id == \"parentId1\" : \"id should be \\\"parentId1\\\"\"\n" + - "assert ctx._version == 1 : \"version should be 1\"\n" + - "assert ctx._parent == null : \"parent should be null\"\n" + - "assert ctx._routing == null : \"routing should be null\"\n" + - "assert ctx._timestamp == " + (timestamp - 1) + " : \"timestamp should be " + (timestamp - 1) + "\"\n" + - "assert ctx._ttl == null : \"ttl should be null\"\n" + - "ctx._source.content = ctx._source.content + delim + ctx._source.content;\n" + - "ctx._source.field1 += 1;\n", - ScriptService.ScriptType.INLINE, null, scriptParams)) + .setScript(new Script("", ScriptService.ScriptType.INLINE, "extract_ctx", null)) .execute().actionGet(); assertEquals(2, updateResponse.getVersion()); getResponse = client().prepareGet("test", "type1", "parentId1").execute().actionGet(); - assertEquals(1, getResponse.getSourceAsMap().get("field1")); - assertEquals("bar_bar", getResponse.getSourceAsMap().get("content")); + updateContext = (Map) getResponse.getSourceAsMap().get("update_context"); + assertEquals("test", updateContext.get("_index")); + assertEquals("type1", updateContext.get("_type")); + assertEquals("parentId1", updateContext.get("_id")); + assertEquals(1, updateContext.get("_version")); + assertNull(updateContext.get("_parent")); + assertNull(updateContext.get("_routing")); + assertNull(updateContext.get("_ttl")); } @Test @@ -597,13 +948,13 @@ public class UpdateTests extends ESIntegTestCase { for (int i = 0; i < numberOfUpdatesPerThread; i++) { if (useBulkApi) { UpdateRequestBuilder updateRequestBuilder = client().prepareUpdate(indexOrAlias(), "type1", Integer.toString(i)) - .setScript(new Script("ctx._source.field += 1", ScriptService.ScriptType.INLINE, null, null)) + .setScript(new Script("field", ScriptService.ScriptType.INLINE, "field_inc", null)) .setRetryOnConflict(Integer.MAX_VALUE) .setUpsert(jsonBuilder().startObject().field("field", 1).endObject()); client().prepareBulk().add(updateRequestBuilder).execute().actionGet(); } else { client().prepareUpdate(indexOrAlias(), "type1", Integer.toString(i)) - .setScript(new Script("ctx._source.field += 1", ScriptService.ScriptType.INLINE, null, null)) + .setScript(new Script("field", ScriptService.ScriptType.INLINE, "field_inc", null)) .setRetryOnConflict(Integer.MAX_VALUE) .setUpsert(jsonBuilder().startObject().field("field", 1).endObject()) .execute().actionGet(); @@ -723,7 +1074,7 @@ public class UpdateTests extends ESIntegTestCase { updateRequestsOutstanding.acquire(); try { UpdateRequest ur = client().prepareUpdate("test", "type1", Integer.toString(j)) - .setScript(new Script("ctx._source.field += 1", ScriptService.ScriptType.INLINE, null, null)) + .setScript(new Script("field", ScriptService.ScriptType.INLINE, "field_inc", null)) .setRetryOnConflict(retryOnConflict) .setUpsert(jsonBuilder().startObject().field("field", 1).endObject()) .request(); @@ -823,7 +1174,7 @@ public class UpdateTests extends ESIntegTestCase { //All the previous operations should be complete or failed at this point for (int i = 0; i < numberOfIdsPerThread; ++i) { UpdateResponse ur = client().prepareUpdate("test", "type1", Integer.toString(i)) - .setScript(new Script("ctx._source.field += 1", ScriptService.ScriptType.INLINE, null, null)) + .setScript(new Script("field", ScriptService.ScriptType.INLINE, "field_inc", null)) .setRetryOnConflict(Integer.MAX_VALUE) .setUpsert(jsonBuilder().startObject().field("field", 
1).endObject()) .execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/validate/RenderSearchTemplateIT.java b/core/src/test/java/org/elasticsearch/validate/RenderSearchTemplateIT.java index b22dff7c761..10812c15555 100644 --- a/core/src/test/java/org/elasticsearch/validate/RenderSearchTemplateIT.java +++ b/core/src/test/java/org/elasticsearch/validate/RenderSearchTemplateIT.java @@ -19,7 +19,7 @@ package org.elasticsearch.validate; -import org.elasticsearch.action.admin.indices.validate.template.RenderSearchTemplateResponse; +import org.elasticsearch.action.admin.cluster.validate.template.RenderSearchTemplateResponse; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; @@ -61,7 +61,7 @@ public class RenderSearchTemplateIT extends ESIntegTestCase { params.put("value", "bar"); params.put("size", 20); Template template = new Template(TEMPLATE_CONTENTS, ScriptType.INLINE, MustacheScriptEngineService.NAME, XContentType.JSON, params); - RenderSearchTemplateResponse response = client().admin().indices().prepareRenderSearchTemplate().template(template).get(); + RenderSearchTemplateResponse response = client().admin().cluster().prepareRenderSearchTemplate().template(template).get(); assertThat(response, notNullValue()); BytesReference source = response.source(); assertThat(source, notNullValue()); @@ -75,7 +75,7 @@ public class RenderSearchTemplateIT extends ESIntegTestCase { params.put("value", "baz"); params.put("size", 100); template = new Template(TEMPLATE_CONTENTS, ScriptType.INLINE, MustacheScriptEngineService.NAME, XContentType.JSON, params); - response = client().admin().indices().prepareRenderSearchTemplate().template(template).get(); + response = client().admin().cluster().prepareRenderSearchTemplate().template(template).get(); assertThat(response, notNullValue()); source = response.source(); assertThat(source, notNullValue()); @@ -91,7 +91,7 @@ public class RenderSearchTemplateIT extends ESIntegTestCase { params.put("value", "bar"); params.put("size", 20); Template template = new Template("index_template_1", ScriptType.INDEXED, MustacheScriptEngineService.NAME, XContentType.JSON, params); - RenderSearchTemplateResponse response = client().admin().indices().prepareRenderSearchTemplate().template(template).get(); + RenderSearchTemplateResponse response = client().admin().cluster().prepareRenderSearchTemplate().template(template).get(); assertThat(response, notNullValue()); BytesReference source = response.source(); assertThat(source, notNullValue()); @@ -105,7 +105,7 @@ public class RenderSearchTemplateIT extends ESIntegTestCase { params.put("value", "baz"); params.put("size", 100); template = new Template("index_template_1", ScriptType.INDEXED, MustacheScriptEngineService.NAME, XContentType.JSON, params); - response = client().admin().indices().prepareRenderSearchTemplate().template(template).get(); + response = client().admin().cluster().prepareRenderSearchTemplate().template(template).get(); assertThat(response, notNullValue()); source = response.source(); assertThat(source, notNullValue()); @@ -121,7 +121,7 @@ public class RenderSearchTemplateIT extends ESIntegTestCase { params.put("value", "bar"); params.put("size", 20); Template template = new Template("file_template_1", ScriptType.FILE, MustacheScriptEngineService.NAME, XContentType.JSON, params); - RenderSearchTemplateResponse response = 
client().admin().indices().prepareRenderSearchTemplate().template(template).get(); + RenderSearchTemplateResponse response = client().admin().cluster().prepareRenderSearchTemplate().template(template).get(); assertThat(response, notNullValue()); BytesReference source = response.source(); assertThat(source, notNullValue()); @@ -135,7 +135,7 @@ public class RenderSearchTemplateIT extends ESIntegTestCase { params.put("value", "baz"); params.put("size", 100); template = new Template("file_template_1", ScriptType.FILE, MustacheScriptEngineService.NAME, XContentType.JSON, params); - response = client().admin().indices().prepareRenderSearchTemplate().template(template).get(); + response = client().admin().cluster().prepareRenderSearchTemplate().template(template).get(); assertThat(response, notNullValue()); source = response.source(); assertThat(source, notNullValue()); diff --git a/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java b/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java index 5296e763cc3..93c29e0c922 100644 --- a/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java +++ b/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java @@ -18,15 +18,6 @@ */ package org.elasticsearch.versioning; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Locale; -import java.util.Map; -import java.util.Random; -import java.util.Set; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicInteger; - import org.apache.lucene.util.TestUtil; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.bulk.BulkResponse; @@ -37,12 +28,15 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.VersionType; -import org.elasticsearch.index.engine.DocumentAlreadyExistsException; import org.elasticsearch.index.engine.FlushNotAllowedEngineException; import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.test.ESIntegTestCase; import org.junit.Test; +import java.util.*; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicInteger; + import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows; @@ -100,7 +94,7 @@ public class SimpleVersioningIT extends ESIntegTestCase { } // deleting with a lower version works. - long v= randomIntBetween(12,14); + long v = randomIntBetween(12, 14); DeleteResponse deleteResponse = client().prepareDelete("test", "type", "1").setVersion(v).setVersionType(VersionType.FORCE).get(); assertThat(deleteResponse.isFound(), equalTo(true)); assertThat(deleteResponse.getVersion(), equalTo(v)); @@ -136,7 +130,7 @@ public class SimpleVersioningIT extends ESIntegTestCase { VersionConflictEngineException.class); // Delete with a higher or equal version deletes all versions up to the given one. 
- long v= randomIntBetween(14,17); + long v = randomIntBetween(14, 17); DeleteResponse deleteResponse = client().prepareDelete("test", "type", "1").setVersion(v).setVersionType(VersionType.EXTERNAL_GTE).execute().actionGet(); assertThat(deleteResponse.isFound(), equalTo(true)); assertThat(deleteResponse.getVersion(), equalTo(v)); @@ -165,7 +159,7 @@ public class SimpleVersioningIT extends ESIntegTestCase { assertThat(indexResponse.getVersion(), equalTo(14l)); assertThrows(client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(13).setVersionType(VersionType.EXTERNAL).execute(), - VersionConflictEngineException.class); + VersionConflictEngineException.class); if (randomBoolean()) { refresh(); @@ -176,8 +170,8 @@ public class SimpleVersioningIT extends ESIntegTestCase { // deleting with a lower version fails. assertThrows( - client().prepareDelete("test", "type", "1").setVersion(2).setVersionType(VersionType.EXTERNAL).execute(), - VersionConflictEngineException.class); + client().prepareDelete("test", "type", "1").setVersion(2).setVersionType(VersionType.EXTERNAL).execute(), + VersionConflictEngineException.class); // Delete with a higher version deletes all versions up to the given one. DeleteResponse deleteResponse = client().prepareDelete("test", "type", "1").setVersion(17).setVersionType(VersionType.EXTERNAL).execute().actionGet(); @@ -186,8 +180,8 @@ public class SimpleVersioningIT extends ESIntegTestCase { // Deleting with a lower version keeps on failing after a delete. assertThrows( - client().prepareDelete("test", "type", "1").setVersion(2).setVersionType(VersionType.EXTERNAL).execute(), - VersionConflictEngineException.class); + client().prepareDelete("test", "type", "1").setVersion(2).setVersionType(VersionType.EXTERNAL).execute(), + VersionConflictEngineException.class); // But delete with a higher version is OK. @@ -206,8 +200,8 @@ public class SimpleVersioningIT extends ESIntegTestCase { assertThat(deleteResponse.getVersion(), equalTo(20l)); // Make sure that the next delete will be GC. Note we do it on the index settings so it will be cleaned up - HashMap newSettings = new HashMap<>(); - newSettings.put("index.gc_deletes",-1); + HashMap newSettings = new HashMap<>(); + newSettings.put("index.gc_deletes", -1); client().admin().indices().prepareUpdateSettings("test").setSettings(newSettings).execute().actionGet(); Thread.sleep(300); // gc works based on estimated sampled time. Give it a chance... 
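The delete assertions above exercise the FORCE, EXTERNAL, and EXTERNAL_GTE version types. A rough sketch of the acceptance rules those assertions depend on — the authoritative logic lives in org.elasticsearch.index.VersionType, not in this diff:

```java
public class VersionRuleSketch {
    /** EXTERNAL: a write is accepted only if its version is strictly greater. */
    static boolean externalAccepts(long current, long proposed) {
        return proposed > current;
    }

    /** EXTERNAL_GTE: greater-or-equal versions are also accepted. */
    static boolean externalGteAccepts(long current, long proposed) {
        return proposed >= current;
    }

    /** FORCE: the proposed version is applied unconditionally. */
    static boolean forceAccepts(long current, long proposed) {
        return true;
    }

    public static void main(String[] args) {
        long current = 14; // the doc sits at version 14, as in the tests above
        System.out.println(externalAccepts(current, 13));    // false -> VersionConflictEngineException
        System.out.println(externalGteAccepts(current, 14)); // true  -> delete succeeds at version 14
        System.out.println(forceAccepts(current, 12));       // true  -> FORCE allows a lower version
    }
}
```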
@@ -221,7 +215,7 @@ public class SimpleVersioningIT extends ESIntegTestCase { public void testRequireUnitsOnUpdateSettings() throws Exception { createIndex("test"); ensureGreen(); - HashMap newSettings = new HashMap<>(); + HashMap newSettings = new HashMap<>(); newSettings.put("index.gc_deletes", "42"); try { client().admin().indices().prepareUpdateSettings("test").setSettings(newSettings).execute().actionGet(); @@ -262,22 +256,12 @@ public class SimpleVersioningIT extends ESIntegTestCase { VersionConflictEngineException.class); assertThrows( - client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(1).execute(), - VersionConflictEngineException.class); - - assertThrows( - client().prepareIndex("test", "type", "1").setCreate(true).setSource("field1", "value1_1").setVersion(1).execute(), - VersionConflictEngineException.class); - assertThrows( - client().prepareIndex("test", "type", "1").setCreate(true).setSource("field1", "value1_1").setVersion(1).execute(), + client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(1).execute(), VersionConflictEngineException.class); assertThrows( - client().prepareIndex("test", "type", "1").setCreate(true).setSource("field1", "value1_1").setVersion(2).execute(), - DocumentAlreadyExistsException.class); - assertThrows( - client().prepareIndex("test", "type", "1").setCreate(true).setSource("field1", "value1_1").setVersion(2).execute(), - DocumentAlreadyExistsException.class); + client().prepareIndex("test", "type", "1").setCreate(true).setSource("field1", "value1_1").execute(), + VersionConflictEngineException.class); assertThrows(client().prepareDelete("test", "type", "1").setVersion(1).execute(), VersionConflictEngineException.class); @@ -334,10 +318,8 @@ public class SimpleVersioningIT extends ESIntegTestCase { assertThrows(client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(1).execute(), VersionConflictEngineException.class); - assertThrows(client().prepareIndex("test", "type", "1").setCreate(true).setSource("field1", "value1_1").setVersion(1).execute(), - VersionConflictEngineException.class); - assertThrows(client().prepareIndex("test", "type", "1").setCreate(true).setSource("field1", "value1_1").setVersion(1).execute(), + assertThrows(client().prepareIndex("test", "type", "1").setCreate(true).setSource("field1", "value1_1").execute(), VersionConflictEngineException.class); assertThrows(client().prepareDelete("test", "type", "1").setVersion(1).execute(), VersionConflictEngineException.class); @@ -377,90 +359,94 @@ public class SimpleVersioningIT extends ESIntegTestCase { IDSource ids; final Random random = getRandom(); switch (random.nextInt(6)) { - case 0: - // random simple - if (VERBOSE) { - System.out.println("TEST: use random simple ids"); - } - ids = new IDSource() { + case 0: + // random simple + if (VERBOSE) { + System.out.println("TEST: use random simple ids"); + } + ids = new IDSource() { @Override public String next() { return TestUtil.randomSimpleString(random); } }; - break; - case 1: - // random realistic unicode - if (VERBOSE) { - System.out.println("TEST: use random realistic unicode ids"); - } - ids = new IDSource() { + break; + case 1: + // random realistic unicode + if (VERBOSE) { + System.out.println("TEST: use random realistic unicode ids"); + } + ids = new IDSource() { @Override public String next() { return TestUtil.randomRealisticUnicodeString(random); } }; - break; - case 2: - // sequential - if (VERBOSE) { - System.out.println("TEST: 
use seuquential ids"); - } - ids = new IDSource() { + break; + case 2: + // sequential + if (VERBOSE) { + System.out.println("TEST: use seuquential ids"); + } + ids = new IDSource() { int upto; + @Override public String next() { return Integer.toString(upto++); } }; - break; - case 3: - // zero-pad sequential - if (VERBOSE) { - System.out.println("TEST: use zero-pad seuquential ids"); - } - ids = new IDSource() { + break; + case 3: + // zero-pad sequential + if (VERBOSE) { + System.out.println("TEST: use zero-pad seuquential ids"); + } + ids = new IDSource() { final int radix = TestUtil.nextInt(random, Character.MIN_RADIX, Character.MAX_RADIX); final String zeroPad = String.format(Locale.ROOT, "%0" + TestUtil.nextInt(random, 4, 20) + "d", 0); int upto; + @Override public String next() { String s = Integer.toString(upto++); return zeroPad.substring(zeroPad.length() - s.length()) + s; } }; - break; - case 4: - // random long - if (VERBOSE) { - System.out.println("TEST: use random long ids"); - } - ids = new IDSource() { + break; + case 4: + // random long + if (VERBOSE) { + System.out.println("TEST: use random long ids"); + } + ids = new IDSource() { final int radix = TestUtil.nextInt(random, Character.MIN_RADIX, Character.MAX_RADIX); int upto; + @Override public String next() { return Long.toString(random.nextLong() & 0x3ffffffffffffffL, radix); } }; - break; - case 5: - // zero-pad random long - if (VERBOSE) { - System.out.println("TEST: use zero-pad random long ids"); - } - ids = new IDSource() { + break; + case 5: + // zero-pad random long + if (VERBOSE) { + System.out.println("TEST: use zero-pad random long ids"); + } + ids = new IDSource() { final int radix = TestUtil.nextInt(random, Character.MIN_RADIX, Character.MAX_RADIX); final String zeroPad = String.format(Locale.ROOT, "%015d", 0); int upto; + @Override public String next() { return Long.toString(random.nextLong() & 0x3ffffffffffffffL, radix); } }; - break; - default: - throw new AssertionError(); + break; + default: + throw new AssertionError(); } return ids; @@ -530,7 +516,7 @@ public class SimpleVersioningIT extends ESIntegTestCase { } else { sb.append(" response: null"); } - + return sb.toString(); } } @@ -547,7 +533,7 @@ public class SimpleVersioningIT extends ESIntegTestCase { // TODO: not great we don't test deletes GC here: // We test deletes, but can't rely on wall-clock delete GC: - HashMap newSettings = new HashMap<>(); + HashMap newSettings = new HashMap<>(); newSettings.put("index.gc_deletes", "1000000h"); assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(newSettings).execute().actionGet()); @@ -584,14 +570,14 @@ public class SimpleVersioningIT extends ESIntegTestCase { // Attach random versions to them: long version = 0; - final IDAndVersion[] idVersions = new IDAndVersion[TestUtil.nextInt(random, numIDs/2, numIDs*(TEST_NIGHTLY ? 8 : 2))]; - final Map truth = new HashMap<>(); + final IDAndVersion[] idVersions = new IDAndVersion[TestUtil.nextInt(random, numIDs / 2, numIDs * (TEST_NIGHTLY ? 
8 : 2))]; + final Map truth = new HashMap<>(); if (VERBOSE) { System.out.println("TEST: use " + numIDs + " ids; " + idVersions.length + " operations"); } - for(int i=0;i 0; i--) { + for (int i = idVersions.length - 1; i > 0; i--) { int index = random.nextInt(i + 1); IDAndVersion x = idVersions[index]; idVersions[index] = idVersions[i]; @@ -620,7 +606,7 @@ public class SimpleVersioningIT extends ESIntegTestCase { } if (VERBOSE) { - for(IDAndVersion idVersion : idVersions) { + for (IDAndVersion idVersion : idVersions) { System.out.println("id=" + idVersion.id + " version=" + idVersion.version + " delete?=" + idVersion.delete + " truth?=" + (truth.get(idVersion.id) == idVersion)); } } @@ -629,109 +615,87 @@ public class SimpleVersioningIT extends ESIntegTestCase { final CountDownLatch startingGun = new CountDownLatch(1); Thread[] threads = new Thread[TestUtil.nextInt(random, 1, TEST_NIGHTLY ? 20 : 5)]; final long startTime = System.nanoTime(); - for(int i=0;i= idVersions.length) { - break; - } - if (VERBOSE && index % 100 == 0) { - System.out.println(Thread.currentThread().getName() + ": index=" + index); - } - IDAndVersion idVersion = idVersions[index]; + int index = upto.getAndIncrement(); + if (index >= idVersions.length) { + break; + } + if (VERBOSE && index % 100 == 0) { + System.out.println(Thread.currentThread().getName() + ": index=" + index); + } + IDAndVersion idVersion = idVersions[index]; - String id = idVersion.id; - idVersion.threadID = threadID; - idVersion.indexStartTime = System.nanoTime()-startTime; - long version = idVersion.version; - if (idVersion.delete) { - try { - idVersion.response = client().prepareDelete("test", "type", id) + String id = idVersion.id; + idVersion.threadID = threadID; + idVersion.indexStartTime = System.nanoTime() - startTime; + long version = idVersion.version; + if (idVersion.delete) { + try { + idVersion.response = client().prepareDelete("test", "type", id) .setVersion(version) .setVersionType(VersionType.EXTERNAL).execute().actionGet(); - } catch (VersionConflictEngineException vcee) { - // OK: our version is too old - assertThat(version, lessThanOrEqualTo(truth.get(id).version)); - idVersion.versionConflict = true; - } - } else { - for (int x=0;x<2;x++) { - // Try create first: - - IndexRequest.OpType op; - if (x == 0) { - op = IndexRequest.OpType.CREATE; - } else { - op = IndexRequest.OpType.INDEX; - } - - // index document - try { - idVersion.response = client().prepareIndex("test", "type", id) - .setSource("foo", "bar") - .setOpType(op) - .setVersion(version) - .setVersionType(VersionType.EXTERNAL).execute().actionGet(); - break; - } catch (DocumentAlreadyExistsException daee) { - if (x == 0) { - // OK: id was already indexed by another thread, now use index: - idVersion.alreadyExists = true; - } else { - // Should not happen with op=INDEX: - throw daee; - } - } catch (VersionConflictEngineException vcee) { - // OK: our version is too old - assertThat(version, lessThanOrEqualTo(truth.get(id).version)); - idVersion.versionConflict = true; - } - } + } catch (VersionConflictEngineException vcee) { + // OK: our version is too old + assertThat(version, lessThanOrEqualTo(truth.get(id).version)); + idVersion.versionConflict = true; } - idVersion.indexFinishTime = System.nanoTime()-startTime; + } else { + try { + idVersion.response = client().prepareIndex("test", "type", id) + .setSource("foo", "bar") + .setVersion(version).setVersionType(VersionType.EXTERNAL).get(); - if (threadRandom.nextInt(100) == 7) { - System.out.println(threadID + ": TEST: now 
refresh at " + (System.nanoTime()-startTime)); - refresh(); - System.out.println(threadID + ": TEST: refresh done at " + (System.nanoTime()-startTime)); - } - if (threadRandom.nextInt(100) == 7) { - System.out.println(threadID + ": TEST: now flush at " + (System.nanoTime()-startTime)); - try { - flush(); - } catch (FlushNotAllowedEngineException fnaee) { - // OK - } - System.out.println(threadID + ": TEST: flush done at " + (System.nanoTime()-startTime)); + } catch (VersionConflictEngineException vcee) { + // OK: our version is too old + assertThat(version, lessThanOrEqualTo(truth.get(id).version)); + idVersion.versionConflict = true; } } - } catch (Exception e) { - throw new RuntimeException(e); + idVersion.indexFinishTime = System.nanoTime() - startTime; + + if (threadRandom.nextInt(100) == 7) { + System.out.println(threadID + ": TEST: now refresh at " + (System.nanoTime() - startTime)); + refresh(); + System.out.println(threadID + ": TEST: refresh done at " + (System.nanoTime() - startTime)); + } + if (threadRandom.nextInt(100) == 7) { + System.out.println(threadID + ": TEST: now flush at " + (System.nanoTime() - startTime)); + try { + flush(); + } catch (FlushNotAllowedEngineException fnaee) { + // OK + } + System.out.println(threadID + ": TEST: flush done at " + (System.nanoTime() - startTime)); + } } + } catch (Exception e) { + throw new RuntimeException(e); } - }; + } + }; threads[i].start(); } startingGun.countDown(); - for(Thread thread : threads) { + for (Thread thread : threads) { thread.join(); } // Verify against truth: boolean failed = false; - for(String id : ids) { + for (String id : ids) { long expected; IDAndVersion idVersion = truth.get(id); if (idVersion != null && idVersion.delete == false) { @@ -748,7 +712,7 @@ public class SimpleVersioningIT extends ESIntegTestCase { if (failed) { System.out.println("All versions:"); - for(int i=0;i newSettings = new HashMap<>(); + HashMap newSettings = new HashMap<>(); newSettings.put("index.gc_deletes", "10ms"); newSettings.put("index.refresh_interval", "-1"); client() - .admin() - .indices() - .prepareUpdateSettings("test") - .setSettings(newSettings) - .execute() - .actionGet(); + .admin() + .indices() + .prepareUpdateSettings("test") + .setSettings(newSettings) + .execute() + .actionGet(); // Index a doc: client() - .prepareIndex("test", "type", "id") - .setSource("foo", "bar") - .setOpType(IndexRequest.OpType.INDEX) - .setVersion(10) - .setVersionType(VersionType.EXTERNAL) - .execute() - .actionGet(); + .prepareIndex("test", "type", "id") + .setSource("foo", "bar") + .setOpType(IndexRequest.OpType.INDEX) + .setVersion(10) + .setVersionType(VersionType.EXTERNAL) + .execute() + .actionGet(); if (randomBoolean()) { // Force refresh so the add is sometimes visible in the searcher: @@ -798,20 +762,20 @@ public class SimpleVersioningIT extends ESIntegTestCase { // Delete it client() - .prepareDelete("test", "type", "id") - .setVersion(11) - .setVersionType(VersionType.EXTERNAL) - .execute() - .actionGet(); + .prepareDelete("test", "type", "id") + .setVersion(11) + .setVersionType(VersionType.EXTERNAL) + .execute() + .actionGet(); // Real-time get should reflect delete: assertThat("doc should have been deleted", - client() - .prepareGet("test", "type", "id") - .execute() - .actionGet() - .getVersion(), - equalTo(-1L)); + client() + .prepareGet("test", "type", "id") + .execute() + .actionGet() + .getVersion(), + equalTo(-1L)); // ThreadPool.estimatedTimeInMillis has default granularity of 200 msec, so we must sleep at least that long; 
sleep much longer in // case system is busy: @@ -819,20 +783,20 @@ public class SimpleVersioningIT extends ESIntegTestCase { // Delete an unrelated doc (provokes pruning deletes from versionMap) client() - .prepareDelete("test", "type", "id2") - .setVersion(11) - .setVersionType(VersionType.EXTERNAL) - .execute() - .actionGet(); + .prepareDelete("test", "type", "id2") + .setVersion(11) + .setVersionType(VersionType.EXTERNAL) + .execute() + .actionGet(); // Real-time get should still reflect delete: assertThat("doc should have been deleted", - client() - .prepareGet("test", "type", "id") - .execute() - .actionGet() - .getVersion(), - equalTo(-1L)); + client() + .prepareGet("test", "type", "id") + .execute() + .actionGet() + .getVersion(), + equalTo(-1L)); } @Test @@ -842,25 +806,25 @@ public class SimpleVersioningIT extends ESIntegTestCase { ensureGreen(); // We test deletes, but can't rely on wall-clock delete GC: - HashMap newSettings = new HashMap<>(); + HashMap newSettings = new HashMap<>(); newSettings.put("index.gc_deletes", "0ms"); client() - .admin() - .indices() - .prepareUpdateSettings("test") - .setSettings(newSettings) - .execute() - .actionGet(); + .admin() + .indices() + .prepareUpdateSettings("test") + .setSettings(newSettings) + .execute() + .actionGet(); // Index a doc: client() - .prepareIndex("test", "type", "id") - .setSource("foo", "bar") - .setOpType(IndexRequest.OpType.INDEX) - .setVersion(10) - .setVersionType(VersionType.EXTERNAL) - .execute() - .actionGet(); + .prepareIndex("test", "type", "id") + .setSource("foo", "bar") + .setOpType(IndexRequest.OpType.INDEX) + .setVersion(10) + .setVersionType(VersionType.EXTERNAL) + .execute() + .actionGet(); if (randomBoolean()) { // Force refresh so the add is sometimes visible in the searcher: @@ -869,19 +833,19 @@ public class SimpleVersioningIT extends ESIntegTestCase { // Delete it client() - .prepareDelete("test", "type", "id") - .setVersion(11) - .setVersionType(VersionType.EXTERNAL) - .execute() - .actionGet(); + .prepareDelete("test", "type", "id") + .setVersion(11) + .setVersionType(VersionType.EXTERNAL) + .execute() + .actionGet(); // Real-time get should reflect delete even though index.gc_deletes is 0: assertThat("doc should have been deleted", - client() - .prepareGet("test", "type", "id") - .execute() - .actionGet() - .getVersion(), - equalTo(-1L)); + client() + .prepareGet("test", "type", "id") + .execute() + .actionGet() + .getVersion(), + equalTo(-1L)); } } diff --git a/core/src/test/resources/indices/bwc/index-2.0.0-rc1.zip b/core/src/test/resources/indices/bwc/index-2.0.0-rc1.zip new file mode 100644 index 00000000000..44e78b4a905 Binary files /dev/null and b/core/src/test/resources/indices/bwc/index-2.0.0-rc1.zip differ diff --git a/core/src/test/resources/indices/bwc/repo-2.0.0-rc1.zip b/core/src/test/resources/indices/bwc/repo-2.0.0-rc1.zip new file mode 100644 index 00000000000..b15cedf4d66 Binary files /dev/null and b/core/src/test/resources/indices/bwc/repo-2.0.0-rc1.zip differ diff --git a/core/src/test/resources/indices/bwc/index-0.90.0.Beta1.zip b/core/src/test/resources/indices/bwc/unsupported-0.90.0.Beta1.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-0.90.0.Beta1.zip rename to core/src/test/resources/indices/bwc/unsupported-0.90.0.Beta1.zip diff --git a/core/src/test/resources/indices/bwc/index-0.90.0.RC1.zip b/core/src/test/resources/indices/bwc/unsupported-0.90.0.RC1.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-0.90.0.RC1.zip 
rename to core/src/test/resources/indices/bwc/unsupported-0.90.0.RC1.zip diff --git a/core/src/test/resources/indices/bwc/index-0.90.0.RC2.zip b/core/src/test/resources/indices/bwc/unsupported-0.90.0.RC2.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-0.90.0.RC2.zip rename to core/src/test/resources/indices/bwc/unsupported-0.90.0.RC2.zip diff --git a/core/src/test/resources/indices/bwc/index-0.90.0.zip b/core/src/test/resources/indices/bwc/unsupported-0.90.0.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-0.90.0.zip rename to core/src/test/resources/indices/bwc/unsupported-0.90.0.zip diff --git a/core/src/test/resources/indices/bwc/index-0.90.1.zip b/core/src/test/resources/indices/bwc/unsupported-0.90.1.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-0.90.1.zip rename to core/src/test/resources/indices/bwc/unsupported-0.90.1.zip diff --git a/core/src/test/resources/indices/bwc/index-0.90.10.zip b/core/src/test/resources/indices/bwc/unsupported-0.90.10.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-0.90.10.zip rename to core/src/test/resources/indices/bwc/unsupported-0.90.10.zip diff --git a/core/src/test/resources/indices/bwc/index-0.90.11.zip b/core/src/test/resources/indices/bwc/unsupported-0.90.11.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-0.90.11.zip rename to core/src/test/resources/indices/bwc/unsupported-0.90.11.zip diff --git a/core/src/test/resources/indices/bwc/index-0.90.12.zip b/core/src/test/resources/indices/bwc/unsupported-0.90.12.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-0.90.12.zip rename to core/src/test/resources/indices/bwc/unsupported-0.90.12.zip diff --git a/core/src/test/resources/indices/bwc/index-0.90.13.zip b/core/src/test/resources/indices/bwc/unsupported-0.90.13.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-0.90.13.zip rename to core/src/test/resources/indices/bwc/unsupported-0.90.13.zip diff --git a/core/src/test/resources/indices/bwc/index-0.90.2.zip b/core/src/test/resources/indices/bwc/unsupported-0.90.2.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-0.90.2.zip rename to core/src/test/resources/indices/bwc/unsupported-0.90.2.zip diff --git a/core/src/test/resources/indices/bwc/index-0.90.3.zip b/core/src/test/resources/indices/bwc/unsupported-0.90.3.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-0.90.3.zip rename to core/src/test/resources/indices/bwc/unsupported-0.90.3.zip diff --git a/core/src/test/resources/indices/bwc/index-0.90.4.zip b/core/src/test/resources/indices/bwc/unsupported-0.90.4.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-0.90.4.zip rename to core/src/test/resources/indices/bwc/unsupported-0.90.4.zip diff --git a/core/src/test/resources/indices/bwc/index-0.90.5.zip b/core/src/test/resources/indices/bwc/unsupported-0.90.5.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-0.90.5.zip rename to core/src/test/resources/indices/bwc/unsupported-0.90.5.zip diff --git a/core/src/test/resources/indices/bwc/index-0.90.6.zip b/core/src/test/resources/indices/bwc/unsupported-0.90.6.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-0.90.6.zip rename to core/src/test/resources/indices/bwc/unsupported-0.90.6.zip diff --git 
a/core/src/test/resources/indices/bwc/index-0.90.7.zip b/core/src/test/resources/indices/bwc/unsupported-0.90.7.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-0.90.7.zip rename to core/src/test/resources/indices/bwc/unsupported-0.90.7.zip diff --git a/core/src/test/resources/indices/bwc/index-0.90.8.zip b/core/src/test/resources/indices/bwc/unsupported-0.90.8.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-0.90.8.zip rename to core/src/test/resources/indices/bwc/unsupported-0.90.8.zip diff --git a/core/src/test/resources/indices/bwc/index-0.90.9.zip b/core/src/test/resources/indices/bwc/unsupported-0.90.9.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-0.90.9.zip rename to core/src/test/resources/indices/bwc/unsupported-0.90.9.zip diff --git a/core/src/test/resources/indices/bwc/index-1.0.0.Beta1.zip b/core/src/test/resources/indices/bwc/unsupported-1.0.0.Beta1.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.0.0.Beta1.zip rename to core/src/test/resources/indices/bwc/unsupported-1.0.0.Beta1.zip diff --git a/core/src/test/resources/indices/bwc/index-1.0.0.Beta2.zip b/core/src/test/resources/indices/bwc/unsupported-1.0.0.Beta2.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.0.0.Beta2.zip rename to core/src/test/resources/indices/bwc/unsupported-1.0.0.Beta2.zip diff --git a/core/src/test/resources/indices/bwc/index-1.0.0.RC1.zip b/core/src/test/resources/indices/bwc/unsupported-1.0.0.RC1.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.0.0.RC1.zip rename to core/src/test/resources/indices/bwc/unsupported-1.0.0.RC1.zip diff --git a/core/src/test/resources/indices/bwc/index-1.0.0.RC2.zip b/core/src/test/resources/indices/bwc/unsupported-1.0.0.RC2.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.0.0.RC2.zip rename to core/src/test/resources/indices/bwc/unsupported-1.0.0.RC2.zip diff --git a/core/src/test/resources/indices/bwc/index-1.0.0.zip b/core/src/test/resources/indices/bwc/unsupported-1.0.0.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.0.0.zip rename to core/src/test/resources/indices/bwc/unsupported-1.0.0.zip diff --git a/core/src/test/resources/indices/bwc/index-1.0.1.zip b/core/src/test/resources/indices/bwc/unsupported-1.0.1.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.0.1.zip rename to core/src/test/resources/indices/bwc/unsupported-1.0.1.zip diff --git a/core/src/test/resources/indices/bwc/index-1.0.2.zip b/core/src/test/resources/indices/bwc/unsupported-1.0.2.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.0.2.zip rename to core/src/test/resources/indices/bwc/unsupported-1.0.2.zip diff --git a/core/src/test/resources/indices/bwc/index-1.0.3.zip b/core/src/test/resources/indices/bwc/unsupported-1.0.3.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.0.3.zip rename to core/src/test/resources/indices/bwc/unsupported-1.0.3.zip diff --git a/core/src/test/resources/indices/bwc/index-1.1.0.zip b/core/src/test/resources/indices/bwc/unsupported-1.1.0.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.1.0.zip rename to core/src/test/resources/indices/bwc/unsupported-1.1.0.zip diff --git a/core/src/test/resources/indices/bwc/index-1.1.1.zip b/core/src/test/resources/indices/bwc/unsupported-1.1.1.zip 
similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.1.1.zip rename to core/src/test/resources/indices/bwc/unsupported-1.1.1.zip diff --git a/core/src/test/resources/indices/bwc/index-1.1.2.zip b/core/src/test/resources/indices/bwc/unsupported-1.1.2.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.1.2.zip rename to core/src/test/resources/indices/bwc/unsupported-1.1.2.zip diff --git a/core/src/test/resources/indices/bwc/index-1.2.0.zip b/core/src/test/resources/indices/bwc/unsupported-1.2.0.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.2.0.zip rename to core/src/test/resources/indices/bwc/unsupported-1.2.0.zip diff --git a/core/src/test/resources/indices/bwc/index-1.2.1.zip b/core/src/test/resources/indices/bwc/unsupported-1.2.1.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.2.1.zip rename to core/src/test/resources/indices/bwc/unsupported-1.2.1.zip diff --git a/core/src/test/resources/indices/bwc/index-1.2.2.zip b/core/src/test/resources/indices/bwc/unsupported-1.2.2.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.2.2.zip rename to core/src/test/resources/indices/bwc/unsupported-1.2.2.zip diff --git a/core/src/test/resources/indices/bwc/index-1.2.3.zip b/core/src/test/resources/indices/bwc/unsupported-1.2.3.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.2.3.zip rename to core/src/test/resources/indices/bwc/unsupported-1.2.3.zip diff --git a/core/src/test/resources/indices/bwc/index-1.2.4.zip b/core/src/test/resources/indices/bwc/unsupported-1.2.4.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.2.4.zip rename to core/src/test/resources/indices/bwc/unsupported-1.2.4.zip diff --git a/core/src/test/resources/indices/bwc/index-1.3.0.zip b/core/src/test/resources/indices/bwc/unsupported-1.3.0.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.3.0.zip rename to core/src/test/resources/indices/bwc/unsupported-1.3.0.zip diff --git a/core/src/test/resources/indices/bwc/index-1.3.1.zip b/core/src/test/resources/indices/bwc/unsupported-1.3.1.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.3.1.zip rename to core/src/test/resources/indices/bwc/unsupported-1.3.1.zip diff --git a/core/src/test/resources/indices/bwc/index-1.3.2.zip b/core/src/test/resources/indices/bwc/unsupported-1.3.2.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.3.2.zip rename to core/src/test/resources/indices/bwc/unsupported-1.3.2.zip diff --git a/core/src/test/resources/indices/bwc/index-1.3.3.zip b/core/src/test/resources/indices/bwc/unsupported-1.3.3.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.3.3.zip rename to core/src/test/resources/indices/bwc/unsupported-1.3.3.zip diff --git a/core/src/test/resources/indices/bwc/index-1.3.4.zip b/core/src/test/resources/indices/bwc/unsupported-1.3.4.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.3.4.zip rename to core/src/test/resources/indices/bwc/unsupported-1.3.4.zip diff --git a/core/src/test/resources/indices/bwc/index-1.3.5.zip b/core/src/test/resources/indices/bwc/unsupported-1.3.5.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.3.5.zip rename to core/src/test/resources/indices/bwc/unsupported-1.3.5.zip diff --git 
a/core/src/test/resources/indices/bwc/index-1.3.6.zip b/core/src/test/resources/indices/bwc/unsupported-1.3.6.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.3.6.zip rename to core/src/test/resources/indices/bwc/unsupported-1.3.6.zip diff --git a/core/src/test/resources/indices/bwc/index-1.3.7.zip b/core/src/test/resources/indices/bwc/unsupported-1.3.7.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.3.7.zip rename to core/src/test/resources/indices/bwc/unsupported-1.3.7.zip diff --git a/core/src/test/resources/indices/bwc/index-1.3.8.zip b/core/src/test/resources/indices/bwc/unsupported-1.3.8.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.3.8.zip rename to core/src/test/resources/indices/bwc/unsupported-1.3.8.zip diff --git a/core/src/test/resources/indices/bwc/index-1.3.9.zip b/core/src/test/resources/indices/bwc/unsupported-1.3.9.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.3.9.zip rename to core/src/test/resources/indices/bwc/unsupported-1.3.9.zip diff --git a/core/src/test/resources/indices/bwc/index-1.4.0.Beta1.zip b/core/src/test/resources/indices/bwc/unsupported-1.4.0.Beta1.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.4.0.Beta1.zip rename to core/src/test/resources/indices/bwc/unsupported-1.4.0.Beta1.zip diff --git a/core/src/test/resources/indices/bwc/index-1.4.0.zip b/core/src/test/resources/indices/bwc/unsupported-1.4.0.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.4.0.zip rename to core/src/test/resources/indices/bwc/unsupported-1.4.0.zip diff --git a/core/src/test/resources/indices/bwc/index-1.4.1.zip b/core/src/test/resources/indices/bwc/unsupported-1.4.1.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.4.1.zip rename to core/src/test/resources/indices/bwc/unsupported-1.4.1.zip diff --git a/core/src/test/resources/indices/bwc/index-1.4.2.zip b/core/src/test/resources/indices/bwc/unsupported-1.4.2.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.4.2.zip rename to core/src/test/resources/indices/bwc/unsupported-1.4.2.zip diff --git a/core/src/test/resources/indices/bwc/index-1.4.3.zip b/core/src/test/resources/indices/bwc/unsupported-1.4.3.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.4.3.zip rename to core/src/test/resources/indices/bwc/unsupported-1.4.3.zip diff --git a/core/src/test/resources/indices/bwc/index-1.4.4.zip b/core/src/test/resources/indices/bwc/unsupported-1.4.4.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.4.4.zip rename to core/src/test/resources/indices/bwc/unsupported-1.4.4.zip diff --git a/core/src/test/resources/indices/bwc/index-1.4.5.zip b/core/src/test/resources/indices/bwc/unsupported-1.4.5.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.4.5.zip rename to core/src/test/resources/indices/bwc/unsupported-1.4.5.zip diff --git a/core/src/test/resources/indices/bwc/index-1.5.0.zip b/core/src/test/resources/indices/bwc/unsupported-1.5.0.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.5.0.zip rename to core/src/test/resources/indices/bwc/unsupported-1.5.0.zip diff --git a/core/src/test/resources/indices/bwc/index-1.5.1.zip b/core/src/test/resources/indices/bwc/unsupported-1.5.1.zip similarity index 100% rename from 
core/src/test/resources/indices/bwc/index-1.5.1.zip rename to core/src/test/resources/indices/bwc/unsupported-1.5.1.zip diff --git a/core/src/test/resources/indices/bwc/index-1.5.2.zip b/core/src/test/resources/indices/bwc/unsupported-1.5.2.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.5.2.zip rename to core/src/test/resources/indices/bwc/unsupported-1.5.2.zip diff --git a/core/src/test/resources/indices/bwc/index-1.6.0.zip b/core/src/test/resources/indices/bwc/unsupported-1.6.0.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.6.0.zip rename to core/src/test/resources/indices/bwc/unsupported-1.6.0.zip diff --git a/core/src/test/resources/indices/bwc/index-1.6.1.zip b/core/src/test/resources/indices/bwc/unsupported-1.6.1.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.6.1.zip rename to core/src/test/resources/indices/bwc/unsupported-1.6.1.zip diff --git a/core/src/test/resources/indices/bwc/index-1.6.2.zip b/core/src/test/resources/indices/bwc/unsupported-1.6.2.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.6.2.zip rename to core/src/test/resources/indices/bwc/unsupported-1.6.2.zip diff --git a/core/src/test/resources/indices/bwc/index-1.7.0.zip b/core/src/test/resources/indices/bwc/unsupported-1.7.0.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.7.0.zip rename to core/src/test/resources/indices/bwc/unsupported-1.7.0.zip diff --git a/core/src/test/resources/indices/bwc/index-1.7.1.zip b/core/src/test/resources/indices/bwc/unsupported-1.7.1.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.7.1.zip rename to core/src/test/resources/indices/bwc/unsupported-1.7.1.zip diff --git a/core/src/test/resources/indices/bwc/index-1.7.2.zip b/core/src/test/resources/indices/bwc/unsupported-1.7.2.zip similarity index 100% rename from core/src/test/resources/indices/bwc/index-1.7.2.zip rename to core/src/test/resources/indices/bwc/unsupported-1.7.2.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.0.0.Beta2.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.0.0.Beta2.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.0.0.Beta2.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.0.0.Beta2.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.0.0.RC1.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.0.0.RC1.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.0.0.RC1.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.0.0.RC1.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.0.0.RC2.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.0.0.RC2.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.0.0.RC2.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.0.0.RC2.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.0.0.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.0.0.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.0.0.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.0.0.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.0.1.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.0.1.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.0.1.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.0.1.zip 
diff --git a/core/src/test/resources/indices/bwc/repo-1.0.2.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.0.2.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.0.2.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.0.2.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.0.3.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.0.3.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.0.3.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.0.3.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.1.0.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.1.0.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.1.0.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.1.0.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.1.1.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.1.1.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.1.1.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.1.1.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.1.2.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.1.2.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.1.2.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.1.2.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.2.0.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.2.0.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.2.0.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.2.0.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.2.1.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.2.1.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.2.1.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.2.1.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.2.2.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.2.2.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.2.2.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.2.2.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.2.3.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.2.3.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.2.3.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.2.3.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.2.4.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.2.4.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.2.4.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.2.4.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.3.0.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.3.0.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.3.0.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.3.0.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.3.1.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.3.1.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.3.1.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.3.1.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.3.2.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.3.2.zip 
similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.3.2.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.3.2.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.3.3.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.3.3.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.3.3.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.3.3.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.3.4.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.3.4.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.3.4.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.3.4.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.3.5.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.3.5.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.3.5.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.3.5.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.3.6.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.3.6.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.3.6.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.3.6.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.3.7.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.3.7.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.3.7.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.3.7.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.3.8.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.3.8.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.3.8.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.3.8.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.3.9.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.3.9.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.3.9.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.3.9.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.4.0.Beta1.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.4.0.Beta1.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.4.0.Beta1.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.4.0.Beta1.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.4.0.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.4.0.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.4.0.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.4.0.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.4.1.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.4.1.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.4.1.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.4.1.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.4.2.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.4.2.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.4.2.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.4.2.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.4.3.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.4.3.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.4.3.zip rename to 
core/src/test/resources/indices/bwc/unsupportedrepo-1.4.3.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.4.4.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.4.4.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.4.4.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.4.4.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.4.5.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.4.5.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.4.5.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.4.5.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.5.0.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.5.0.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.5.0.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.5.0.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.5.1.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.5.1.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.5.1.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.5.1.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.5.2.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.5.2.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.5.2.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.5.2.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.6.0.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.6.0.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.6.0.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.6.0.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.6.1.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.6.1.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.6.1.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.6.1.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.6.2.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.6.2.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.6.2.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.6.2.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.7.0.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.7.0.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.7.0.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.7.0.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.7.1.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.7.1.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.7.1.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.7.1.zip diff --git a/core/src/test/resources/indices/bwc/repo-1.7.2.zip b/core/src/test/resources/indices/bwc/unsupportedrepo-1.7.2.zip similarity index 100% rename from core/src/test/resources/indices/bwc/repo-1.7.2.zip rename to core/src/test/resources/indices/bwc/unsupportedrepo-1.7.2.zip diff --git a/core/src/test/resources/org/elasticsearch/cluster/routing/custom_routing_1_x.zip b/core/src/test/resources/org/elasticsearch/cluster/routing/custom_routing_1_x.zip deleted file mode 100644 index 5772361979a..00000000000 Binary files a/core/src/test/resources/org/elasticsearch/cluster/routing/custom_routing_1_x.zip and /dev/null differ diff 
--git a/core/src/test/resources/org/elasticsearch/cluster/routing/default_routing_1_x.zip b/core/src/test/resources/org/elasticsearch/cluster/routing/default_routing_1_x.zip deleted file mode 100644 index 2fffc0b4014..00000000000 Binary files a/core/src/test/resources/org/elasticsearch/cluster/routing/default_routing_1_x.zip and /dev/null differ diff --git a/dev-tools/prepare_release_candidate.py b/dev-tools/prepare_release_candidate.py index 5f8389372c4..24450a63559 100644 --- a/dev-tools/prepare_release_candidate.py +++ b/dev-tools/prepare_release_candidate.py @@ -63,7 +63,7 @@ To install the deb from an APT repo: APT line sources.list line: -deb http://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/repos/%(major_minor_version)s/debian/ stable main +deb http://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/repos/%(package_repo_version)s/debian/ stable main To install the RPM, create a YUM file like: @@ -73,7 +73,7 @@ containing: [elasticsearch-2.0] name=Elasticsearch repository for packages -baseurl=http://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/repos/%(major_minor_version)s/centos +baseurl=http://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/repos/%(package_repo_version)s/centos gpgcheck=1 gpgkey=http://packages.elastic.co/GPG-KEY-elasticsearch enabled=1 @@ -300,7 +300,7 @@ if __name__ == "__main__": ensure_checkout_is_clean() if not re.match('(\d+\.\d+)\.*',release_version): raise RuntimeError('illegal release version format: %s' % (release_version)) - major_minor_version = re.match('(\d+\.\d+)\.*',release_version).group(1) + package_repo_version = '%s.x' % re.match('(\d+)\.*', release_version).group(1) print('*** Preparing release version: [%s]' % release_version) @@ -348,13 +348,13 @@ if __name__ == "__main__": # repository push commands s3cmd_sync_to_staging_bucket_cmd = 's3cmd sync -P %s s3://%s/elasticsearch/staging/%s-%s/org/' % (localRepoElasticsearch, bucket, release_version, shortHash) s3_bucket_sync_to = '%s/elasticsearch/staging/%s-%s/repos/' % (bucket, release_version, shortHash) - s3cmd_sync_official_repo_cmd = 's3cmd sync s3://packages.elasticsearch.org/elasticsearch/%s s3://%s' % (major_minor_version, s3_bucket_sync_to) + s3cmd_sync_official_repo_cmd = 's3cmd sync s3://packages.elasticsearch.org/elasticsearch/%s s3://%s' % (package_repo_version, s3_bucket_sync_to) - debs3_prefix = 'elasticsearch/staging/%s-%s/repos/%s/debian' % (release_version, shortHash, major_minor_version) + debs3_prefix = 'elasticsearch/staging/%s-%s/repos/%s/debian' % (release_version, shortHash, package_repo_version) debs3_upload_cmd = 'deb-s3 upload --preserve-versions %s/distribution/deb/elasticsearch/%s/elasticsearch-%s.deb -b %s --prefix %s --sign %s --arch amd64' % (localRepoElasticsearch, release_version, release_version, bucket, debs3_prefix, gpg_key) debs3_list_cmd = 'deb-s3 list -b %s --prefix %s' % (bucket, debs3_prefix) debs3_verify_cmd = 'deb-s3 verify -b %s --prefix %s' % (bucket, debs3_prefix) - rpms3_prefix = 'elasticsearch/staging/%s-%s/repos/%s/centos' % (release_version, shortHash, major_minor_version) + rpms3_prefix = 'elasticsearch/staging/%s-%s/repos/%s/centos' % (release_version, shortHash, package_repo_version) rpms3_upload_cmd = 'rpm-s3 -v -b %s -p %s --sign --visibility public-read -k 0 %s' % (bucket, rpms3_prefix, rpm) if deploy_s3: @@ -397,7 +397,7 @@ if __name__ == "__main__": print('NOTE: Running s3cmd might require you to create a config file with your credentials, if the s3cmd does not support suppliying them via the command 
line!') print('*** Once the release is deployed and published send out the following mail to dev@elastic.co:') - string_format_dict = {'version' : release_version, 'hash': shortHash, 'major_minor_version' : major_minor_version, 'bucket': bucket} + string_format_dict = {'version' : release_version, 'hash': shortHash, 'package_repo_version' : package_repo_version, 'bucket': bucket} print(MAIL_TEMPLATE % string_format_dict) print('') @@ -406,7 +406,7 @@ if __name__ == "__main__": print('') print('To publish the release and the repo on S3 execute the following commands:') - print(' s3cmd cp --recursive s3://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/repos/%(major_minor_version)s/ s3://packages.elasticsearch.org/elasticsearch/%(major_minor_version)s' % string_format_dict) + print(' s3cmd cp --recursive s3://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/repos/%(package_repo_version)s/ s3://packages.elasticsearch.org/elasticsearch/%(package_repo_version)s' % string_format_dict) print(' s3cmd cp --recursive s3://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/org/ s3://%(bucket)s/elasticsearch/release/org' % string_format_dict) print('Now go ahead and tag the release:') print(' git tag -a v%(version)s %(hash)s' % string_format_dict) diff --git a/dev-tools/smoke_test_rc.py b/dev-tools/smoke_test_rc.py index 3bb11ca7d67..b7bc00df0ab 100644 --- a/dev-tools/smoke_test_rc.py +++ b/dev-tools/smoke_test_rc.py @@ -62,10 +62,10 @@ DEFAULT_PLUGINS = ["analysis-icu", "analysis-phonetic", "analysis-smartcn", "analysis-stempel", - "cloud-gce", "delete-by-query", "discovery-azure", "discovery-ec2", + "discovery-gce", "discovery-multicast", "lang-expression", "lang-groovy", diff --git a/dev-tools/src/main/resources/forbidden/all-signatures.txt b/dev-tools/src/main/resources/forbidden/all-signatures.txt index 836c324d43d..fcd216f1e27 100644 --- a/dev-tools/src/main/resources/forbidden/all-signatures.txt +++ b/dev-tools/src/main/resources/forbidden/all-signatures.txt @@ -133,6 +133,8 @@ com.google.common.io.Resources com.google.common.hash.HashCode com.google.common.hash.HashFunction com.google.common.hash.Hashing +com.google.common.collect.Iterators +com.google.common.net.InetAddresses com.google.common.collect.ImmutableMap com.google.common.collect.ImmutableMap$Builder diff --git a/dev-tools/src/main/resources/plugin-metadata/plugin-descriptor.properties b/dev-tools/src/main/resources/plugin-metadata/plugin-descriptor.properties index 67d139ede0a..1588e113d86 100644 --- a/dev-tools/src/main/resources/plugin-metadata/plugin-descriptor.properties +++ b/dev-tools/src/main/resources/plugin-metadata/plugin-descriptor.properties @@ -24,7 +24,7 @@ # jvm=true # classname=foo.bar.BazPlugin # description=My cool plugin -# version=2.0 +# version=2.0.0-rc1 # elasticsearch.version=2.0 # java.version=1.7 # @@ -64,6 +64,10 @@ classname=${elasticsearch.plugin.classname} java.version=${maven.compiler.target} # # 'elasticsearch.version' version of elasticsearch compiled against +# You will have to release a new version of the plugin for each new +# elasticsearch release. This version is checked when the plugin +# is loaded so Elasticsearch will refuse to start in the presence of +# plugins with the incorrect elasticsearch.version. 
elasticsearch.version=${elasticsearch.version} # ### deprecated elements for jvm plugins : diff --git a/distribution/deb/pom.xml b/distribution/deb/pom.xml index 72376749357..c43e32be4f7 100644 --- a/distribution/deb/pom.xml +++ b/distribution/deb/pom.xml @@ -76,6 +76,7 @@ bin/elasticsearch bin/elasticsearch.in.sh bin/plugin + bin/elasticsearch-systemd-pre-exec @@ -110,7 +111,7 @@ ${project.build.directory}/generated-packaging/deb/bin directory - elasticsearch,elasticsearch.in.sh,plugin + elasticsearch,elasticsearch.in.sh,plugin,elasticsearch-systemd-pre-exec perm ${packaging.elasticsearch.bin.dir} @@ -119,6 +120,19 @@ root + + + template + + ${packaging.elasticsearch.conf.dir} + + + perm + 750 + root + elasticsearch + + ${project.basedir}/../src/main/resources/config @@ -127,8 +141,9 @@ perm ${packaging.elasticsearch.conf.dir} + 750 root - root + elasticsearch @@ -136,6 +151,12 @@ ${packaging.elasticsearch.conf.dir}/scripts + + perm + 750 + root + elasticsearch + diff --git a/distribution/deb/src/main/packaging/init.d/elasticsearch b/distribution/deb/src/main/packaging/init.d/elasticsearch index 9ea2beb0c53..3a82bbe7f76 100755 --- a/distribution/deb/src/main/packaging/init.d/elasticsearch +++ b/distribution/deb/src/main/packaging/init.d/elasticsearch @@ -74,9 +74,6 @@ DATA_DIR=/var/lib/$NAME # Elasticsearch configuration directory CONF_DIR=/etc/$NAME -# Elasticsearch configuration file (elasticsearch.yml) -CONF_FILE=$CONF_DIR/elasticsearch.yml - # Maximum number of VMA (Virtual Memory Areas) a process can own MAX_MAP_COUNT=262144 @@ -93,10 +90,16 @@ if [ -f "$DEFAULT" ]; then . "$DEFAULT" fi +# CONF_FILE setting was removed +if [ ! -z "$CONF_FILE" ]; then + echo "CONF_FILE setting is no longer supported. elasticsearch.yml must be placed in the config directory and cannot be renamed." 
+ exit 1 +fi + # Define other required variables PID_FILE="$PID_DIR/$NAME.pid" DAEMON=$ES_HOME/bin/elasticsearch -DAEMON_OPTS="-d -p $PID_FILE --default.config=$CONF_FILE --default.path.home=$ES_HOME --default.path.logs=$LOG_DIR --default.path.data=$DATA_DIR --default.path.conf=$CONF_DIR" +DAEMON_OPTS="-d -p $PID_FILE --default.path.home=$ES_HOME --default.path.logs=$LOG_DIR --default.path.data=$DATA_DIR --default.path.conf=$CONF_DIR" export ES_HEAP_SIZE export ES_HEAP_NEWSIZE diff --git a/distribution/deb/src/main/packaging/packaging.properties b/distribution/deb/src/main/packaging/packaging.properties index f268cde4cae..3635928c2ee 100644 --- a/distribution/deb/src/main/packaging/packaging.properties +++ b/distribution/deb/src/main/packaging/packaging.properties @@ -6,7 +6,6 @@ packaging.env.file=/etc/default/elasticsearch # Default configuration directory and file to use in bin/plugin script packaging.plugin.default.config.dir=${packaging.elasticsearch.conf.dir} -packaging.plugin.default.config.file=${packaging.elasticsearch.conf.dir}/elasticsearch.yml # Simple marker to check that properties are correctly overridden packaging.type=deb diff --git a/distribution/rpm/pom.xml b/distribution/rpm/pom.xml index 37f7203d052..218e19e57b7 100644 --- a/distribution/rpm/pom.xml +++ b/distribution/rpm/pom.xml @@ -79,6 +79,7 @@ bin/elasticsearch bin/elasticsearch.in.sh bin/plugin + bin/elasticsearch-systemd-pre-exec @@ -127,6 +128,7 @@ elasticsearch elasticsearch.in.sh plugin + elasticsearch-systemd-pre-exec @@ -140,10 +142,14 @@ that creates the conf.dir.--> ${packaging.elasticsearch.conf.dir} noreplace + elasticsearch + 750 ${packaging.elasticsearch.conf.dir}/ noreplace + elasticsearch + 750 ${project.basedir}/../src/main/resources/config/ @@ -156,6 +162,8 @@ ${packaging.elasticsearch.conf.dir}/scripts noreplace + elasticsearch + 750 diff --git a/distribution/rpm/src/main/packaging/init.d/elasticsearch b/distribution/rpm/src/main/packaging/init.d/elasticsearch index 9626dfc862b..924c67871af 100644 --- a/distribution/rpm/src/main/packaging/init.d/elasticsearch +++ b/distribution/rpm/src/main/packaging/init.d/elasticsearch @@ -40,7 +40,7 @@ MAX_MAP_COUNT=${packaging.os.max.map.count} LOG_DIR="${packaging.elasticsearch.log.dir}" DATA_DIR="${packaging.elasticsearch.data.dir}" CONF_DIR="${packaging.elasticsearch.conf.dir}" -CONF_FILE="${packaging.elasticsearch.conf.dir}/elasticsearch.yml" + PID_DIR="${packaging.elasticsearch.pid.dir}" # Source the default env file @@ -49,6 +49,12 @@ if [ -f "$ES_ENV_FILE" ]; then . "$ES_ENV_FILE" fi +# CONF_FILE setting was removed +if [ ! -z "$CONF_FILE" ]; then + echo "CONF_FILE setting is no longer supported. elasticsearch.yml must be placed in the config directory and cannot be renamed." 
+ exit 1 +fi + exec="$ES_HOME/bin/elasticsearch" prog="elasticsearch" pidfile="$PID_DIR/${prog}.pid" @@ -83,7 +89,6 @@ checkJava() { start() { checkJava [ -x $exec ] || exit 5 - [ -f $CONF_FILE ] || exit 6 if [ -n "$MAX_LOCKED_MEMORY" -a -z "$ES_HEAP_SIZE" ]; then echo "MAX_LOCKED_MEMORY is set - ES_HEAP_SIZE must also be set" return 7 diff --git a/distribution/rpm/src/main/packaging/packaging.properties b/distribution/rpm/src/main/packaging/packaging.properties index b5bf28aef52..bc4af5f5ceb 100644 --- a/distribution/rpm/src/main/packaging/packaging.properties +++ b/distribution/rpm/src/main/packaging/packaging.properties @@ -6,7 +6,6 @@ packaging.env.file=/etc/sysconfig/elasticsearch # Default configuration directory and file to use in bin/plugin script packaging.plugin.default.config.dir=${packaging.elasticsearch.conf.dir} -packaging.plugin.default.config.file=${packaging.elasticsearch.conf.dir}/elasticsearch.yml # Simple marker to check that properties are correctly overridden packaging.type=rpm diff --git a/distribution/src/main/packaging/env/elasticsearch b/distribution/src/main/packaging/env/elasticsearch index cdf05bb900a..0c01d4fb052 100644 --- a/distribution/src/main/packaging/env/elasticsearch +++ b/distribution/src/main/packaging/env/elasticsearch @@ -8,9 +8,6 @@ # Elasticsearch configuration directory #CONF_DIR=${packaging.elasticsearch.conf.dir} -# Elasticsearch configuration file -#CONF_FILE=$CONF_DIR/elasticsearch.yml - # Elasticsearch data directory #DATA_DIR=${packaging.elasticsearch.data.dir} diff --git a/distribution/src/main/packaging/packaging.properties b/distribution/src/main/packaging/packaging.properties index ff95c9d2c16..be5b60487ef 100644 --- a/distribution/src/main/packaging/packaging.properties +++ b/distribution/src/main/packaging/packaging.properties @@ -8,7 +8,6 @@ packaging.env.file= # Default configuration directory and file to use in bin/plugin script packaging.plugin.default.config.dir=$ES_HOME/config -packaging.plugin.default.config.file=$ES_HOME/config/elasticsearch.yml # Default values for min/max heap memory allocated to elasticsearch java process packaging.elasticsearch.heap.min=256m diff --git a/distribution/src/main/packaging/systemd/elasticsearch.service b/distribution/src/main/packaging/systemd/elasticsearch.service index cdcad9d93dd..d8f56f7d053 100644 --- a/distribution/src/main/packaging/systemd/elasticsearch.service +++ b/distribution/src/main/packaging/systemd/elasticsearch.service @@ -7,7 +7,6 @@ After=network-online.target [Service] Environment=ES_HOME=${packaging.elasticsearch.home.dir} Environment=CONF_DIR=${packaging.elasticsearch.conf.dir} -Environment=CONF_FILE=${packaging.elasticsearch.conf.dir}/elasticsearch.yml Environment=DATA_DIR=${packaging.elasticsearch.data.dir} Environment=LOG_DIR=${packaging.elasticsearch.log.dir} Environment=PID_DIR=${packaging.elasticsearch.pid.dir} @@ -18,12 +17,13 @@ WorkingDirectory=${packaging.elasticsearch.home.dir} User=${packaging.elasticsearch.user} Group=${packaging.elasticsearch.group} +ExecStartPre=${packaging.elasticsearch.bin.dir}/elasticsearch-systemd-pre-exec + ExecStart=${packaging.elasticsearch.bin.dir}/elasticsearch \ -Des.pidfile=${PID_DIR}/elasticsearch.pid \ -Des.default.path.home=${ES_HOME} \ -Des.default.path.logs=${LOG_DIR} \ -Des.default.path.data=${DATA_DIR} \ - -Des.default.config=${CONF_FILE} \ -Des.default.path.conf=${CONF_DIR} # Connects standard output to /dev/null diff --git a/distribution/src/main/resources/bin/elasticsearch 
b/distribution/src/main/resources/bin/elasticsearch index 878fcff3929..66f465765bf 100755 --- a/distribution/src/main/resources/bin/elasticsearch +++ b/distribution/src/main/resources/bin/elasticsearch @@ -42,10 +42,10 @@ # Be aware that you will be entirely responsible for populating the needed # environment variables. - # Maven will replace the project.name with elasticsearch below. If that # hasn't been done, we assume that this is not a packaged version and the # user has forgotten to run Maven to create a package. + IS_PACKAGED_VERSION='${project.parent.artifactId}' if [ "$IS_PACKAGED_VERSION" != "distributions" ]; then cat >&2 << EOF diff --git a/distribution/src/main/resources/bin/elasticsearch-systemd-pre-exec b/distribution/src/main/resources/bin/elasticsearch-systemd-pre-exec new file mode 100755 index 00000000000..a51d639bf7d --- /dev/null +++ b/distribution/src/main/resources/bin/elasticsearch-systemd-pre-exec @@ -0,0 +1,7 @@ +#!/bin/sh + +# CONF_FILE setting was removed +if [ ! -z "$CONF_FILE" ]; then + echo "CONF_FILE setting is no longer supported. elasticsearch.yml must be placed in the config directory and cannot be renamed." + exit 1 +fi diff --git a/distribution/src/main/resources/bin/plugin b/distribution/src/main/resources/bin/plugin index c466d483e38..35dbe3a620a 100755 --- a/distribution/src/main/resources/bin/plugin +++ b/distribution/src/main/resources/bin/plugin @@ -1,5 +1,6 @@ #!/bin/sh + CDPATH="" SCRIPT="$0" @@ -21,17 +22,10 @@ ES_HOME=`dirname "$SCRIPT"`/.. # make ELASTICSEARCH_HOME absolute ES_HOME=`cd "$ES_HOME"; pwd` + # Sets the default values for elasticsearch variables used in this script if [ -z "$CONF_DIR" ]; then CONF_DIR="${packaging.plugin.default.config.dir}" - - if [ -z "$CONF_FILE" ]; then - CONF_FILE="$CONF_DIR/elasticsearch.yml" - fi -fi - -if [ -z "$CONF_FILE" ]; then - CONF_FILE="${packaging.plugin.default.config.file}" fi # The default env file is defined at building/packaging time. @@ -66,6 +60,12 @@ if [ "x$JAVA_TOOL_OPTIONS" != "x" ]; then unset JAVA_TOOL_OPTIONS fi +# CONF_FILE setting was removed +if [ ! -z "$CONF_FILE" ]; then + echo "CONF_FILE setting is no longer supported. elasticsearch.yml must be placed in the config directory and cannot be renamed." + exit 1 +fi + if [ -x "$JAVA_HOME/bin/java" ]; then JAVA=$JAVA_HOME/bin/java else @@ -105,16 +105,6 @@ if [ -e "$CONF_DIR" ]; then esac fi -if [ -e "$CONF_FILE" ]; then - case "$properties" in - *-Des.default.config=*|*-Des.config=*) - ;; - *) - properties="$properties -Des.default.config=\"$CONF_FILE\"" - ;; - esac -fi - # full hostname passed through cut for portability on systems that do not support hostname -s # export on separate line for shells that do not support combining definition and export HOSTNAME=`hostname | cut -d. -f1` diff --git a/distribution/src/main/resources/bin/service.bat b/distribution/src/main/resources/bin/service.bat index 06c9c6461af..9822e6bbdc0 100644 Binary files a/distribution/src/main/resources/bin/service.bat and b/distribution/src/main/resources/bin/service.bat differ diff --git a/docs/plugins/authors.asciidoc b/docs/plugins/authors.asciidoc index 2ce05fdb1f9..e0db081a8ea 100644 --- a/docs/plugins/authors.asciidoc +++ b/docs/plugins/authors.asciidoc @@ -42,6 +42,72 @@ can fill in the necessary values in the `pom.xml` for your plugin. For instance, see https://github.com/elastic/elasticsearch/blob/master/plugins/site-example/pom.xml[`plugins/site-example/pom.xml`]. 
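A minimal sketch of the build-and-install loop this section describes, assuming a Maven-built plugin zip; the plugin name and path below are illustrative, not taken from this change:

[source,sh]
--------------------------------------------------
# Package the plugin; Maven filtering fills in plugin-descriptor.properties
mvn clean package

# Install the resulting zip on a local node for testing; the 2.x plugin
# manager accepts file: URLs as well as plugin names
sudo bin/plugin install file:/path/to/my-plugin/target/releases/my-plugin-1.0.0.zip
--------------------------------------------------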
+[float] +==== Mandatory elements for all plugins + + +[cols="<,<,<",options="header",] +|======================================================================= +|Element | Type | Description + +|`description` |String | simple summary of the plugin + +|`version` |String | plugin's version + +|`name` |String | the plugin name + +|======================================================================= + + + +[float] +==== Mandatory elements for Java plugins + + +[cols="<,<,<",options="header",] +|======================================================================= +|Element | Type | Description + +|`jvm` |Boolean | true if the `classname` class should be loaded +from jar files in the root directory of the plugin. +Note that only jar files in the root directory are added to the classpath for the plugin! +If you need other resources, package them into a resources jar. + +|`classname` |String | the name of the class to load, fully-qualified. + +|`java.version` |String | version of java the code is built against. +Use the system property `java.specification.version`. Version string must be a sequence +of nonnegative decimal integers separated by "."'s and may have leading zeros. + +|`elasticsearch.version` |String | version of elasticsearch compiled against. + +|======================================================================= + +[IMPORTANT] +.Plugin release lifecycle +============================================== + +You will have to release a new version of the plugin for each new elasticsearch release. +This version is checked when the plugin is loaded so Elasticsearch will refuse to start +in the presence of plugins with the incorrect `elasticsearch.version`. + +============================================== + + +[float] +==== Mandatory elements for Site plugins + + +[cols="<,<,<",options="header",] +|======================================================================= +|Element | Type | Description + +|`site` |Boolean | true to indicate contents of the `_site/` +directory in the root of the plugin should be served. + +|======================================================================= + + [float] === Testing your plugin diff --git a/docs/plugins/cloud-gce.asciidoc b/docs/plugins/discovery-gce.asciidoc similarity index 81% rename from docs/plugins/cloud-gce.asciidoc rename to docs/plugins/discovery-gce.asciidoc index 6d712ada0e5..fef86462ae4 100644 --- a/docs/plugins/cloud-gce.asciidoc +++ b/docs/plugins/discovery-gce.asciidoc @@ -1,9 +1,9 @@ -[[cloud-gce]] -=== GCE Cloud Plugin +[[discovery-gce]] +=== GCE Discovery Plugin -The Google Compute Engine Cloud plugin uses the GCE API for unicast discovery. +The Google Compute Engine Discovery plugin uses the GCE API for unicast discovery. -[[cloud-gce-install]] +[[discovery-gce-install]] [float] ==== Installation @@ -11,13 +11,13 @@ This plugin can be installed using the plugin manager: [source,sh] ---------------------------------------------------------------- -sudo bin/plugin install cloud-gce +sudo bin/plugin install discovery-gce ---------------------------------------------------------------- The plugin must be installed on every node in the cluster, and each node must be restarted after installation. 
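Since the plugin has to go onto every node, a rolling install is the usual pattern; a hedged sketch, assuming shell access to each node and a package-based install (host and service names are illustrative):

[source,sh]
--------------------------------------------------
# On each node in turn: install the plugin, then restart that node
for host in es-node-1 es-node-2 es-node-3; do
    ssh "$host" 'sudo bin/plugin install discovery-gce && sudo service elasticsearch restart'
done
--------------------------------------------------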
-[[cloud-gce-remove]] [float] ==== Removal @@ -25,12 +25,12 @@ The plugin can be removed with the following command: [source,sh] ---------------------------------------------------------------- -sudo bin/plugin remove cloud-gce +sudo bin/plugin remove discovery-gce ---------------------------------------------------------------- The node must be stopped before removing the plugin. -[[cloud-gce-usage-discovery]] +[[discovery-gce-usage]] ==== GCE Virtual Machine Discovery Google Compute Engine VM discovery allows to use the google APIs to perform automatic discovery (similar to multicast @@ -46,7 +46,45 @@ discovery: type: gce -------------------------------------------------- -[[cloud-gce-usage-discovery-short]] + +[IMPORTANT] +.Binding the network host +============================================== + +It's important to define `network.host`, as by default it is bound to `localhost`. + +You can use {ref}/modules-network.html[core network host settings] or +<<discovery-gce-network-host,gce specific host settings>>: + +============================================== + +[[discovery-gce-network-host]] +==== GCE Network Host + +When the `discovery-gce` plugin is installed, the following are also allowed +as valid network host settings: + +[cols="<,<",options="header",] +|================================================================== +|GCE Host Value |Description +|`_gce:privateIp:X_` |The private IP address of the machine for a given network interface. +|`_gce:hostname_` |The hostname of the machine. +|`_gce_` |Same as `_gce:privateIp:0_` (recommended). +|================================================================== + +Examples: + +[source,yaml] +-------------------------------------------------- +# get the IP address from network interface 1 +network.host: _gce:privateIp:1_ +# shortcut for _gce:privateIp:0_ +network.host: _gce_ +# Using GCE internal hostname (recommended) +network.host: _gce:hostname_ +-------------------------------------------------- + +[[discovery-gce-usage-short]] ===== How to start (short story) * Create Google Compute Engine instance (with compute rw permissions) @@ -55,11 +93,11 @@ discovery: * Modify `elasticsearch.yml` file * Start Elasticsearch -[[cloud-gce-usage-discovery-long]] +[[discovery-gce-usage-long]] ==== Setting up GCE Discovery -[[cloud-gce-usage-discovery-long-prerequisites]] +[[discovery-gce-usage-long-prerequisites]] ===== Prerequisites Before starting, you need: @@ -74,7 +112,8 @@ If you did not set it yet, you can define your default project you will work on: gcloud config set project es-cloud -------------------------------------------------- -[[cloud-gce-usage-discovery-long-login]] +[[discovery-gce-usage-long-login]] +===== Login to Google Cloud If you haven't already, login to Google Cloud @@ -86,7 +125,7 @@ gcloud auth login This will open your browser. You will be asked to sign-in to a Google account and authorize access to the Google Cloud SDK. -[[cloud-gce-usage-discovery-long-first-instance]] +[[discovery-gce-usage-long-first-instance]] ===== Creating your first instance @@ -129,7 +168,7 @@ scopes=compute-rw -------------------------------------------------- Failing to set this will result in unauthorized messages when starting Elasticsearch. -See [Machine Permissions](#machine-permissions). +See <<discovery-gce-usage-tips-permissions,Machine Permissions>>.
============================================== @@ -142,22 +181,29 @@ sudo apt-get update # Download Elasticsearch wget https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-2.0.0.deb -# Prepare Java installation -sudo apt-get install java8-runtime-headless +# Prepare Java installation (Oracle) +sudo echo "deb http://ppa.launchpad.net/webupd8team/java/ubuntu trusty main" | sudo tee /etc/apt/sources.list.d/webupd8team-java.list +sudo echo "deb-src http://ppa.launchpad.net/webupd8team/java/ubuntu trusty main" | sudo tee -a /etc/apt/sources.list.d/webupd8team-java.list +sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys EEA14886 +sudo apt-get update +sudo apt-get install oracle-java8-installer + +# Prepare Java installation (or OpenJDK) +# sudo apt-get install java8-runtime-headless # Prepare Elasticsearch installation sudo dpkg -i elasticsearch-2.0.0.deb -------------------------------------------------- -[[cloud-gce-usage-discovery-long-install-plugin]] -===== Install elasticsearch cloud gce plugin +[[discovery-gce-usage-long-install-plugin]] +===== Install elasticsearch discovery gce plugin Install the plugin: [source,sh] -------------------------------------------------- # Use Plugin Manager to install it -sudo bin/plugin install cloud-gce +sudo bin/plugin install discovery-gce -------------------------------------------------- Open the `elasticsearch.yml` file: @@ -211,7 +257,7 @@ discovery.gce: TRACE -[[cloud-gce-usage-discovery-cloning]] +[[discovery-gce-usage-cloning]] ==== Cloning your existing machine In order to build a cluster on many nodes, you can clone your configured instance to new nodes. @@ -221,7 +267,7 @@ First create an image of your running instance and upload it to Google Cloud Sto [source,sh] -------------------------------------------------- -# Create an image of yur current instance +# Create an image of your current instance sudo /usr/bin/gcimagebundle -d /dev/sda -o /tmp/ # An image has been created in `/tmp` directory: @@ -242,7 +288,7 @@ gcloud compute images create elasticsearch-2-0-0 --source-uri gs://esimage/e4686 # and launch the same command from your local machine. -------------------------------------------------- -[[cloud-gce-usage-discovery-start-new-instances]] +[[discovery-gce-usage-start-new-instances]] ===== Start new instances As you have now an image, you can create as many instances as you need: @@ -257,7 +303,7 @@ gcloud compute instances create myesnode2 --image=elasticsearch-2-0-0 \ --zone europe-west1-a --machine-type f1-micro --scopes=compute-rw -------------------------------------------------- -[[cloud-gce-usage-discovery-remove-instance]] +[[discovery-gce-usage-remove-instance]] ===== Remove an instance (aka shut it down) You can use https://cloud.google.com/console[Google Cloud Console] or CLI to manage your instances: @@ -273,7 +319,7 @@ gcloud compute disks deleted boot-myesnode1 boot-myesnode2 \ --zone=europe-west1-a -------------------------------------------------- -[[cloud-gce-usage-discovery-zones]] +[[discovery-gce-usage-zones]] ==== Using GCE zones `cloud.gce.zone` helps to retrieve instances running in a given zone. It should be one of the @@ -294,7 +340,7 @@ discovery: -[[cloud-gce-usage-discovery-tags]] +[[discovery-gce-usage-tags]] ==== Filtering by tags The GCE discovery can also filter machines to include in the cluster based on tags using `discovery.gce.tags` settings. 
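For context, the tags this setting filters on are attached to the instances themselves via the gcloud CLI; a brief sketch (instance names, zone, and tag values are illustrative):

[source,sh]
--------------------------------------------------
# Create an instance that already carries the tags the cluster filters on
gcloud compute instances create myesnode3 \
       --zone europe-west1-a --tags elasticsearch,dev

# Or attach tags to an instance that is already running
gcloud compute instances add-tags myesnode1 \
       --zone europe-west1-a --tags elasticsearch,dev
--------------------------------------------------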
@@ -330,13 +376,13 @@ discovery: tags: elasticsearch, dev -------------------------------------------------- -[[cloud-gce-usage-discovery-port]] +[[discovery-gce-usage-port]] ==== Changing default transport port By default, elasticsearch GCE plugin assumes that you run elasticsearch on 9300 default port. But you can specify the port value elasticsearch is meant to use using google compute engine metadata `es_port`: -[[cloud-gce-usage-discovery-port-create]] +[[discovery-gce-usage-port-create]] ===== When creating instance Add `--metadata es_port=9301` option: @@ -354,7 +400,7 @@ gcloud compute instances create myesnode2 --image=elasticsearch-1-0-0-RC1 \ --metadata es_port=9301 -------------------------------------------------- -[[cloud-gce-usage-discovery-port-run]] +[[discovery-gce-usage-port-run]] ===== On a running instance [source,sh] @@ -365,10 +411,10 @@ gcloud compute instances add-metadata myesnode1 \ -------------------------------------------------- -[[cloud-gce-usage-discovery-tips]] +[[discovery-gce-usage-tips]] ==== GCE Tips -[[cloud-gce-usage-discovery-tips-projectid]] +[[discovery-gce-usage-tips-projectid]] ===== Store project id locally If you don't want to repeat the project id each time, you can save it in the local gcloud config @@ -378,7 +424,7 @@ If you don't want to repeat the project id each time, you can save it in the loc gcloud config set project es-cloud -------------------------------------------------- -[[cloud-gce-usage-discovery-tips-permissions]] +[[discovery-gce-usage-tips-permissions]] ===== Machine Permissions If you have created a machine without the correct permissions, you will see `403 unauthorized` error messages. The only @@ -427,7 +473,7 @@ Or, you may use the alias: -------------------------------------------------- -- -[[cloud-gce-usage-discovery-testing]] +[[discovery-gce-usage-testing]] ==== Testing GCE Integrations tests in this plugin require working GCE configuration and diff --git a/docs/plugins/discovery.asciidoc b/docs/plugins/discovery.asciidoc index 289a0204e2f..1fab9427d1a 100644 --- a/docs/plugins/discovery.asciidoc +++ b/docs/plugins/discovery.asciidoc @@ -17,9 +17,9 @@ The EC2 discovery plugin uses the https://github.com/aws/aws-sdk-java[AWS API] f The Azure discovery plugin uses the Azure API for unicast discovery. -<<cloud-gce>>:: +<<discovery-gce>>:: -The Google Compute Engine Cloud plugin uses the GCE API for unicast discovery. +The Google Compute Engine discovery plugin uses the GCE API for unicast discovery. <<discovery-multicast>>:: @@ -38,7 +38,7 @@ include::discovery-ec2.asciidoc[] include::discovery-azure.asciidoc[] -include::cloud-gce.asciidoc[] +include::discovery-gce.asciidoc[] include::discovery-multicast.asciidoc[] diff --git a/docs/plugins/plugin-script.asciidoc b/docs/plugins/plugin-script.asciidoc index bae66133037..52ff574cfc7 100644 --- a/docs/plugins/plugin-script.asciidoc +++ b/docs/plugins/plugin-script.asciidoc @@ -131,6 +131,8 @@ Plugins can be removed manually, by deleting the appropriate directory under sudo bin/plugin remove [pluginname] ----------------------------------- +After a Java plugin has been removed, you will need to restart the node to complete the removal process.
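A sketch of that remove-then-restart sequence, assuming a package-based install where the node runs as a system service (the plugin and service names are illustrative):

[source,sh]
--------------------------------------------------
# Remove the Java plugin, then restart so its classes are actually unloaded
sudo bin/plugin remove discovery-gce
sudo service elasticsearch restart
--------------------------------------------------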
+ === Other command line parameters The `plugin` scripts supports a number of other command line parameters: diff --git a/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc b/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc index cdd760175d1..5afff0cb89f 100644 --- a/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc @@ -281,7 +281,7 @@ had a value. { "aggs" : { "publish_date" : { - "datehistogram" : { + "date_histogram" : { "field" : "publish_date", "interval": "year", "missing": "2000-01-01" <1> diff --git a/docs/reference/aggregations/pipeline.asciidoc b/docs/reference/aggregations/pipeline.asciidoc index 4410db3a798..e4cdae5a781 100644 --- a/docs/reference/aggregations/pipeline.asciidoc +++ b/docs/reference/aggregations/pipeline.asciidoc @@ -2,8 +2,6 @@ == Pipeline Aggregations -coming[2.0.0-beta1] - experimental[] Pipeline aggregations work on the outputs produced from other aggregations rather than from document sets, adding diff --git a/docs/reference/aggregations/pipeline/avg-bucket-aggregation.asciidoc b/docs/reference/aggregations/pipeline/avg-bucket-aggregation.asciidoc index b2b9d93f767..541ffecbf95 100644 --- a/docs/reference/aggregations/pipeline/avg-bucket-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/avg-bucket-aggregation.asciidoc @@ -1,8 +1,6 @@ [[search-aggregations-pipeline-avg-bucket-aggregation]] === Avg Bucket Aggregation -coming[2.0.0-beta1] - experimental[] A sibling pipeline aggregation which calculates the (mean) average value of a specified metric in a sibling aggregation. diff --git a/docs/reference/aggregations/pipeline/bucket-script-aggregation.asciidoc b/docs/reference/aggregations/pipeline/bucket-script-aggregation.asciidoc index 81372c14b1d..b1bbfcd7bc6 100644 --- a/docs/reference/aggregations/pipeline/bucket-script-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/bucket-script-aggregation.asciidoc @@ -1,8 +1,6 @@ [[search-aggregations-pipeline-bucket-script-aggregation]] === Bucket Script Aggregation -coming[2.0.0-beta1] - experimental[] A parent pipeline aggregation which executes a script which can perform per bucket computations on specified metrics diff --git a/docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc b/docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc index cef1e6716d3..2d80abc8858 100644 --- a/docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc @@ -1,8 +1,6 @@ [[search-aggregations-pipeline-bucket-selector-aggregation]] === Bucket Selector Aggregation -coming[2.0.0-beta1] - experimental[] A parent pipeline aggregation which executes a script which determines whether the current bucket will be retained diff --git a/docs/reference/aggregations/pipeline/cumulative-sum-aggregation.asciidoc b/docs/reference/aggregations/pipeline/cumulative-sum-aggregation.asciidoc index 823c5c80d6d..e29dbbe7ee9 100644 --- a/docs/reference/aggregations/pipeline/cumulative-sum-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/cumulative-sum-aggregation.asciidoc @@ -1,8 +1,6 @@ [[search-aggregations-pipeline-cumulative-sum-aggregation]] === Cumulative Sum Aggregation -coming[2.0.0-beta1] - experimental[] A parent pipeline aggregation which calculates the cumulative sum of a specified metric in a parent histogram (or date_histogram) diff 
--git a/docs/reference/aggregations/pipeline/derivative-aggregation.asciidoc b/docs/reference/aggregations/pipeline/derivative-aggregation.asciidoc index 48296caf608..f68a8118a34 100644 --- a/docs/reference/aggregations/pipeline/derivative-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/derivative-aggregation.asciidoc @@ -1,8 +1,6 @@ [[search-aggregations-pipeline-derivative-aggregation]] === Derivative Aggregation -coming[2.0.0-beta1] - experimental[] A parent pipeline aggregation which calculates the derivative of a specified metric in a parent histogram (or date_histogram) diff --git a/docs/reference/aggregations/pipeline/extended-stats-bucket-aggregation.asciidoc b/docs/reference/aggregations/pipeline/extended-stats-bucket-aggregation.asciidoc index bbf610ac8ab..0a44685ba1c 100644 --- a/docs/reference/aggregations/pipeline/extended-stats-bucket-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/extended-stats-bucket-aggregation.asciidoc @@ -1,8 +1,6 @@ [[search-aggregations-pipeline-extended-stats-bucket-aggregation]] === Extended Stats Bucket Aggregation -coming[2.1.0] - experimental[] A sibling pipeline aggregation which calculates a variety of stats across all bucket of a specified metric in a sibling aggregation. diff --git a/docs/reference/aggregations/pipeline/max-bucket-aggregation.asciidoc b/docs/reference/aggregations/pipeline/max-bucket-aggregation.asciidoc index 310a643a66c..96094d04562 100644 --- a/docs/reference/aggregations/pipeline/max-bucket-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/max-bucket-aggregation.asciidoc @@ -1,8 +1,6 @@ [[search-aggregations-pipeline-max-bucket-aggregation]] === Max Bucket Aggregation -coming[2.0.0-beta1] - experimental[] A sibling pipeline aggregation which identifies the bucket(s) with the maximum value of a specified metric in a sibling aggregation diff --git a/docs/reference/aggregations/pipeline/min-bucket-aggregation.asciidoc b/docs/reference/aggregations/pipeline/min-bucket-aggregation.asciidoc index 11d3d559512..c970384ad64 100644 --- a/docs/reference/aggregations/pipeline/min-bucket-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/min-bucket-aggregation.asciidoc @@ -1,8 +1,6 @@ [[search-aggregations-pipeline-min-bucket-aggregation]] === Min Bucket Aggregation -coming[2.0.0-beta1] - experimental[] A sibling pipeline aggregation which identifies the bucket(s) with the minimum value of a specified metric in a sibling aggregation diff --git a/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc b/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc index 6fe91cb45c6..968c596019c 100644 --- a/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc @@ -1,8 +1,6 @@ [[search-aggregations-pipeline-movavg-aggregation]] === Moving Average Aggregation -coming[2.0.0-beta1] - experimental[] Given an ordered series of data, the Moving Average aggregation will slide a window across the data and emit the average diff --git a/docs/reference/aggregations/pipeline/percentiles-bucket-aggregation.asciidoc b/docs/reference/aggregations/pipeline/percentiles-bucket-aggregation.asciidoc index 247696943dd..4e6423f0ead 100644 --- a/docs/reference/aggregations/pipeline/percentiles-bucket-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/percentiles-bucket-aggregation.asciidoc @@ -1,8 +1,6 @@ [[search-aggregations-pipeline-percentiles-bucket-aggregation]] === Percentiles Bucket 
Aggregation -coming[2.1.0] - experimental[] A sibling pipeline aggregation which calculates percentiles across all bucket of a specified metric in a sibling aggregation. diff --git a/docs/reference/aggregations/pipeline/serial-diff-aggregation.asciidoc b/docs/reference/aggregations/pipeline/serial-diff-aggregation.asciidoc index 7193510bf1a..17cfea93990 100644 --- a/docs/reference/aggregations/pipeline/serial-diff-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/serial-diff-aggregation.asciidoc @@ -1,8 +1,6 @@ [[search-aggregations-pipeline-serialdiff-aggregation]] === Serial Differencing Aggregation -coming[2.0.0-beta1] - experimental[] Serial differencing is a technique where values in a time series are subtracted from itself at diff --git a/docs/reference/aggregations/pipeline/stats-bucket-aggregation.asciidoc b/docs/reference/aggregations/pipeline/stats-bucket-aggregation.asciidoc index 7d6d24dda67..f5240324940 100644 --- a/docs/reference/aggregations/pipeline/stats-bucket-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/stats-bucket-aggregation.asciidoc @@ -1,8 +1,6 @@ [[search-aggregations-pipeline-stats-bucket-aggregation]] === Stats Bucket Aggregation -coming[2.1.0] - experimental[] A sibling pipeline aggregation which calculates a variety of stats across all bucket of a specified metric in a sibling aggregation. diff --git a/docs/reference/aggregations/pipeline/sum-bucket-aggregation.asciidoc b/docs/reference/aggregations/pipeline/sum-bucket-aggregation.asciidoc index 56d786f59f0..52022b376f3 100644 --- a/docs/reference/aggregations/pipeline/sum-bucket-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/sum-bucket-aggregation.asciidoc @@ -1,8 +1,6 @@ [[search-aggregations-pipeline-sum-bucket-aggregation]] === Sum Bucket Aggregation -coming[2.0.0-beta1] - experimental[] A sibling pipeline aggregation which calculates the sum across all bucket of a specified metric in a sibling aggregation. diff --git a/docs/reference/docs/bulk.asciidoc b/docs/reference/docs/bulk.asciidoc index b9b7d4751e6..ef066eb9bb5 100644 --- a/docs/reference/docs/bulk.asciidoc +++ b/docs/reference/docs/bulk.asciidoc @@ -131,8 +131,6 @@ operation based on the `_parent` / `_routing` mapping. [[bulk-timestamp]] === Timestamp -deprecated[2.0.0,The `_timestamp` field is deprecated. Instead, use a normal <<date,date>> field and set its value explicitly] - Each bulk item can include the timestamp value using the `_timestamp`/`timestamp` field. It automatically follows the behavior of the index operation based on the `_timestamp` mapping. @@ -141,8 +139,6 @@ the index operation based on the `_timestamp` mapping. [[bulk-ttl]] === TTL -deprecated[2.0.0,The current `_ttl` implementation is deprecated and will be replaced with a different implementation in a future version] - Each bulk item can include the ttl value using the `_ttl`/`ttl` field. It automatically follows the behavior of the index operation based on the `_ttl` mapping. diff --git a/docs/reference/docs/index_.asciidoc b/docs/reference/docs/index_.asciidoc index ab542c520b5..089af3e32ed 100644 --- a/docs/reference/docs/index_.asciidoc +++ b/docs/reference/docs/index_.asciidoc @@ -257,8 +257,6 @@ specified using the `routing` parameter. [[index-timestamp]] === Timestamp -deprecated[2.0.0,The `_timestamp` field is deprecated. Instead, use a normal <<date,date>> field and set its value explicitly] - A document can be indexed with a `timestamp` associated with it. The `timestamp` value of a document can be set using the `timestamp` parameter.
For example: @@ -281,8 +279,6 @@ page>>. [[index-ttl]] === TTL -deprecated[2.0.0,The current `_ttl` implementation is deprecated and will be replaced with a different implementation in a future version] - A document can be indexed with a `ttl` (time to live) associated with it. Expired documents will be expunged automatically. The expiration diff --git a/docs/reference/docs/termvectors.asciidoc b/docs/reference/docs/termvectors.asciidoc index 7530ff7faea..0e108430f85 100644 --- a/docs/reference/docs/termvectors.asciidoc +++ b/docs/reference/docs/termvectors.asciidoc @@ -81,8 +81,6 @@ omit : [float] ==== Distributed frequencies -coming[2.0.0-beta1] - Setting `dfs` to `true` (default is `false`) will return the term statistics or the field statistics of the entire index, and not just at the shard. Use it with caution as distributed frequencies can have a serious performance impact. @@ -90,8 +88,6 @@ with caution as distributed frequencies can have a serious performance impact. [float] ==== Terms Filtering -coming[2.0.0-beta1] - With the parameter `filter`, the terms returned could also be filtered based on their tf-idf scores. This could be useful in order find out a good characteristic vector of a document. This feature works in a similar manner to diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index 2731fca7e6c..a30046e1333 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -862,7 +862,7 @@ In the previous section, we skipped over a little detail called the document sco But queries do not always need to produce scores, in particular when they are only used for "filtering" the document set. Elasticsearch detects these situations and automatically optimizes query execution in order not to compute useless scores. -The <<query-dsl-bool-query,`bool` query>> that we introduced in the previous section also supports `filter` clauses which allow to use a query to restrict the documents that will be matched by other clauses, without changing how scores are computed. As an example, let's introduce the <<query-dsl-range-filter,`range` filter>>, which allows us to filter documents by a range of values. This is generally used for numeric or date filtering. +The <<query-dsl-bool-query,`bool` query>> that we introduced in the previous section also supports `filter` clauses which allow to use a query to restrict the documents that will be matched by other clauses, without changing how scores are computed. As an example, let's introduce the <<query-dsl-range-query,`range` query>>, which allows us to filter documents by a range of values. This is generally used for numeric or date filtering. This example uses a bool query to return all accounts with balances between 20000 and 30000, inclusive. In other words, we want to find accounts with a balance that is greater than or equal to 20000 and less than or equal to 30000.
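A sketch of the query that sentence describes, following the bank-account data set the getting-started guide works with (the `bank` index and `balance` field are assumptions carried over from that example):

[source,sh]
--------------------------------------------------
curl -XPOST 'localhost:9200/bank/_search?pretty' -d '
{
  "query": {
    "bool": {
      "must": { "match_all": {} },
      "filter": {
        "range": {
          "balance": {
            "gte": 20000,
            "lte": 30000
          }
        }
      }
    }
  }
}'
--------------------------------------------------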
diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index e36a66a2a20..8e34747ffdb 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -1,11 +1,12 @@ [[elasticsearch-reference]] = Elasticsearch Reference -:version: 2.0.0-beta1 -:branch: 2.0 -:jdk: 1.8.0_25 -:defguide: https://www.elastic.co/guide/en/elasticsearch/guide/current -:plugins: https://www.elastic.co/guide/en/elasticsearch/plugins/master +:version: 3.0.0-beta1 +:major-version: 3.x +:branch: 3.0 +:jdk: 1.8.0_25 +:defguide: https://www.elastic.co/guide/en/elasticsearch/guide/current +:plugins: https://www.elastic.co/guide/en/elasticsearch/plugins/master include::getting-started.asciidoc[] diff --git a/docs/reference/indices/analyze.asciidoc b/docs/reference/indices/analyze.asciidoc index d17f409c24e..1a256a6330a 100644 --- a/docs/reference/indices/analyze.asciidoc +++ b/docs/reference/indices/analyze.asciidoc @@ -16,8 +16,6 @@ curl -XGET 'localhost:9200/_analyze' -d ' }' -------------------------------------------------- -coming[2.0.0-beta1, body based parameters were added in 2.0.0] - If text parameter is provided as array of strings, it is analyzed as a multi-valued field. [source,js] @@ -29,8 +27,6 @@ curl -XGET 'localhost:9200/_analyze' -d ' }' -------------------------------------------------- -coming[2.0.0-beta1, body based parameters were added in 2.0.0] - Or by building a custom transient analyzer out of tokenizers, token filters and char filters. Token filters can use the shorter 'filters' parameter name: @@ -53,8 +49,6 @@ curl -XGET 'localhost:9200/_analyze' -d ' }' -------------------------------------------------- -coming[2.0.0-beta1, body based parameters were added in 2.0.0] - It can also run against a specific index: [source,js] @@ -78,8 +72,6 @@ curl -XGET 'localhost:9200/test/_analyze' -d ' }' -------------------------------------------------- -coming[2.0.0-beta1, body based parameters were added in 2.0.0] - Also, the analyzer can be derived based on a field mapping, for example: [source,js] @@ -91,8 +83,6 @@ curl -XGET 'localhost:9200/test/_analyze' -d ' }' -------------------------------------------------- -coming[2.0.0-beta1, body based parameters were added in 2.0.0] - Will cause the analysis to happen based on the analyzer configured in the mapping for `obj1.field1` (and if not, the default index analyzer). diff --git a/docs/reference/mapping/fields/parent-field.asciidoc b/docs/reference/mapping/fields/parent-field.asciidoc index 22e46c4f765..64f4a9934a6 100644 --- a/docs/reference/mapping/fields/parent-field.asciidoc +++ b/docs/reference/mapping/fields/parent-field.asciidoc @@ -1,8 +1,6 @@ [[mapping-parent-field]] === `_parent` field -added[2.0.0-beta1,The parent-child implementation has been completely rewritten. It is advisable to reindex any 1.x indices which use parent-child to take advantage of the new optimizations] - A parent-child relationship can be established between documents in the same index by making one mapping type the parent of another: diff --git a/docs/reference/mapping/fields/timestamp-field.asciidoc b/docs/reference/mapping/fields/timestamp-field.asciidoc index 3f4bf8a8134..5971a02c771 100644 --- a/docs/reference/mapping/fields/timestamp-field.asciidoc +++ b/docs/reference/mapping/fields/timestamp-field.asciidoc @@ -1,8 +1,6 @@ [[mapping-timestamp-field]] === `_timestamp` field -deprecated[2.0.0,The `_timestamp` field is deprecated. 
Instead, use a normal <<date,date>> field and set its value explicitly] - The `_timestamp` field, when enabled, allows a timestamp to be indexed and stored with a document. The timestamp may be specified manually, generated automatically, or set to a default value: diff --git a/docs/reference/mapping/fields/ttl-field.asciidoc b/docs/reference/mapping/fields/ttl-field.asciidoc index 5394d282f47..07ce8a86b9e 100644 --- a/docs/reference/mapping/fields/ttl-field.asciidoc +++ b/docs/reference/mapping/fields/ttl-field.asciidoc @@ -1,8 +1,6 @@ [[mapping-ttl-field]] === `_ttl` field -deprecated[2.0.0,The current `_ttl` implementation is deprecated and will be replaced with a different implementation in a future version] - Some types of documents, such as session data or special offers, come with an expiration date. The `_ttl` field allows you to specify the minimum time a document should live, after which time the document is deleted automatically. diff --git a/docs/reference/migration/migrate_2_0/crud.asciidoc b/docs/reference/migration/migrate_2_0/crud.asciidoc index 060cfc7277d..a7c947e769c 100644 --- a/docs/reference/migration/migrate_2_0/crud.asciidoc +++ b/docs/reference/migration/migrate_2_0/crud.asciidoc @@ -76,7 +76,7 @@ might return: "_index": "my_index", "_type": "my_type", "_id": "1", - "_timestamp": 10000000, <1>, + "_timestamp": 10000000, <1> "_source": { "foo" : [ "bar" ] } diff --git a/docs/reference/migration/migrate_2_0/java.asciidoc b/docs/reference/migration/migrate_2_0/java.asciidoc index 9871df4e68d..ef9c7efedf0 100644 --- a/docs/reference/migration/migrate_2_0/java.asciidoc +++ b/docs/reference/migration/migrate_2_0/java.asciidoc @@ -22,8 +22,8 @@ Client client = TransportClient.builder().settings(settings).build(); -------------------------------------------------- The transport client also no longer supports loading settings from config files. -If you have have a config file, you can load into settings yourself before -consturcting the transport client: +If you have a config file, you can load it into settings yourself before +constructing the transport client: [source,java] -------------------------------------------------- @@ -32,6 +32,13 @@ Settings settings = Settings.settingsBuilder() Client client = TransportClient.builder().settings(settings).build(); -------------------------------------------------- +==== Exceptions are only thrown on total failure + +Previously, many APIs would throw an exception if any shard failed to execute +the request. Now the exception is only thrown if all shards fail the request. +The responses for these APIs will always have a `getShardFailures` method that +you can and should check for failures. + ==== Automatically thread client listeners Previously, the user had to set request listener threads to `true` when on the @@ -109,7 +116,7 @@ new InetSocketTransportAddress(new InetSocketAddress("127.0.0.1", 0)); Elasticsearch used to shade its dependencies and to relocate packages. We no longer use shading or relocation.
You might need to change your imports to the original package names: -* `com.google.common` was `org.elasticsearch.common` +* `com.google.common` was `org.elasticsearch.common` * `com.carrotsearch.hppc` was `org.elasticsearch.common.hppc` * `jsr166e` was `org.elasticsearch.common.util.concurrent.jsr166e` * `com.fasterxml.jackson` was `org.elasticsearch.common.jackson` @@ -121,4 +128,3 @@ You might need to change your imports to the original package names: * `com.tdunning.math.stats` was `org.elasticsearch.common.stats` * `org.apache.commons.lang` was `org.elasticsearch.common.lang` * `org.apache.commons.cli` was `org.elasticsearch.common.cli.commons` - diff --git a/docs/reference/migration/migrate_2_0/mapping.asciidoc b/docs/reference/migration/migrate_2_0/mapping.asciidoc index c028a2e9663..86d81b23810 100644 --- a/docs/reference/migration/migrate_2_0/mapping.asciidoc +++ b/docs/reference/migration/migrate_2_0/mapping.asciidoc @@ -160,7 +160,7 @@ You can no longer create fields with dots in the name. In 1.x, Elasticsearch would issue a warning if a type name included a dot, e.g. `my.type`. Now that type names are no longer used to distinguish between -fields in differnt types, this warning has been relaxed: type names may now +fields in different types, this warning has been relaxed: type names may now contain dots, but they may not *begin* with a dot. The only exception to this is the special `.percolator` type. diff --git a/docs/reference/migration/migrate_2_0/settings.asciidoc b/docs/reference/migration/migrate_2_0/settings.asciidoc index 17d36c598cc..923d5069705 100644 --- a/docs/reference/migration/migrate_2_0/settings.asciidoc +++ b/docs/reference/migration/migrate_2_0/settings.asciidoc @@ -3,7 +3,7 @@ ==== Command line flags Command line flags using single dash notation must be now specified as the first arguments. -For example if previously using: [source,sh] --------------- @@ -14,7 +14,7 @@ This will now need to be changed to: [source,sh] --------------- -./elasticsearch -Des.path.conf=/opt/elasticsearch/conf/test_node --node.name=test_node +./elasticsearch -Des.path.conf=/opt/elasticsearch/conf/test_node --node.name=test_node --------------- for the flag to take effect. @@ -164,3 +164,19 @@ the `logging.yml` configuration file with the `file.layout.conversionPattern` setting. Remove mapping.date.round_ceil setting for date math parsing #8889 (issues: #8556, #8598) + +==== Custom config file + +It is no longer possible to specify a custom config file with the `CONF_FILE` +environment variable, or the `-Des.config`, `-Des.default.config`, or +`-Delasticsearch.config` parameters. + +Instead, the config file must be named `elasticsearch.yml` and must be located +in the default `config/` directory, or in the directory specified in the +`CONF_DIR` environment variable. + +==== `ES_CLASSPATH` removed + +The `ES_CLASSPATH` environment variable is no longer used to set the class +path. External libraries should preferably be loaded using the plugin +mechanism or, if you really must, be copied to the `lib/` directory. diff --git a/docs/reference/migration/migrate_3_0.asciidoc b/docs/reference/migration/migrate_3_0.asciidoc index 897098fdbb6..c1ba60c0f4a 100644 --- a/docs/reference/migration/migrate_3_0.asciidoc +++ b/docs/reference/migration/migrate_3_0.asciidoc @@ -121,6 +121,25 @@ function that it supports and it's able to parse.
The function object can then produce the lucene function through the new `toFunction(QueryShardContext)` method, which returns a lucene function to be executed on the data node. +==== Cloud AWS plugin + +Cloud AWS plugin has been split into two plugins: + +* {plugins}/discovery-ec2.html[Discovery EC2 plugin] +* {plugins}/repository-s3.html[Repository S3 plugin] + +==== Cloud Azure plugin + +Cloud Azure plugin has been split into three plugins: + +* {plugins}/discovery-azure.html[Discovery Azure plugin] +* {plugins}/repository-azure.html[Repository Azure plugin] +* {plugins}/store-smb.html[Store SMB plugin] + +==== Cloud GCE plugin + +Cloud GCE plugin has been renamed to {plugins}/discovery-gce.html[Discovery GCE plugin]. + === Java-API ==== BoostingQueryBuilder @@ -266,3 +285,8 @@ of string values: see `FilterFunctionScoreQuery.ScoreMode` and `CombineFunction` For simplicity, only one way of adding the ids to the existing list (empty by default) is left: `addIds(String...)` +==== DocumentAlreadyExistsException removed + +`DocumentAlreadyExistsException` is removed and a `VersionConflictException` is thrown instead (with a better +error description). This will influence code that uses the `IndexRequest.opType()` or `IndexRequest.create()` +to index a document only if it doesn't already exist. diff --git a/docs/reference/modules/discovery/gce.asciidoc b/docs/reference/modules/discovery/gce.asciidoc index bb9c89f6130..ea367d52ceb 100644 --- a/docs/reference/modules/discovery/gce.asciidoc +++ b/docs/reference/modules/discovery/gce.asciidoc @@ -2,5 +2,5 @@ === Google Compute Engine Discovery Google Compute Engine (GCE) discovery allows to use the GCE APIs to perform automatic discovery (similar to multicast). -Please check the https://github.com/elasticsearch/elasticsearch-cloud-gce[plugin website] -to find the full documentation. +It is available as a plugin. See {plugins}/discovery-gce.html[discovery-gce] for more information. + diff --git a/docs/reference/modules/network.asciidoc b/docs/reference/modules/network.asciidoc index 70b4d8c97f9..7105d2d60dd 100644 --- a/docs/reference/modules/network.asciidoc +++ b/docs/reference/modules/network.asciidoc @@ -54,6 +54,10 @@ provided network interface. For example `_en0:ipv6_`. When the `discovery-ec2` plugin is installed, you can use {plugins}/discovery-ec2-discovery.html#discovery-ec2-network-host[ec2 specific host settings]. +When the `discovery-gce` plugin is installed, you can use +{plugins}/discovery-gce-network-host.html[gce specific host settings]. + + [float] [[tcp-settings]] === TCP Settings diff --git a/docs/reference/modules/scripting.asciidoc b/docs/reference/modules/scripting.asciidoc index 7729ce20030..50be5fdce48 100644 --- a/docs/reference/modules/scripting.asciidoc +++ b/docs/reference/modules/scripting.asciidoc @@ -351,28 +351,86 @@ to `false`. [float] === Native (Java) Scripts -Even though `groovy` is pretty fast, this allows to register native Java based -scripts for faster execution. +Sometimes `groovy` and `expressions` aren't enough. For those times you can +implement a native script. -In order to allow for scripts, the `NativeScriptFactory` needs to be -implemented that constructs the script that will be executed. There are -two main types, one that extends `AbstractExecutableScript` and one that -extends `AbstractSearchScript` (probably the one most users will extend, -with additional helper classes in `AbstractLongSearchScript`, -`AbstractDoubleSearchScript`, and `AbstractFloatSearchScript`).
+The best way to implement a native script is to write a plugin and install it. +The plugin {plugins}/plugin-authors.html[documentation] has more information on +how to write a plugin so that Elasticsearch will properly load it. -Registering them can either be done by settings, for example: -`script.native.my.type` set to `sample.MyNativeScriptFactory` will -register a script named `my`. Another option is in a plugin, access -`ScriptModule` and call `registerScript` on it. +To register the actual script you'll need to implement `NativeScriptFactory` +to construct the script. The actual script will extend either +`AbstractExecutableScript` or `AbstractSearchScript`. The second one is likely +the most useful and has several helpful subclasses you can extend like +`AbstractLongSearchScript`, `AbstractDoubleSearchScript`, and +`AbstractFloatSearchScript`. Finally, your plugin should register the native +script by declaring the `onModule(ScriptModule)` method. -Executing the script is done by specifying the `lang` as `native`, and -the name of the script as the `script`. +If you squashed the whole thing into one class it'd look like: + +[source,java] +-------------------------------------------------- +public class MyNativeScriptPlugin extends Plugin { + @Override + public String name() { + return "my-native-script"; + } + @Override + public String description() { + return "my native script that does something great"; + } + public void onModule(ScriptModule scriptModule) { + scriptModule.registerScript("my_script", MyNativeScriptFactory.class); + } + + public static class MyNativeScriptFactory implements NativeScriptFactory { + @Override + public ExecutableScript newScript(@Nullable Map<String, Object> params) { + return new MyNativeScript(); + } + @Override + public boolean needsScores() { + return false; + } + } + + public static class MyNativeScript extends AbstractFloatSearchScript { + @Override + public float runAsFloat() { + float a = (float) source().get("a"); + float b = (float) source().get("b"); + return a * b; + } + } +} +-------------------------------------------------- + +You can execute the script by specifying its `lang` as `native`, and the name +of the script as the `id`: + +[source,js] +-------------------------------------------------- +curl -XPOST localhost:9200/_search -d '{ + "query": { + "function_score": { + "query": { + "match": { + "body": "foo" + } + }, + "functions": [ + { + "script_score": { + "id": "my_script", + "lang" : "native" + } + } + ] + } + } +}' +-------------------------------------------------- -Note, the scripts need to be in the classpath of elasticsearch. One -simple way to do it is to create a directory under plugins (choose a -descriptive name), and place the jar / classes files there. They will be -automatically loaded. [float] === Lucene Expressions Scripts @@ -624,16 +682,3 @@ power of the second argument. |`hypot(x, y)` |Returns sqrt(_x2_ + _y2_) without intermediate overflow or underflow. |======================================================================= - -[float] -=== Arithmetic precision in MVEL - -When dividing two numbers using MVEL based scripts, the engine tries to -be smart and adheres to the default behaviour of java. This means if you -divide two integers (you might have configured the fields as integer in -the mapping), the result will also be an integer. This means, if a -calculation like `1/num` is happening in your scripts and `num` is an -integer with the value of `8`, the result is `0` even though you were -expecting it to be `0.125`.
You may need to enforce precision by -explicitly using a double like `1.0/num` in order to get the expected -result. diff --git a/docs/reference/modules/snapshots.asciidoc b/docs/reference/modules/snapshots.asciidoc index 4830e430b0e..50ee4dfd1ea 100644 --- a/docs/reference/modules/snapshots.asciidoc +++ b/docs/reference/modules/snapshots.asciidoc @@ -121,7 +121,7 @@ The following settings are supported: using size value notation, i.e. 1g, 10m, 5k. Defaults to `null` (unlimited chunk size). `max_restore_bytes_per_sec`:: Throttles per node restore rate. Defaults to `40mb` per second. `max_snapshot_bytes_per_sec`:: Throttles per node snapshot rate. Defaults to `40mb` per second. -`readonly`:: Makes repository read-only. coming[2.1.0] Defaults to `false`. +`readonly`:: Makes repository read-only. Defaults to `false`. [float] ===== Read-only URL Repository @@ -259,7 +259,7 @@ GET /_snapshot/my_backup/_all ----------------------------------- // AUTOSENSE -coming[2.0.0-beta1] A currently running snapshot can be retrieved using the following command: +A currently running snapshot can be retrieved using the following command: [source,sh] ----------------------------------- diff --git a/docs/reference/query-dsl/mlt-query.asciidoc b/docs/reference/query-dsl/mlt-query.asciidoc index 9c42881fde5..ee4b695c2ff 100644 --- a/docs/reference/query-dsl/mlt-query.asciidoc +++ b/docs/reference/query-dsl/mlt-query.asciidoc @@ -149,7 +149,7 @@ input, the other one for term selection and for query formation. ==== Document Input Parameters [horizontal] -`like`:: coming[2.0.0-beta1] +`like`:: The only *required* parameter of the MLT query is `like` and follows a versatile syntax, in which the user can specify free form text and/or a single or multiple documents (see examples above). The syntax to specify documents is @@ -162,7 +162,7 @@ follows a similar syntax to the `per_field_analyzer` parameter of the Additionally, to provide documents not necessarily present in the index, <> are also supported. -`unlike`:: coming[2.0.0-beta1] +`unlike`:: The `unlike` parameter is used in conjunction with `like` in order not to select terms found in a chosen set of documents. In other words, we could ask for documents `like: "Apple"`, but `unlike: "cake crumble tree"`. The syntax @@ -172,10 +172,10 @@ is the same as `like`. A list of fields to fetch and analyze the text from. Defaults to the `_all` field for free text and to all possible fields for document inputs. -`like_text`:: deprecated[2.0.0-beta1,Replaced by `like`] +`like_text`:: The text to find documents like it. -`ids` or `docs`:: deprecated[2.0.0-beta1,Replaced by `like`] +`ids` or `docs`:: A list of documents following the same syntax as the <>. [float] diff --git a/docs/reference/search/request/scroll.asciidoc b/docs/reference/search/request/scroll.asciidoc index 29214415bbf..825564d799d 100644 --- a/docs/reference/search/request/scroll.asciidoc +++ b/docs/reference/search/request/scroll.asciidoc @@ -63,8 +63,6 @@ curl -XGET <1> 'localhost:9200/_search/scroll' <2> -d' ' -------------------------------------------------- -coming[2.0.0-beta1, body based parameters were added in 2.0.0] - <1> `GET` or `POST` can be used. <2> The URL should not include the `index` or `type` name -- these are specified in the original `search` request instead. 
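+For Java API users, the scroll workflow above might look roughly like the sketch
+below. This is an untested illustration, not part of this change; it assumes the
+2.x `Client` scroll builders, and the index name and batch size are arbitrary:
+
+[source,java]
+--------------------------------------------------
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.query.QueryBuilders;
+
+public class ScrollAllDocs {
+    /** Walks every document of an index in batches, then frees the scroll context. */
+    public static void scrollAll(Client client, String index) {
+        SearchResponse response = client.prepareSearch(index)
+                .setQuery(QueryBuilders.matchAllQuery())
+                .setScroll(TimeValue.timeValueMinutes(1)) // keep the context alive between batches
+                .setSize(100)
+                .get();
+        while (response.getHits().getHits().length > 0) {
+            // process response.getHits() here ...
+            response = client.prepareSearchScroll(response.getScrollId())
+                    .setScroll(TimeValue.timeValueMinutes(1))
+                    .get();
+        }
+        // equivalent of DELETE /_search/scroll: release the server-side context
+        client.prepareClearScroll().addScrollId(response.getScrollId()).get();
+    }
+}
+--------------------------------------------------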
@@ -151,8 +149,6 @@ curl -XDELETE localhost:9200/_search/scroll -d ' }' --------------------------------------- -coming[2.0.0-beta1, Body based parameters were added in 2.0.0] - Multiple scroll IDs can be passed as an array: [source,js] --------------------------------------- @@ -163,8 +159,6 @@ curl -XDELETE localhost:9200/_search/scroll -d ' }' --------------------------------------- -coming[2.0.0-beta1, Body based parameters were added in 2.0.0] - All search contexts can be cleared with the `_all` parameter: [source,js] diff --git a/docs/reference/setup.asciidoc b/docs/reference/setup.asciidoc index 9d14e62e3b4..15f23e6fe1e 100644 --- a/docs/reference/setup.asciidoc +++ b/docs/reference/setup.asciidoc @@ -10,6 +10,14 @@ then check the <> docs. NOTE: Elasticsearch can also be installed from our repositories using `apt` or `yum`. See <>. +[[supported-platforms]] +[float] +== Supported platforms + +The matrix of officially supported operating systems and JVMs is available here: +link:/support/matrix[Support Matrix]. Elasticsearch is tested on the listed +platforms, but it may work on other platforms too. + [[setup-installation]] [float] == Installation diff --git a/docs/reference/setup/as-a-service.asciidoc b/docs/reference/setup/as-a-service.asciidoc index 01bbd2d24e0..1bd6d9bb7ae 100644 --- a/docs/reference/setup/as-a-service.asciidoc +++ b/docs/reference/setup/as-a-service.asciidoc @@ -22,7 +22,6 @@ Each package features a configuration file, which allows you to set the followin `LOG_DIR`:: Log directory, defaults to `/var/log/elasticsearch` `DATA_DIR`:: Data directory, defaults to `/var/lib/elasticsearch` `CONF_DIR`:: Configuration file directory (which needs to include `elasticsearch.yml` and `logging.yml` files), defaults to `/etc/elasticsearch` -`CONF_FILE`:: Path to configuration file, defaults to `/etc/elasticsearch/elasticsearch.yml` `ES_JAVA_OPTS`:: Any additional java options you may want to apply. This may be useful, if you need to set the `node.name` property, but do not want to change the `elasticsearch.yml` configuration file, because it is distributed via a provisioning system like puppet or chef. Example: `ES_JAVA_OPTS="-Des.node.name=search-01"` `RESTART_ON_UPGRADE`:: Configure restart on package upgrade, defaults to `false`. This means you will have to restart your elasticsearch instance after installing a package manually. The reason for this is to ensure, that upgrades in a cluster do not result in a continuous shard reallocation resulting in high network traffic and reducing the response times of your cluster. `ES_GC_LOG_FILE` :: The absolute log file path for creating a garbage collection logfile, which is done by the JVM. Note that this logfile can grow pretty quick and thus is disabled by default. @@ -40,6 +39,8 @@ sudo update-rc.d elasticsearch defaults 95 10 sudo /etc/init.d/elasticsearch start -------------------------------------------------- +Users running Debian 8 or Ubuntu 14 or later may need to configure systemd instead of `update-rc.d`. In those cases, please refer to the <<using-systemd>> section. + [float] ===== Installing the oracle JDK @@ -70,11 +71,11 @@ sudo /sbin/chkconfig --add elasticsearch sudo service elasticsearch start -------------------------------------------------- - +[[using-systemd]] [float] -===== Using systemd +==== Using systemd -Distributions like SUSE do not use the `chkconfig` tool to register services, but rather `systemd` and its command `/bin/systemctl` to start and stop services (at least in newer versions, otherwise use the `chkconfig` commands above).
The configuration file is also placed at `/etc/sysconfig/elasticsearch`. After installing the RPM, you have to change the systemd configuration and then start up elasticsearch +Distributions like Debian Jessie, Ubuntu 14, and many of the SUSE derivatives do not use the `chkconfig` tool to register services, but rather `systemd` and its command `/bin/systemctl` to start and stop services (at least in newer versions, otherwise use the `chkconfig` commands above). The configuration file is also placed at `/etc/sysconfig/elasticsearch` if the system is RPM-based, and `/etc/default/elasticsearch` if it is DEB-based. After installing the RPM, you have to change the systemd configuration and then start up elasticsearch [source,sh] -------------------------------------------------- diff --git a/docs/reference/setup/configuration.asciidoc b/docs/reference/setup/configuration.asciidoc index 45c384bb7bb..07688919e8a 100644 --- a/docs/reference/setup/configuration.asciidoc +++ b/docs/reference/setup/configuration.asciidoc @@ -298,14 +298,6 @@ Enter value for [node.name]: NOTE: Elasticsearch will not start if `${prompt.text}` or `${prompt.secret}` is used in the settings and the process is run as a service or in the background. -The location of the configuration file can be set externally using a -system property: - -[source,sh] --------------------------------------------------- -$ elasticsearch -Des.config=/path/to/config/file --------------------------------------------------- - [float] [[configuration-index-settings]] === Index Settings diff --git a/docs/reference/setup/repositories.asciidoc b/docs/reference/setup/repositories.asciidoc index 79b8959c249..70b000ec48c 100644 --- a/docs/reference/setup/repositories.asciidoc +++ b/docs/reference/setup/repositories.asciidoc @@ -6,7 +6,7 @@ binary packages, but no source packages, as the packages are created as part of build. We have split the major versions into separate URLs to avoid accidental upgrades across major versions. -For all 0.90.x releases use 0.90 as version number, for 1.0.x use 1.0, for 1.1.x use 1.1 etc. +For all 2.x releases use 2.x as the version number, for 3.x.y use 3.x, etc.
We use the PGP key https://pgp.mit.edu/pks/lookup?op=vindex&search=0xD27D666CD88E42B4[D88E42B4], Elasticsearch Signing Key, with fingerprint @@ -25,11 +25,11 @@ Download and install the Public Signing Key: wget -qO - https://packages.elastic.co/GPG-KEY-elasticsearch | sudo apt-key add - -------------------------------------------------- -Save the repository definition to `/etc/apt/sources.list.d/elasticsearch-{branch}.list`: +Save the repository definition to +/etc/apt/sources.list.d/elasticsearch-{major-version}.list+: ["source","sh",subs="attributes,callouts"] -------------------------------------------------- -echo "deb http://packages.elastic.co/elasticsearch/{branch}/debian stable main" | sudo tee -a /etc/apt/sources.list.d/elasticsearch-{branch}.list +echo "deb http://packages.elastic.co/elasticsearch/{major-version}/debian stable main" | sudo tee -a /etc/apt/sources.list.d/elasticsearch-{major-version}.list -------------------------------------------------- [WARNING] @@ -57,9 +57,9 @@ If two entries exist for the same Elasticsearch repository, you will see an erro ["literal",subs="attributes,callouts"] -Duplicate sources.list entry http://packages.elastic.co/elasticsearch/{branch}/debian/ ...` +Duplicate sources.list entry http://packages.elastic.co/elasticsearch/{major-version}/debian/ ...` -Examine +/etc/apt/sources.list.d/elasticsearch-{branch}.list+ for the duplicate entry or locate the duplicate entry amongst the files in `/etc/apt/sources.list.d/` and the `/etc/apt/sources.list` file. +Examine +/etc/apt/sources.list.d/elasticsearch-{major-version}.list+ for the duplicate entry or locate the duplicate entry amongst the files in `/etc/apt/sources.list.d/` and the `/etc/apt/sources.list` file. ================================================== Configure Elasticsearch to automatically start during bootup. If your @@ -93,9 +93,9 @@ in a file with a `.repo` suffix, for example `elasticsearch.repo` ["source","sh",subs="attributes,callouts"] -------------------------------------------------- -[elasticsearch-{branch}] -name=Elasticsearch repository for {branch}.x packages -baseurl=http://packages.elastic.co/elasticsearch/{branch}/centos +[elasticsearch-{major-version}] +name=Elasticsearch repository for {major-version} packages +baseurl=http://packages.elastic.co/elasticsearch/{major-version}/centos gpgcheck=1 gpgkey=http://packages.elastic.co/GPG-KEY-elasticsearch enabled=1 diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/network/Ec2NameResolver.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/network/Ec2NameResolver.java index f04d3ecc937..3788f8237dc 100755 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/network/Ec2NameResolver.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/network/Ec2NameResolver.java @@ -91,31 +91,25 @@ public class Ec2NameResolver extends AbstractComponent implements CustomNameReso * @return the appropriate host resolved from ec2 meta-data, or null if it cannot be obtained. 
* @see CustomNameResolver#resolveIfPossible(String) */ - public InetAddress[] resolve(Ec2HostnameType type, boolean warnOnFailure) { - URLConnection urlConnection = null; + public InetAddress[] resolve(Ec2HostnameType type) throws IOException { InputStream in = null; + String metadataUrl = AwsEc2ServiceImpl.EC2_METADATA_URL + type.ec2Name; try { - URL url = new URL(AwsEc2ServiceImpl.EC2_METADATA_URL + type.ec2Name); + URL url = new URL(metadataUrl); logger.debug("obtaining ec2 hostname from ec2 meta-data url {}", url); - urlConnection = url.openConnection(); + URLConnection urlConnection = url.openConnection(); urlConnection.setConnectTimeout(2000); in = urlConnection.getInputStream(); BufferedReader urlReader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8)); String metadataResult = urlReader.readLine(); if (metadataResult == null || metadataResult.length() == 0) { - logger.error("no ec2 metadata returned from {}", url); - return null; + throw new IOException("no ec2 metadata returned from [" + url + "] for [" + type.configName + "]"); } // only one address: because we explicitly ask for only one via the Ec2HostnameType return new InetAddress[] { InetAddress.getByName(metadataResult) }; } catch (IOException e) { - if (warnOnFailure) { - logger.warn("failed to get metadata for [" + type.configName + "]", e); - } else { - logger.debug("failed to get metadata for [" + type.configName + "]", e); - } - return null; + throw new IOException("IOException caught when fetching InetAddress from [" + metadataUrl + "]", e); } finally { IOUtils.closeWhileHandlingException(in); } @@ -128,10 +122,10 @@ public class Ec2NameResolver extends AbstractComponent implements CustomNameReso } @Override - public InetAddress[] resolveIfPossible(String value) { + public InetAddress[] resolveIfPossible(String value) throws IOException { for (Ec2HostnameType type : Ec2HostnameType.values()) { if (type.configName.equals(value)) { - return resolve(type, true); + return resolve(type); } } return null; diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2NetworkTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2NetworkTests.java new file mode 100644 index 00000000000..8aa9ca56a3f --- /dev/null +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2NetworkTests.java @@ -0,0 +1,187 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied.
+ */ + +package org.elasticsearch.discovery.ec2; + +import org.elasticsearch.cloud.aws.network.Ec2NameResolver; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.junit.Test; + +import java.io.IOException; +import java.net.InetAddress; + +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.containsString; + +/** + * Test for EC2 network.host settings. + */ +public class Ec2NetworkTests extends ESTestCase { + + /** + * Test for network.host: _ec2_ + */ + @Test + public void networkHostEc2() throws IOException { + Settings nodeSettings = Settings.builder() + .put("network.host", "_ec2_") + .build(); + + NetworkService networkService = new NetworkService(nodeSettings); + networkService.addCustomNameResolver(new Ec2NameResolver(nodeSettings)); + // TODO we need to replace that with a mock. For now we check the URL we are supposed to reach. + try { + networkService.resolveBindHostAddress(null); + } catch (IOException e) { + assertThat(e.getMessage(), containsString("local-ipv4")); + } + } + + /** + * Test for network.host: _ec2:publicIp_ + */ + @Test + public void networkHostEc2PublicIp() throws IOException { + Settings nodeSettings = Settings.builder() + .put("network.host", "_ec2:publicIp_") + .build(); + + NetworkService networkService = new NetworkService(nodeSettings); + networkService.addCustomNameResolver(new Ec2NameResolver(nodeSettings)); + // TODO we need to replace that with a mock. For now we check the URL we are supposed to reach. + try { + networkService.resolveBindHostAddress(null); + } catch (IOException e) { + assertThat(e.getMessage(), containsString("public-ipv4")); + } + } + + /** + * Test for network.host: _ec2:privateIp_ + */ + @Test + public void networkHostEc2PrivateIp() throws IOException { + Settings nodeSettings = Settings.builder() + .put("network.host", "_ec2:privateIp_") + .build(); + + NetworkService networkService = new NetworkService(nodeSettings); + networkService.addCustomNameResolver(new Ec2NameResolver(nodeSettings)); + // TODO we need to replace that with a mock. For now we check the URL we are supposed to reach. + try { + networkService.resolveBindHostAddress(null); + } catch (IOException e) { + assertThat(e.getMessage(), containsString("local-ipv4")); + } + } + + /** + * Test for network.host: _ec2:privateIpv4_ + */ + @Test + public void networkHostEc2PrivateIpv4() throws IOException { + Settings nodeSettings = Settings.builder() + .put("network.host", "_ec2:privateIpv4_") + .build(); + + NetworkService networkService = new NetworkService(nodeSettings); + networkService.addCustomNameResolver(new Ec2NameResolver(nodeSettings)); + // TODO we need to replace that with a mock. For now we check the URL we are supposed to reach. + try { + networkService.resolveBindHostAddress(null); + } catch (IOException e) { + assertThat(e.getMessage(), containsString("local-ipv4")); + } + } + + /** + * Test for network.host: _ec2:privateDns_ + */ + @Test + public void networkHostEc2PrivateDns() throws IOException { + Settings nodeSettings = Settings.builder() + .put("network.host", "_ec2:privateDns_") + .build(); + + NetworkService networkService = new NetworkService(nodeSettings); + networkService.addCustomNameResolver(new Ec2NameResolver(nodeSettings)); + // TODO we need to replace that with a mock. For now we check the URL we are supposed to reach. 
+ try { + networkService.resolveBindHostAddress(null); + } catch (IOException e) { + assertThat(e.getMessage(), containsString("local-hostname")); + } + } + + /** + * Test for network.host: _ec2:publicIpv4_ + */ + @Test + public void networkHostEc2PublicIpv4() throws IOException { + Settings nodeSettings = Settings.builder() + .put("network.host", "_ec2:publicIpv4_") + .build(); + + NetworkService networkService = new NetworkService(nodeSettings); + networkService.addCustomNameResolver(new Ec2NameResolver(nodeSettings)); + // TODO we need to replace that with a mock. For now we check the URL we are supposed to reach. + try { + networkService.resolveBindHostAddress(null); + } catch (IOException e) { + assertThat(e.getMessage(), containsString("public-ipv4")); + } + } + + /** + * Test for network.host: _ec2:publicDns_ + */ + @Test + public void networkHostEc2PublicDns() throws IOException { + Settings nodeSettings = Settings.builder() + .put("network.host", "_ec2:publicDns_") + .build(); + + NetworkService networkService = new NetworkService(nodeSettings); + networkService.addCustomNameResolver(new Ec2NameResolver(nodeSettings)); + // TODO we need to replace that with a mock. For now we check the URL we are supposed to reach. + try { + networkService.resolveBindHostAddress(null); + } catch (IOException e) { + assertThat(e.getMessage(), containsString("public-hostname")); + } + } + + /** + * Test that we don't have any regression with network host core settings such as + * network.host: _local_ + */ + @Test + public void networkHostCoreLocal() throws IOException { + Settings nodeSettings = Settings.builder() + .put("network.host", "_local_") + .build(); + + NetworkService networkService = new NetworkService(nodeSettings); + networkService.addCustomNameResolver(new Ec2NameResolver(nodeSettings)); + InetAddress[] addresses = networkService.resolveBindHostAddress(null); + assertThat(addresses, arrayContaining(networkService.resolveBindHostAddress("_local_"))); + } +} diff --git a/plugins/cloud-gce/licenses/commons-codec-1.6.jar.sha1 b/plugins/discovery-gce/licenses/commons-codec-1.6.jar.sha1 similarity index 100% rename from plugins/cloud-gce/licenses/commons-codec-1.6.jar.sha1 rename to plugins/discovery-gce/licenses/commons-codec-1.6.jar.sha1 diff --git a/plugins/cloud-gce/licenses/commons-codec-LICENSE.txt b/plugins/discovery-gce/licenses/commons-codec-LICENSE.txt similarity index 100% rename from plugins/cloud-gce/licenses/commons-codec-LICENSE.txt rename to plugins/discovery-gce/licenses/commons-codec-LICENSE.txt diff --git a/plugins/cloud-gce/licenses/commons-codec-NOTICE.txt b/plugins/discovery-gce/licenses/commons-codec-NOTICE.txt similarity index 100% rename from plugins/cloud-gce/licenses/commons-codec-NOTICE.txt rename to plugins/discovery-gce/licenses/commons-codec-NOTICE.txt diff --git a/plugins/cloud-gce/licenses/commons-logging-1.1.3.jar.sha1 b/plugins/discovery-gce/licenses/commons-logging-1.1.3.jar.sha1 similarity index 100% rename from plugins/cloud-gce/licenses/commons-logging-1.1.3.jar.sha1 rename to plugins/discovery-gce/licenses/commons-logging-1.1.3.jar.sha1 diff --git a/plugins/cloud-gce/licenses/commons-logging-LICENSE.txt b/plugins/discovery-gce/licenses/commons-logging-LICENSE.txt similarity index 100% rename from plugins/cloud-gce/licenses/commons-logging-LICENSE.txt rename to plugins/discovery-gce/licenses/commons-logging-LICENSE.txt diff --git a/plugins/cloud-gce/licenses/commons-logging-NOTICE.txt b/plugins/discovery-gce/licenses/commons-logging-NOTICE.txt 
similarity index 100% rename from plugins/cloud-gce/licenses/commons-logging-NOTICE.txt rename to plugins/discovery-gce/licenses/commons-logging-NOTICE.txt diff --git a/plugins/cloud-gce/licenses/google-LICENSE.txt b/plugins/discovery-gce/licenses/google-LICENSE.txt similarity index 100% rename from plugins/cloud-gce/licenses/google-LICENSE.txt rename to plugins/discovery-gce/licenses/google-LICENSE.txt diff --git a/plugins/cloud-gce/licenses/google-NOTICE.txt b/plugins/discovery-gce/licenses/google-NOTICE.txt similarity index 100% rename from plugins/cloud-gce/licenses/google-NOTICE.txt rename to plugins/discovery-gce/licenses/google-NOTICE.txt diff --git a/plugins/cloud-gce/licenses/google-api-client-1.20.0.jar.sha1 b/plugins/discovery-gce/licenses/google-api-client-1.20.0.jar.sha1 similarity index 100% rename from plugins/cloud-gce/licenses/google-api-client-1.20.0.jar.sha1 rename to plugins/discovery-gce/licenses/google-api-client-1.20.0.jar.sha1 diff --git a/plugins/cloud-gce/licenses/google-api-services-compute-v1-rev71-1.20.0.jar.sha1 b/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev71-1.20.0.jar.sha1 similarity index 100% rename from plugins/cloud-gce/licenses/google-api-services-compute-v1-rev71-1.20.0.jar.sha1 rename to plugins/discovery-gce/licenses/google-api-services-compute-v1-rev71-1.20.0.jar.sha1 diff --git a/plugins/cloud-gce/licenses/google-http-client-1.20.0.jar.sha1 b/plugins/discovery-gce/licenses/google-http-client-1.20.0.jar.sha1 similarity index 100% rename from plugins/cloud-gce/licenses/google-http-client-1.20.0.jar.sha1 rename to plugins/discovery-gce/licenses/google-http-client-1.20.0.jar.sha1 diff --git a/plugins/cloud-gce/licenses/google-http-client-jackson2-1.20.0.jar.sha1 b/plugins/discovery-gce/licenses/google-http-client-jackson2-1.20.0.jar.sha1 similarity index 100% rename from plugins/cloud-gce/licenses/google-http-client-jackson2-1.20.0.jar.sha1 rename to plugins/discovery-gce/licenses/google-http-client-jackson2-1.20.0.jar.sha1 diff --git a/plugins/cloud-gce/licenses/google-oauth-client-1.20.0.jar.sha1 b/plugins/discovery-gce/licenses/google-oauth-client-1.20.0.jar.sha1 similarity index 100% rename from plugins/cloud-gce/licenses/google-oauth-client-1.20.0.jar.sha1 rename to plugins/discovery-gce/licenses/google-oauth-client-1.20.0.jar.sha1 diff --git a/plugins/cloud-gce/licenses/httpclient-4.3.6.jar.sha1 b/plugins/discovery-gce/licenses/httpclient-4.3.6.jar.sha1 similarity index 100% rename from plugins/cloud-gce/licenses/httpclient-4.3.6.jar.sha1 rename to plugins/discovery-gce/licenses/httpclient-4.3.6.jar.sha1 diff --git a/plugins/cloud-gce/licenses/httpclient-LICENSE.txt b/plugins/discovery-gce/licenses/httpclient-LICENSE.txt similarity index 100% rename from plugins/cloud-gce/licenses/httpclient-LICENSE.txt rename to plugins/discovery-gce/licenses/httpclient-LICENSE.txt diff --git a/plugins/cloud-gce/licenses/httpclient-NOTICE.txt b/plugins/discovery-gce/licenses/httpclient-NOTICE.txt similarity index 100% rename from plugins/cloud-gce/licenses/httpclient-NOTICE.txt rename to plugins/discovery-gce/licenses/httpclient-NOTICE.txt diff --git a/plugins/cloud-gce/licenses/httpcore-4.3.3.jar.sha1 b/plugins/discovery-gce/licenses/httpcore-4.3.3.jar.sha1 similarity index 100% rename from plugins/cloud-gce/licenses/httpcore-4.3.3.jar.sha1 rename to plugins/discovery-gce/licenses/httpcore-4.3.3.jar.sha1 diff --git a/plugins/cloud-gce/licenses/httpcore-LICENSE.txt b/plugins/discovery-gce/licenses/httpcore-LICENSE.txt similarity index 100% 
rename from plugins/cloud-gce/licenses/httpcore-LICENSE.txt rename to plugins/discovery-gce/licenses/httpcore-LICENSE.txt diff --git a/plugins/cloud-gce/licenses/httpcore-NOTICE.txt b/plugins/discovery-gce/licenses/httpcore-NOTICE.txt similarity index 100% rename from plugins/cloud-gce/licenses/httpcore-NOTICE.txt rename to plugins/discovery-gce/licenses/httpcore-NOTICE.txt diff --git a/plugins/cloud-gce/licenses/jsr305-1.3.9.jar.sha1 b/plugins/discovery-gce/licenses/jsr305-1.3.9.jar.sha1 similarity index 100% rename from plugins/cloud-gce/licenses/jsr305-1.3.9.jar.sha1 rename to plugins/discovery-gce/licenses/jsr305-1.3.9.jar.sha1 diff --git a/plugins/cloud-gce/licenses/jsr305-LICENSE.txt b/plugins/discovery-gce/licenses/jsr305-LICENSE.txt similarity index 100% rename from plugins/cloud-gce/licenses/jsr305-LICENSE.txt rename to plugins/discovery-gce/licenses/jsr305-LICENSE.txt diff --git a/plugins/cloud-gce/licenses/jsr305-NOTICE.txt b/plugins/discovery-gce/licenses/jsr305-NOTICE.txt similarity index 100% rename from plugins/cloud-gce/licenses/jsr305-NOTICE.txt rename to plugins/discovery-gce/licenses/jsr305-NOTICE.txt diff --git a/plugins/cloud-gce/pom.xml b/plugins/discovery-gce/pom.xml similarity index 87% rename from plugins/cloud-gce/pom.xml rename to plugins/discovery-gce/pom.xml index da987f8d45b..b7c1c0a7a61 100644 --- a/plugins/cloud-gce/pom.xml +++ b/plugins/discovery-gce/pom.xml @@ -21,15 +21,15 @@ governing permissions and limitations under the License. --> 3.0.0-SNAPSHOT - cloud-gce - Plugin: Cloud: Google Compute Engine - The Google Compute Engine (GCE) Cloud plugin allows to use GCE API for the unicast discovery mechanism. + discovery-gce + Plugin: Discovery: Google Compute Engine + The Google Compute Engine (GCE) Discovery plugin allows to use GCE API for the unicast discovery mechanism. 
- org.elasticsearch.plugin.cloud.gce.CloudGcePlugin + org.elasticsearch.plugin.discovery.gce.GceDiscoveryPlugin v1-rev71-1.20.0 - cloud_gce + discovery_gce false -Xlint:-rawtypes,-unchecked diff --git a/plugins/cloud-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeService.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeService.java similarity index 58% rename from plugins/cloud-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeService.java rename to plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeService.java index 6ba857db17a..c7f45980b9b 100644 --- a/plugins/cloud-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeService.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeService.java @@ -22,13 +22,11 @@ package org.elasticsearch.cloud.gce; import com.google.api.services.compute.model.Instance; import org.elasticsearch.common.component.LifecycleComponent; +import java.io.IOException; import java.util.Collection; -/** - * - */ public interface GceComputeService extends LifecycleComponent<GceComputeService> { - static final public class Fields { + final class Fields { public static final String PROJECT = "cloud.gce.project_id"; public static final String ZONE = "cloud.gce.zone"; public static final String REFRESH = "cloud.gce.refresh_interval"; @@ -36,5 +34,24 @@ public interface GceComputeService extends LifecycleComponent public static final String VERSION = "Elasticsearch/GceCloud/1.0"; } - public Collection<Instance> instances(); + /** + * Return a collection of running instances within the same GCE project + * @return a collection of running instances within the same GCE project + */ + Collection<Instance> instances(); + + /** + *

<p>Gets metadata on the current running machine (call to
+     * http://metadata.google.internal/computeMetadata/v1/instance/xxx).</p>
+     * <p>For example, you can retrieve network information by replacing xxx with:</p>
+     * <ul>
+     *     <li>`hostname` when we need to resolve the host name</li>
+     *     <li>`network-interfaces/0/ip` when we need to resolve private IP</li>
+     * </ul>
    + * @see org.elasticsearch.cloud.gce.network.GceNameResolver for bindings + * @param metadataPath path to metadata information + * @return extracted information (for example a hostname or an IP address) + * @throws IOException in case metadata URL is not accessible + */ + String metadata(String metadataPath) throws IOException; } diff --git a/plugins/cloud-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeServiceImpl.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeServiceImpl.java similarity index 78% rename from plugins/cloud-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeServiceImpl.java rename to plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeServiceImpl.java index 2a9bf6d62d1..a29c21ec526 100644 --- a/plugins/cloud-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeServiceImpl.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeServiceImpl.java @@ -21,6 +21,9 @@ package org.elasticsearch.cloud.gce; import com.google.api.client.googleapis.compute.ComputeCredential; import com.google.api.client.googleapis.javanet.GoogleNetHttpTransport; +import com.google.api.client.http.GenericUrl; +import com.google.api.client.http.HttpHeaders; +import com.google.api.client.http.HttpResponse; import com.google.api.client.http.HttpTransport; import com.google.api.client.json.JsonFactory; import com.google.api.client.json.jackson2.JacksonFactory; @@ -30,21 +33,21 @@ import com.google.api.services.compute.model.InstanceList; import org.elasticsearch.SpecialPermission; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cloud.gce.network.GceNameResolver; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import java.io.IOException; +import java.net.URL; import java.security.AccessController; import java.security.GeneralSecurityException; import java.security.PrivilegedActionException; import java.security.PrivilegedExceptionAction; import java.util.*; -/** - * - */ public class GceComputeServiceImpl extends AbstractLifecycleComponent implements GceComputeService { @@ -54,7 +57,8 @@ public class GceComputeServiceImpl extends AbstractLifecycleComponent instances() { @@ -95,6 +99,37 @@ public class GceComputeServiceImpl extends AbstractLifecycleComponent() { + @Override + public HttpHeaders run() throws IOException { + return new HttpHeaders(); + } + }); + + // This is needed to query meta data: https://cloud.google.com/compute/docs/metadata + headers.put("Metadata-Flavor", "Google"); + HttpResponse response; + response = getGceHttpTransport().createRequestFactory() + .buildGetRequest(new GenericUrl(url)) + .setHeaders(headers) + .execute(); + String metadata = response.parseAsString(); + logger.debug("metadata found [{}]", metadata); + return metadata; + } catch (Exception e) { + throw new IOException("failed to fetch metadata from [" + urlMetadataNetwork + "]", e); + } + } + private Compute client; private TimeValue refreshInterval = null; private long lastRefresh; @@ -106,11 +141,12 @@ public class GceComputeServiceImpl extends AbstractLifecycleComponentResolves certain GCE related 'meta' hostnames into an actual hostname + * obtained from gce meta-data.

+ * Valid config values for {@link GceAddressResolverType}s are -
+ * <ul>
+ * <li>_gce_ - maps to privateIp</li>
+ * <li>_gce:privateIp_</li>
+ * <li>_gce:hostname_</li>
+ * </ul>
    + */ +public class GceNameResolver extends AbstractComponent implements CustomNameResolver { + + private final GceComputeService gceComputeService; + + /** + * enum that can be added to over time with more meta-data types + */ + private enum GceAddressResolverType { + + /** + * Using the hostname + */ + PRIVATE_DNS("gce:hostname", "hostname"), + /** + * Can be gce:privateIp, gce:privateIp:X where X is the network interface + */ + PRIVATE_IP("gce:privateIp", "network-interfaces/{{network}}/ip"), + /** + * same as "gce:privateIp" or "gce:privateIp:0" + */ + GCE("gce", PRIVATE_IP.gceName); + + final String configName; + final String gceName; + + GceAddressResolverType(String configName, String gceName) { + this.configName = configName; + this.gceName = gceName; + } + } + + /** + * Construct a {@link CustomNameResolver}. + */ + public GceNameResolver(Settings settings, GceComputeService gceComputeService) { + super(settings); + this.gceComputeService = gceComputeService; + } + + /** + * @param value the gce hostname type to discover. + * @return the appropriate host resolved from gce meta-data. + * @see CustomNameResolver#resolveIfPossible(String) + */ + private InetAddress[] resolve(String value) throws IOException { + String gceMetadataPath; + if (value.equals(GceAddressResolverType.GCE.configName)) { + // We replace network placeholder with default network interface value: 0 + gceMetadataPath = Strings.replace(GceAddressResolverType.GCE.gceName, "{{network}}", "0"); + } else if (value.equals(GceAddressResolverType.PRIVATE_DNS.configName)) { + gceMetadataPath = GceAddressResolverType.PRIVATE_DNS.gceName; + } else if (value.startsWith(GceAddressResolverType.PRIVATE_IP.configName)) { + // We extract the network interface from gce:privateIp:XX + String network = "0"; + String[] privateIpConfig = Strings.splitStringToArray(value, ':'); + if (privateIpConfig != null && privateIpConfig.length == 3) { + network = privateIpConfig[2]; + } + + // We replace network placeholder with network interface value + gceMetadataPath = Strings.replace(GceAddressResolverType.PRIVATE_IP.gceName, "{{network}}", network); + } else { + throw new IllegalArgumentException("[" + value + "] is not one of the supported GCE network.host setting. 
" + + "Expecting _gce_, _gce:privateIp:X_, _gce:hostname_"); + } + + try { + String metadataResult = gceComputeService.metadata(gceMetadataPath); + if (metadataResult == null || metadataResult.length() == 0) { + throw new IOException("no gce metadata returned from [" + gceMetadataPath + "] for [" + value + "]"); + } + // only one address: because we explicitly ask for only one via the GceHostnameType + return new InetAddress[] { InetAddress.getByName(metadataResult) }; + } catch (IOException e) { + throw new IOException("IOException caught when fetching InetAddress from [" + gceMetadataPath + "]", e); + } + } + + @Override + public InetAddress[] resolveDefault() { + return null; // using this, one has to explicitly specify _gce_ in network setting + } + + @Override + public InetAddress[] resolveIfPossible(String value) throws IOException { + // We only try to resolve network.host setting when it starts with _gce + if (value.startsWith("gce")) { + return resolve(value); + } + return null; + } +} diff --git a/plugins/cloud-gce/src/main/java/org/elasticsearch/discovery/gce/GceDiscovery.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceDiscovery.java similarity index 100% rename from plugins/cloud-gce/src/main/java/org/elasticsearch/discovery/gce/GceDiscovery.java rename to plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceDiscovery.java diff --git a/plugins/cloud-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java similarity index 100% rename from plugins/cloud-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java rename to plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java diff --git a/plugins/cloud-gce/src/main/java/org/elasticsearch/plugin/cloud/gce/CloudGcePlugin.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java similarity index 92% rename from plugins/cloud-gce/src/main/java/org/elasticsearch/plugin/cloud/gce/CloudGcePlugin.java rename to plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java index 0ff8d4ef6b7..a17c3962797 100644 --- a/plugins/cloud-gce/src/main/java/org/elasticsearch/plugin/cloud/gce/CloudGcePlugin.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.plugin.cloud.gce; +package org.elasticsearch.plugin.discovery.gce; import org.elasticsearch.cloud.gce.GceComputeService; import org.elasticsearch.cloud.gce.GceModule; @@ -36,26 +36,23 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; -/** - * - */ -public class CloudGcePlugin extends Plugin { +public class GceDiscoveryPlugin extends Plugin { private final Settings settings; - protected final ESLogger logger = Loggers.getLogger(CloudGcePlugin.class); + protected final ESLogger logger = Loggers.getLogger(GceDiscoveryPlugin.class); - public CloudGcePlugin(Settings settings) { + public GceDiscoveryPlugin(Settings settings) { this.settings = settings; } @Override public String name() { - return "cloud-gce"; + return "discovery-gce"; } @Override public String description() { - return "Cloud Google Compute Engine Plugin"; + return "Cloud Google Compute Engine Discovery Plugin"; } @Override diff --git a/plugins/cloud-gce/src/test/java/org/elasticsearch/cloud/gce/CloudGCERestIT.java b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/DiscoveryGCERestIT.java similarity index 83% rename from plugins/cloud-gce/src/test/java/org/elasticsearch/cloud/gce/CloudGCERestIT.java rename to plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/DiscoveryGCERestIT.java index e20965565f9..1a218394b7d 100644 --- a/plugins/cloud-gce/src/test/java/org/elasticsearch/cloud/gce/CloudGCERestIT.java +++ b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/DiscoveryGCERestIT.java @@ -17,11 +17,11 @@ * under the License. */ -package org.elasticsearch.cloud.gce; +package org.elasticsearch.discovery.gce; import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import org.elasticsearch.plugin.cloud.gce.CloudGcePlugin; +import org.elasticsearch.plugin.discovery.gce.GceDiscoveryPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.RestTestCandidate; @@ -30,14 +30,14 @@ import org.elasticsearch.test.rest.parser.RestTestParseException; import java.io.IOException; import java.util.Collection; -public class CloudGCERestIT extends ESRestTestCase { +public class DiscoveryGCERestIT extends ESRestTestCase { @Override protected Collection> nodePlugins() { - return pluginList(CloudGcePlugin.class); + return pluginList(GceDiscoveryPlugin.class); } - public CloudGCERestIT(@Name("yaml") RestTestCandidate testCandidate) { + public DiscoveryGCERestIT(@Name("yaml") RestTestCandidate testCandidate) { super(testCandidate); } diff --git a/plugins/cloud-gce/src/test/java/org/elasticsearch/discovery/gce/GceComputeServiceMock.java b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceComputeServiceMock.java similarity index 86% rename from plugins/cloud-gce/src/test/java/org/elasticsearch/discovery/gce/GceComputeServiceMock.java rename to plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceComputeServiceMock.java index 1892297efe8..dcbd53fa95e 100644 --- a/plugins/cloud-gce/src/test/java/org/elasticsearch/discovery/gce/GceComputeServiceMock.java +++ b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceComputeServiceMock.java @@ -29,6 +29,7 @@ import com.google.api.client.testing.http.MockLowLevelHttpResponse; import org.elasticsearch.cloud.gce.GceComputeServiceImpl; import org.elasticsearch.common.Strings; import 
org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.Callback; @@ -44,8 +45,8 @@ public class GceComputeServiceMock extends GceComputeServiceImpl { protected HttpTransport mockHttpTransport; - public GceComputeServiceMock(Settings settings) { - super(settings); + public GceComputeServiceMock(Settings settings, NetworkService networkService) { + super(settings, networkService); this.mockHttpTransport = configureMock(); } @@ -55,7 +56,7 @@ public class GceComputeServiceMock extends GceComputeServiceImpl { } protected HttpTransport configureMock() { - HttpTransport transport = new MockHttpTransport() { + return new MockHttpTransport() { @Override public LowLevelHttpRequest buildRequest(String method, final String url) throws IOException { return new MockLowLevelHttpRequest() { @@ -64,8 +65,8 @@ public class GceComputeServiceMock extends GceComputeServiceImpl { MockLowLevelHttpResponse response = new MockLowLevelHttpResponse(); response.setStatusCode(200); response.setContentType(Json.MEDIA_TYPE); - if (url.equals(TOKEN_SERVER_ENCODED_URL)) { - logger.info("--> Simulate GCE Auth response for [{}]", url); + if (url.startsWith(GCE_METADATA_URL)) { + logger.info("--> Simulate GCE Auth/Metadata response for [{}]", url); response.setContent(readGoogleInternalJsonResponse(url)); } else { logger.info("--> Simulate GCE API response for [{}]", url); @@ -77,8 +78,6 @@ public class GceComputeServiceMock extends GceComputeServiceImpl { }; } }; - - return transport; } private String readGoogleInternalJsonResponse(String url) throws IOException { @@ -91,23 +90,24 @@ public class GceComputeServiceMock extends GceComputeServiceImpl { private String readJsonResponse(String url, String urlRoot) throws IOException { // We extract from the url the mock file path we want to use - String mockFileName = Strings.replace(url, urlRoot, "") + ".json"; + String mockFileName = Strings.replace(url, urlRoot, ""); logger.debug("--> read mock file from [{}]", mockFileName); URL resource = GceComputeServiceMock.class.getResource(mockFileName); + if (resource == null) { + throw new IOException("can't read [" + url + "] in src/test/resources/org/elasticsearch/discovery/gce"); + } try (InputStream is = resource.openStream()) { final StringBuilder sb = new StringBuilder(); Streams.readAllLines(is, new Callback() { @Override public void handle(String s) { - sb.append(s).append("\n"); + sb.append(s); } }); String response = sb.toString(); logger.trace("{}", response); return response; - } catch (IOException e) { - throw e; } } } diff --git a/plugins/cloud-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoverySettingsTests.java b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoverySettingsTests.java similarity index 80% rename from plugins/cloud-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoverySettingsTests.java rename to plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoverySettingsTests.java index 90b331dd8dc..334c685aa36 100644 --- a/plugins/cloud-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoverySettingsTests.java +++ b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoverySettingsTests.java @@ -19,9 +19,8 @@ package org.elasticsearch.discovery.gce; -import org.elasticsearch.cloud.gce.GceModule; import org.elasticsearch.common.settings.Settings; -import 
org.elasticsearch.plugin.cloud.gce.CloudGcePlugin; +import org.elasticsearch.plugin.discovery.gce.GceDiscoveryPlugin; import org.elasticsearch.test.ESTestCase; import static org.hamcrest.Matchers.is; @@ -34,20 +33,20 @@ public class GceDiscoverySettingsTests extends ESTestCase { .putArray("cloud.gce.zone", "gce_zones_1", "gce_zones_2") .build(); - boolean discoveryReady = CloudGcePlugin.isDiscoveryAlive(settings, logger); + boolean discoveryReady = GceDiscoveryPlugin.isDiscoveryAlive(settings, logger); assertThat(discoveryReady, is(true)); } public void testDiscoveryNotReady() { Settings settings = Settings.EMPTY; - boolean discoveryReady = CloudGcePlugin.isDiscoveryAlive(settings, logger); + boolean discoveryReady = GceDiscoveryPlugin.isDiscoveryAlive(settings, logger); assertThat(discoveryReady, is(false)); settings = Settings.builder() .put("discovery.type", "gce") .build(); - discoveryReady = CloudGcePlugin.isDiscoveryAlive(settings, logger); + discoveryReady = GceDiscoveryPlugin.isDiscoveryAlive(settings, logger); assertThat(discoveryReady, is(false)); settings = Settings.builder() @@ -55,7 +54,7 @@ public class GceDiscoverySettingsTests extends ESTestCase { .put("cloud.gce.project_id", "gce_id") .build(); - discoveryReady = CloudGcePlugin.isDiscoveryAlive(settings, logger); + discoveryReady = GceDiscoveryPlugin.isDiscoveryAlive(settings, logger); assertThat(discoveryReady, is(false)); @@ -64,7 +63,7 @@ public class GceDiscoverySettingsTests extends ESTestCase { .putArray("cloud.gce.zone", "gce_zones_1", "gce_zones_2") .build(); - discoveryReady = CloudGcePlugin.isDiscoveryAlive(settings, logger); + discoveryReady = GceDiscoveryPlugin.isDiscoveryAlive(settings, logger); assertThat(discoveryReady, is(false)); settings = Settings.builder() @@ -72,7 +71,7 @@ public class GceDiscoverySettingsTests extends ESTestCase { .putArray("cloud.gce.zone", "gce_zones_1", "gce_zones_2") .build(); - discoveryReady = CloudGcePlugin.isDiscoveryAlive(settings, logger); + discoveryReady = GceDiscoveryPlugin.isDiscoveryAlive(settings, logger); assertThat(discoveryReady, is(false)); } } diff --git a/plugins/cloud-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java similarity index 91% rename from plugins/cloud-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java rename to plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java index b18cca1d84e..450ff72ca47 100644 --- a/plugins/cloud-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java +++ b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java @@ -63,6 +63,7 @@ public class GceDiscoveryTests extends ESTestCase { protected static ThreadPool threadPool; protected MockTransportService transportService; + protected NetworkService networkService; protected GceComputeService mock; protected String projectName; @@ -91,6 +92,11 @@ public class GceDiscoveryTests extends ESTestCase { new LocalTransport(Settings.EMPTY, threadPool, Version.CURRENT, new NamedWriteableRegistry()), threadPool); } + @Before + public void createNetworkService() { + networkService = new NetworkService(Settings.EMPTY); + } + @After public void stopGceComputeService() { if (mock != null) { @@ -113,7 +119,7 @@ public class GceDiscoveryTests extends ESTestCase { .put(GceComputeService.Fields.PROJECT, projectName) .put(GceComputeService.Fields.ZONE, "europe-west1-b") 
.build(); - mock = new GceComputeServiceMock(nodeSettings); + mock = new GceComputeServiceMock(nodeSettings, networkService); List discoveryNodes = buildDynamicNodes(mock, nodeSettings); assertThat(discoveryNodes, hasSize(2)); } @@ -125,7 +131,7 @@ public class GceDiscoveryTests extends ESTestCase { .put(GceComputeService.Fields.ZONE, "europe-west1-b") .putArray(GceComputeService.Fields.TAGS, "elasticsearch") .build(); - mock = new GceComputeServiceMock(nodeSettings); + mock = new GceComputeServiceMock(nodeSettings, networkService); List discoveryNodes = buildDynamicNodes(mock, nodeSettings); assertThat(discoveryNodes, hasSize(1)); assertThat(discoveryNodes.get(0).getId(), is("#cloud-test2-0")); @@ -138,7 +144,7 @@ public class GceDiscoveryTests extends ESTestCase { .put(GceComputeService.Fields.ZONE, "europe-west1-b") .putArray(GceComputeService.Fields.TAGS, "elasticsearch", "dev") .build(); - mock = new GceComputeServiceMock(nodeSettings); + mock = new GceComputeServiceMock(nodeSettings, networkService); List discoveryNodes = buildDynamicNodes(mock, nodeSettings); assertThat(discoveryNodes, hasSize(1)); assertThat(discoveryNodes.get(0).getId(), is("#cloud-test2-0")); @@ -150,7 +156,7 @@ public class GceDiscoveryTests extends ESTestCase { .put(GceComputeService.Fields.PROJECT, projectName) .put(GceComputeService.Fields.ZONE, "europe-west1-b") .build(); - mock = new GceComputeServiceMock(nodeSettings); + mock = new GceComputeServiceMock(nodeSettings, networkService); List discoveryNodes = buildDynamicNodes(mock, nodeSettings); assertThat(discoveryNodes, hasSize(2)); } @@ -162,7 +168,7 @@ public class GceDiscoveryTests extends ESTestCase { .put(GceComputeService.Fields.ZONE, "europe-west1-b") .putArray(GceComputeService.Fields.TAGS, "elasticsearch") .build(); - mock = new GceComputeServiceMock(nodeSettings); + mock = new GceComputeServiceMock(nodeSettings, networkService); List discoveryNodes = buildDynamicNodes(mock, nodeSettings); assertThat(discoveryNodes, hasSize(2)); } @@ -174,7 +180,7 @@ public class GceDiscoveryTests extends ESTestCase { .put(GceComputeService.Fields.ZONE, "europe-west1-b") .putArray(GceComputeService.Fields.TAGS, "elasticsearch", "dev") .build(); - mock = new GceComputeServiceMock(nodeSettings); + mock = new GceComputeServiceMock(nodeSettings, networkService); List discoveryNodes = buildDynamicNodes(mock, nodeSettings); assertThat(discoveryNodes, hasSize(2)); } @@ -185,7 +191,7 @@ public class GceDiscoveryTests extends ESTestCase { .put(GceComputeService.Fields.PROJECT, projectName) .putArray(GceComputeService.Fields.ZONE, "us-central1-a", "europe-west1-b") .build(); - mock = new GceComputeServiceMock(nodeSettings); + mock = new GceComputeServiceMock(nodeSettings, networkService); List discoveryNodes = buildDynamicNodes(mock, nodeSettings); assertThat(discoveryNodes, hasSize(2)); } @@ -196,7 +202,7 @@ public class GceDiscoveryTests extends ESTestCase { .put(GceComputeService.Fields.PROJECT, projectName) .putArray(GceComputeService.Fields.ZONE, "us-central1-a", "europe-west1-b") .build(); - mock = new GceComputeServiceMock(nodeSettings); + mock = new GceComputeServiceMock(nodeSettings, networkService); List discoveryNodes = buildDynamicNodes(mock, nodeSettings); assertThat(discoveryNodes, hasSize(2)); } @@ -210,7 +216,7 @@ public class GceDiscoveryTests extends ESTestCase { .put(GceComputeService.Fields.PROJECT, projectName) .putArray(GceComputeService.Fields.ZONE, "us-central1-a", "us-central1-b") .build(); - mock = new GceComputeServiceMock(nodeSettings); + mock = 
new GceComputeServiceMock(nodeSettings, networkService); List discoveryNodes = buildDynamicNodes(mock, nodeSettings); assertThat(discoveryNodes, hasSize(0)); } diff --git a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceNetworkTests.java b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceNetworkTests.java new file mode 100644 index 00000000000..7550cdce7e4 --- /dev/null +++ b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceNetworkTests.java @@ -0,0 +1,132 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.discovery.gce; + +import org.elasticsearch.cloud.gce.network.GceNameResolver; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.junit.Test; + +import java.io.IOException; +import java.net.InetAddress; + +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.containsString; + +/** + * Test for GCE network.host settings. 
+ * Related to https://github.com/elastic/elasticsearch/issues/13605 + */ +public class GceNetworkTests extends ESTestCase { + + /** + * Test for network.host: _gce_ + */ + @Test + public void networkHostGceDefault() throws IOException { + resolveGce("_gce_", InetAddress.getByName("10.240.0.2")); + } + + /** + * Test for network.host: _gce:privateIp_ + */ + @Test + public void networkHostPrivateIp() throws IOException { + resolveGce("_gce:privateIp_", InetAddress.getByName("10.240.0.2")); + } + + /** + * Test for network.host: _gce:hostname_ + */ + @Test + public void networkHostPrivateDns() throws IOException { + resolveGce("_gce:hostname_", InetAddress.getByName("localhost")); + } + + /** + * Test for network.host: _gce:doesnotexist_ + * This should raise an IllegalArgumentException as this setting does not exist + */ + @Test + public void networkHostWrongSetting() throws IOException { + resolveGce("_gce:doesnotexist_", (InetAddress) null); + } + + /** + * Test with multiple network interfaces: + * network.host: _gce:privateIp:0_ + * network.host: _gce:privateIp:1_ + */ + @Test + public void networkHostPrivateIpInterface() throws IOException { + resolveGce("_gce:privateIp:0_", InetAddress.getByName("10.240.0.2")); + resolveGce("_gce:privateIp:1_", InetAddress.getByName("10.150.0.1")); + } + + /** + * Test that we don't have any regression with network host core settings such as + * network.host: _local_ + */ + @Test + public void networkHostCoreLocal() throws IOException { + resolveGce("_local_", new NetworkService(Settings.EMPTY).resolveBindHostAddress(NetworkService.DEFAULT_NETWORK_HOST)); + } + + /** + * Utility method to test different network.host settings + * @param gceNetworkSetting tested network.host property + * @param expected expected InetAddress, null if we expect an exception + * @throws IOException if something goes wrong + */ + private void resolveGce(String gceNetworkSetting, InetAddress expected) throws IOException { + resolveGce(gceNetworkSetting, expected == null ? null : new InetAddress[] { expected }); + } + + /** + * Utility method to test different network.host settings + * @param gceNetworkSetting tested network.host property + * @param expected expected InetAddress, null if we expect an exception + * @throws IOException if something goes wrong
+ */ + private void resolveGce(String gceNetworkSetting, InetAddress[] expected) throws IOException { + Settings nodeSettings = Settings.builder() + .put("network.host", gceNetworkSetting) + .build(); + + NetworkService networkService = new NetworkService(nodeSettings); + GceComputeServiceMock mock = new GceComputeServiceMock(nodeSettings, networkService); + networkService.addCustomNameResolver(new GceNameResolver(nodeSettings, mock)); + try { + InetAddress[] addresses = networkService.resolveBindHostAddress(null); + if (expected == null) { + fail("We should get an IllegalArgumentException when setting network.host: _gce:doesnotexist_"); + } + assertThat(addresses, arrayContaining(expected)); + } catch (IllegalArgumentException e) { + if (expected != null) { + // We were expecting an address, not an exception + throw e; + } + // Check that we got the expected exception + assertThat(e.getMessage(), containsString("is not one of the supported GCE network.host setting")); + } + } +}
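Taken together, the discovery tests and this new name resolver cover what a node would set in elasticsearch.yml. A minimal sketch, assuming the 2.x setting names used by the discovery-gce plugin (all values are illustrative):

```yaml
# Hypothetical values; project_id, zone and tags correspond to the
# GceComputeService.Fields keys exercised by GceDiscoveryTests above.
discovery.type: gce
cloud.gce.project_id: my-project
cloud.gce.zone: europe-west1-b
discovery.gce.tags: elasticsearch

# Any of the placeholders exercised by GceNetworkTests; _gce_ resolves
# to the same address as _gce:privateIp_ (the first private IP), while
# _gce:privateIp:1_ picks network interface 1 and _gce:hostname_ uses
# the instance hostname.
network.host: _gce_
```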
diff --git a/plugins/cloud-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/multiplezonesandtwonodesindifferentzones/zones/europe-west1-b/instances.json b/plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/multiplezonesandtwonodesindifferentzones/zones/europe-west1-b/instances similarity index 100% rename from plugins/cloud-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/multiplezonesandtwonodesindifferentzones/zones/europe-west1-b/instances.json rename to plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/multiplezonesandtwonodesindifferentzones/zones/europe-west1-b/instances diff --git a/plugins/cloud-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/multiplezonesandtwonodesindifferentzones/zones/us-central1-a/instances.json b/plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/multiplezonesandtwonodesindifferentzones/zones/us-central1-a/instances similarity index 100% rename from plugins/cloud-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/multiplezonesandtwonodesindifferentzones/zones/us-central1-a/instances.json rename to plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/multiplezonesandtwonodesindifferentzones/zones/us-central1-a/instances diff --git a/plugins/cloud-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/multiplezonesandtwonodesinsamezone/zones/europe-west1-b/instances.json b/plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/multiplezonesandtwonodesinsamezone/zones/europe-west1-b/instances similarity index 100% rename from plugins/cloud-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/multiplezonesandtwonodesinsamezone/zones/europe-west1-b/instances.json rename to plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/multiplezonesandtwonodesinsamezone/zones/europe-west1-b/instances diff --git a/plugins/cloud-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/multiplezonesandtwonodesinsamezone/zones/us-central1-a/instances.json b/plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/multiplezonesandtwonodesinsamezone/zones/us-central1-a/instances similarity index 100% rename from plugins/cloud-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/multiplezonesandtwonodesinsamezone/zones/us-central1-a/instances.json rename to plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/multiplezonesandtwonodesinsamezone/zones/us-central1-a/instances diff --git a/plugins/cloud-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/nodeswithdifferenttagsandnotagset/zones/europe-west1-b/instances.json b/plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/nodeswithdifferenttagsandnotagset/zones/europe-west1-b/instances similarity index 100% rename from plugins/cloud-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/nodeswithdifferenttagsandnotagset/zones/europe-west1-b/instances.json rename to plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/nodeswithdifferenttagsandnotagset/zones/europe-west1-b/instances diff --git a/plugins/cloud-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/nodeswithdifferenttagsandonetagset/zones/europe-west1-b/instances.json b/plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/nodeswithdifferenttagsandonetagset/zones/europe-west1-b/instances similarity index 100% rename from plugins/cloud-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/nodeswithdifferenttagsandonetagset/zones/europe-west1-b/instances.json rename to plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/nodeswithdifferenttagsandonetagset/zones/europe-west1-b/instances diff --git a/plugins/cloud-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/nodeswithdifferenttagsandtwotagset/zones/europe-west1-b/instances.json b/plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/nodeswithdifferenttagsandtwotagset/zones/europe-west1-b/instances similarity index 100% rename from plugins/cloud-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/nodeswithdifferenttagsandtwotagset/zones/europe-west1-b/instances.json rename to plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/nodeswithdifferenttagsandtwotagset/zones/europe-west1-b/instances diff --git a/plugins/cloud-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/nodeswithsametagsandnotagset/zones/europe-west1-b/instances.json b/plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/nodeswithsametagsandnotagset/zones/europe-west1-b/instances similarity index 100% rename from plugins/cloud-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/nodeswithsametagsandnotagset/zones/europe-west1-b/instances.json rename to plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/nodeswithsametagsandnotagset/zones/europe-west1-b/instances diff --git a/plugins/cloud-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/nodeswithsametagsandonetagset/zones/europe-west1-b/instances.json b/plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/nodeswithsametagsandonetagset/zones/europe-west1-b/instances similarity index 100% rename from
plugins/cloud-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/nodeswithsametagsandonetagset/zones/europe-west1-b/instances.json rename to plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/nodeswithsametagsandonetagset/zones/europe-west1-b/instances diff --git a/plugins/cloud-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/nodeswithsametagsandtwotagsset/zones/europe-west1-b/instances.json b/plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/nodeswithsametagsandtwotagsset/zones/europe-west1-b/instances similarity index 100% rename from plugins/cloud-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/nodeswithsametagsandtwotagsset/zones/europe-west1-b/instances.json rename to plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/nodeswithsametagsandtwotagsset/zones/europe-west1-b/instances diff --git a/plugins/cloud-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/zeronode43/zones/us-central1-a/instances.json b/plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/zeronode43/zones/us-central1-a/instances similarity index 100% rename from plugins/cloud-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/zeronode43/zones/us-central1-a/instances.json rename to plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/zeronode43/zones/us-central1-a/instances diff --git a/plugins/cloud-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/zeronode43/zones/us-central1-b/instances.json b/plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/zeronode43/zones/us-central1-b/instances similarity index 100% rename from plugins/cloud-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/zeronode43/zones/us-central1-b/instances.json rename to plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/zeronode43/zones/us-central1-b/instances diff --git a/plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/computeMetadata/v1/instance/hostname b/plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/computeMetadata/v1/instance/hostname new file mode 100644 index 00000000000..2fbb50c4a8d --- /dev/null +++ b/plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/computeMetadata/v1/instance/hostname @@ -0,0 +1 @@ +localhost diff --git a/plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/computeMetadata/v1/instance/network-interfaces/0/ip b/plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/computeMetadata/v1/instance/network-interfaces/0/ip new file mode 100644 index 00000000000..1ac79d6027d --- /dev/null +++ b/plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/computeMetadata/v1/instance/network-interfaces/0/ip @@ -0,0 +1 @@ +10.240.0.2 diff --git a/plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/computeMetadata/v1/instance/network-interfaces/1/ip b/plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/computeMetadata/v1/instance/network-interfaces/1/ip new file mode 100644 index 00000000000..e3bb0f875b6 --- /dev/null +++ 
b/plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/computeMetadata/v1/instance/network-interfaces/1/ip @@ -0,0 +1 @@ +10.150.0.1 diff --git a/plugins/cloud-gce/src/test/resources/org/elasticsearch/discovery/gce/computeMetadata/v1/instance/service-accounts/default/token.json b/plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/computeMetadata/v1/instance/service-accounts/default/token similarity index 100% rename from plugins/cloud-gce/src/test/resources/org/elasticsearch/discovery/gce/computeMetadata/v1/instance/service-accounts/default/token.json rename to plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/computeMetadata/v1/instance/service-accounts/default/token diff --git a/plugins/cloud-gce/src/test/resources/rest-api-spec/test/cloud_gce/10_basic.yaml b/plugins/discovery-gce/src/test/resources/rest-api-spec/test/discovery_gce/10_basic.yaml similarity index 57% rename from plugins/cloud-gce/src/test/resources/rest-api-spec/test/cloud_gce/10_basic.yaml rename to plugins/discovery-gce/src/test/resources/rest-api-spec/test/discovery_gce/10_basic.yaml index 9cff52e6ca3..8f5fbdc4ab4 100644 --- a/plugins/cloud-gce/src/test/resources/rest-api-spec/test/cloud_gce/10_basic.yaml +++ b/plugins/discovery-gce/src/test/resources/rest-api-spec/test/discovery_gce/10_basic.yaml @@ -1,6 +1,6 @@ -# Integration tests for Cloud GCE components +# Integration tests for Discovery GCE components # -"Cloud GCE loaded": +"Discovery GCE loaded": - do: cluster.state: {} @@ -10,5 +10,5 @@ - do: nodes.info: {} - - match: { nodes.$master.plugins.0.name: cloud-gce } + - match: { nodes.$master.plugins.0.name: discovery-gce } - match: { nodes.$master.plugins.0.jvm: true } diff --git a/plugins/lang-groovy/src/test/java/org/elasticsearch/messy/tests/BulkTests.java b/plugins/lang-groovy/src/test/java/org/elasticsearch/messy/tests/BulkTests.java index 6cba9ae9517..fa6e91bed10 100644 --- a/plugins/lang-groovy/src/test/java/org/elasticsearch/messy/tests/BulkTests.java +++ b/plugins/lang-groovy/src/test/java/org/elasticsearch/messy/tests/BulkTests.java @@ -19,8 +19,6 @@ package org.elasticsearch.messy.tests; -import java.nio.charset.StandardCharsets; - import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.bulk.BulkItemResponse; @@ -49,21 +47,15 @@ import org.elasticsearch.script.groovy.GroovyPlugin; import org.elasticsearch.test.ESIntegTestCase; import org.junit.Test; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.concurrent.CyclicBarrier; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertExists; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.nullValue; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*; +import static org.hamcrest.Matchers.*; public class BulkTests extends ESIntegTestCase { @@ 
-190,8 +182,8 @@ public class BulkTests extends ESIntegTestCase { assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getVersion(), equalTo(3l)); bulkResponse = client().prepareBulk() - .add(client().prepareIndex("test", "type", "e1").setCreate(true).setSource("field", "1").setVersion(10).setVersionType(VersionType.EXTERNAL)) - .add(client().prepareIndex("test", "type", "e2").setCreate(true).setSource("field", "1").setVersion(10).setVersionType(VersionType.EXTERNAL)) + .add(client().prepareIndex("test", "type", "e1").setSource("field", "1").setVersion(10).setVersionType(VersionType.EXTERNAL)) + .add(client().prepareIndex("test", "type", "e2").setSource("field", "1").setVersion(10).setVersionType(VersionType.EXTERNAL)) .add(client().prepareIndex("test", "type", "e1").setSource("field", "2").setVersion(12).setVersionType(VersionType.EXTERNAL)).get(); assertTrue(((IndexResponse) bulkResponse.getItems()[0].getResponse()).isCreated()); diff --git a/plugins/lang-groovy/src/test/java/org/elasticsearch/messy/tests/FunctionScoreTests.java b/plugins/lang-groovy/src/test/java/org/elasticsearch/messy/tests/FunctionScoreTests.java index 99b334e27db..51fc5a4de8b 100644 --- a/plugins/lang-groovy/src/test/java/org/elasticsearch/messy/tests/FunctionScoreTests.java +++ b/plugins/lang-groovy/src/test/java/org/elasticsearch/messy/tests/FunctionScoreTests.java @@ -19,20 +19,12 @@ package org.elasticsearch.messy.tests; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchType; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.lucene.search.function.CombineFunction; -import org.elasticsearch.common.lucene.search.function.FieldValueFactorFunction; import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder; -import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder; -import org.elasticsearch.index.query.functionscore.weight.WeightBuilder; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.groovy.GroovyPlugin; @@ -49,385 +41,26 @@ import java.util.concurrent.ExecutionException; import static org.elasticsearch.client.Requests.searchRequest; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery; import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; -import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.exponentialDecayFunction; -import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.fieldValueFactorFunction; -import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.gaussDecayFunction; -import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.linearDecayFunction; -import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.randomFunction; import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction; -import static 
org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.weightFactorFunction; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; -import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; public class FunctionScoreTests extends ESIntegTestCase { static final String TYPE = "type"; static final String INDEX = "index"; - static final String TEXT_FIELD = "text_field"; - static final String DOUBLE_FIELD = "double_field"; - static final String GEO_POINT_FIELD = "geo_point_field"; - static final XContentBuilder SIMPLE_DOC; - static final XContentBuilder MAPPING_WITH_DOUBLE_AND_GEO_POINT_AND_TEXT_FIELD; @Override protected Collection> nodePlugins() { return Collections.singleton(GroovyPlugin.class); } - - @Test - public void testExplainQueryOnlyOnce() throws IOException, ExecutionException, InterruptedException { - assertAcked(prepareCreate("test").addMapping( - "type1", - jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("test").field("type", "string") - .endObject().startObject("num").field("type", "float").endObject().endObject().endObject().endObject())); - ensureYellow(); - client().prepareIndex() - .setType("type1") - .setId("1") - .setIndex("test") - .setSource( - jsonBuilder().startObject().field("test", "value").field("num", 10).endObject()).get(); - refresh(); - - SearchResponse response = client().search( - searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source( - searchSource().explain(true).query( - functionScoreQuery(termQuery("test", "value"), new FunctionScoreQueryBuilder.FilterFunctionBuilder[]{ - new FunctionScoreQueryBuilder.FilterFunctionBuilder(gaussDecayFunction("num", 5, 5)), - new FunctionScoreQueryBuilder.FilterFunctionBuilder(exponentialDecayFunction("num", 5, 5)), - new FunctionScoreQueryBuilder.FilterFunctionBuilder(linearDecayFunction("num", 5, 5)) - })))).get(); - String explanation = response.getHits().getAt(0).explanation().toString(); - - checkQueryExplanationAppearsOnlyOnce(explanation); - response = client().search( - searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source( - searchSource().explain(true).query( - functionScoreQuery(termQuery("test", "value"), fieldValueFactorFunction("num"))))).get(); - explanation = response.getHits().getAt(0).explanation().toString(); - checkQueryExplanationAppearsOnlyOnce(explanation); - - response = client().search( - searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source( - searchSource().explain(true).query( - functionScoreQuery(termQuery("test", "value"), randomFunction(10))))).get(); - explanation = response.getHits().getAt(0).explanation().toString(); - - checkQueryExplanationAppearsOnlyOnce(explanation); - } - - private void checkQueryExplanationAppearsOnlyOnce(String explanation) { - // use some substring of the query explanation and see if it appears twice - String queryExplanation = "idf(docFreq=1, maxDocs=1)"; - int queryExplanationIndex = explanation.indexOf(queryExplanation, 0); - assertThat(queryExplanationIndex, greaterThan(-1)); - queryExplanationIndex = explanation.indexOf(queryExplanation, queryExplanationIndex + 1); - 
assertThat(queryExplanationIndex, equalTo(-1)); - } - - static { - XContentBuilder simpleDoc; - XContentBuilder mappingWithDoubleAndGeoPointAndTestField; - try { - simpleDoc = jsonBuilder().startObject() - .field(TEXT_FIELD, "value") - .startObject(GEO_POINT_FIELD) - .field("lat", 10) - .field("lon", 20) - .endObject() - .field(DOUBLE_FIELD, Math.E) - .endObject(); - } catch (IOException e) { - throw new ElasticsearchException("Exception while initializing FunctionScoreIT", e); - } - SIMPLE_DOC = simpleDoc; - try { - - mappingWithDoubleAndGeoPointAndTestField = jsonBuilder().startObject() - .startObject(TYPE) - .startObject("properties") - .startObject(TEXT_FIELD) - .field("type", "string") - .endObject() - .startObject(GEO_POINT_FIELD) - .field("type", "geo_point") - .endObject() - .startObject(DOUBLE_FIELD) - .field("type", "double") - .endObject() - .endObject() - .endObject() - .endObject(); - } catch (IOException e) { - throw new ElasticsearchException("Exception while initializing FunctionScoreIT", e); - } - MAPPING_WITH_DOUBLE_AND_GEO_POINT_AND_TEXT_FIELD = mappingWithDoubleAndGeoPointAndTestField; - } - - @Test - public void testExplain() throws IOException, ExecutionException, InterruptedException { - assertAcked(prepareCreate(INDEX).addMapping( - TYPE, MAPPING_WITH_DOUBLE_AND_GEO_POINT_AND_TEXT_FIELD - )); - ensureYellow(); - - index(INDEX, TYPE, "1", SIMPLE_DOC); - refresh(); - - SearchResponse responseWithWeights = client().search( - searchRequest().source( - searchSource().query( - functionScoreQuery(constantScoreQuery(termQuery(TEXT_FIELD, "value")), new FunctionScoreQueryBuilder.FilterFunctionBuilder[]{ - new FunctionScoreQueryBuilder.FilterFunctionBuilder(gaussDecayFunction(GEO_POINT_FIELD, new GeoPoint(10, 20), "1000km")), - new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction(DOUBLE_FIELD).modifier(FieldValueFactorFunction.Modifier.LN).setWeight(2)), - new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(new Script("_index['" + TEXT_FIELD + "']['value'].tf()")).setWeight(3)) - })).explain(true))).actionGet(); - - assertThat( - responseWithWeights.getHits().getAt(0).getExplanation().toString(), - equalTo("6.0 = function score, product of:\n 1.0 = ConstantScore(text_field:value), product of:\n 1.0 = boost\n 1.0 = queryNorm\n 6.0 = min of:\n 6.0 = function score, score mode [multiply]\n 1.0 = function score, product of:\n 1.0 = match filter: *:*\n 1.0 = Function for field geo_point_field:\n 1.0 = exp(-0.5*pow(MIN of: [Math.max(arcDistance([10.0, 20.0](=doc value),[10.0, 20.0](=origin)) - 0.0(=offset), 0)],2.0)/7.213475204444817E11)\n 2.0 = function score, product of:\n 1.0 = match filter: *:*\n 2.0 = product of:\n 1.0 = field value function: ln(doc['double_field'].value * factor=1.0)\n 2.0 = weight\n 3.0 = function score, product of:\n 1.0 = match filter: *:*\n 3.0 = product of:\n 1.0 = script score function, computed with script:\"[script: _index['text_field']['value'].tf(), type: inline, lang: null, params: null]\n 1.0 = _score: \n 1.0 = ConstantScore(text_field:value), product of:\n 1.0 = boost\n 1.0 = queryNorm\n 3.0 = weight\n 3.4028235E38 = maxBoost\n")); - responseWithWeights = client().search( - searchRequest().source( - searchSource().query( - functionScoreQuery(constantScoreQuery(termQuery(TEXT_FIELD, "value")), weightFactorFunction(4.0f))) - .explain(true))).actionGet(); - assertThat( - responseWithWeights.getHits().getAt(0).getExplanation().toString(), - equalTo("4.0 = function score, product of:\n 1.0 = 
ConstantScore(text_field:value), product of:\n 1.0 = boost\n 1.0 = queryNorm\n 4.0 = min of:\n 4.0 = product of:\n 1.0 = constant score 1.0 - no function provided\n 4.0 = weight\n 3.4028235E38 = maxBoost\n")); - - } - - @Test - public void simpleWeightedFunctionsTest() throws IOException, ExecutionException, InterruptedException { - assertAcked(prepareCreate(INDEX).addMapping( - TYPE, MAPPING_WITH_DOUBLE_AND_GEO_POINT_AND_TEXT_FIELD - )); - ensureYellow(); - - index(INDEX, TYPE, "1", SIMPLE_DOC); - refresh(); - SearchResponse response = client().search( - searchRequest().source( - searchSource().query( - functionScoreQuery(constantScoreQuery(termQuery(TEXT_FIELD, "value")), new FunctionScoreQueryBuilder.FilterFunctionBuilder[]{ - new FunctionScoreQueryBuilder.FilterFunctionBuilder(gaussDecayFunction(GEO_POINT_FIELD, new GeoPoint(10, 20), "1000km")), - new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction(DOUBLE_FIELD).modifier(FieldValueFactorFunction.Modifier.LN)), - new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(new Script("_index['" + TEXT_FIELD + "']['value'].tf()"))) - })))).actionGet(); - SearchResponse responseWithWeights = client().search( - searchRequest().source( - searchSource().query( - functionScoreQuery(constantScoreQuery(termQuery(TEXT_FIELD, "value")), new FunctionScoreQueryBuilder.FilterFunctionBuilder[]{ - new FunctionScoreQueryBuilder.FilterFunctionBuilder(gaussDecayFunction(GEO_POINT_FIELD, new GeoPoint(10, 20), "1000km").setWeight(2)), - new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction(DOUBLE_FIELD).modifier(FieldValueFactorFunction.Modifier.LN).setWeight(2)), - new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(new Script("_index['" + TEXT_FIELD + "']['value'].tf()")).setWeight(2)) - })))).actionGet(); - - assertSearchResponse(response); - assertThat(response.getHits().getAt(0).getScore(), is(1.0f)); - assertThat(responseWithWeights.getHits().getAt(0).getScore(), is(8.0f)); - } - - @Test - public void simpleWeightedFunctionsTestWithRandomWeightsAndRandomCombineMode() throws IOException, ExecutionException, InterruptedException { - assertAcked(prepareCreate(INDEX).addMapping( - TYPE, - MAPPING_WITH_DOUBLE_AND_GEO_POINT_AND_TEXT_FIELD)); - ensureYellow(); - - XContentBuilder doc = SIMPLE_DOC; - index(INDEX, TYPE, "1", doc); - refresh(); - ScoreFunctionBuilder[] scoreFunctionBuilders = getScoreFunctionBuilders(); - float[] weights = createRandomWeights(scoreFunctionBuilders.length); - float[] scores = getScores(scoreFunctionBuilders); - int weightscounter = 0; - FunctionScoreQueryBuilder.FilterFunctionBuilder[] filterFunctionBuilders = new FunctionScoreQueryBuilder.FilterFunctionBuilder[scoreFunctionBuilders.length]; - for (ScoreFunctionBuilder builder : scoreFunctionBuilders) { - filterFunctionBuilders[weightscounter] = new FunctionScoreQueryBuilder.FilterFunctionBuilder(builder.setWeight(weights[weightscounter])); - weightscounter++; - } - FiltersFunctionScoreQuery.ScoreMode scoreMode = randomFrom(FiltersFunctionScoreQuery.ScoreMode.AVG, FiltersFunctionScoreQuery.ScoreMode.SUM, - FiltersFunctionScoreQuery.ScoreMode.MIN, FiltersFunctionScoreQuery.ScoreMode.MAX, FiltersFunctionScoreQuery.ScoreMode.MULTIPLY); - FunctionScoreQueryBuilder withWeights = functionScoreQuery(constantScoreQuery(termQuery(TEXT_FIELD, "value")), filterFunctionBuilders).scoreMode(scoreMode); - - SearchResponse responseWithWeights = client().search( - searchRequest().source(searchSource().query(withWeights)) - 
).actionGet(); - - double expectedScore = computeExpectedScore(weights, scores, scoreMode); - assertThat((float) expectedScore / responseWithWeights.getHits().getAt(0).getScore(), is(1.0f)); - } - - protected double computeExpectedScore(float[] weights, float[] scores, FiltersFunctionScoreQuery.ScoreMode scoreMode) { - double expectedScore; - switch(scoreMode) { - case MULTIPLY: - expectedScore = 1.0; - break; - case MAX: - expectedScore = Float.MAX_VALUE * -1.0; - break; - case MIN: - expectedScore = Float.MAX_VALUE; - break; - default: - expectedScore = 0.0; - break; - } - - float weightSum = 0; - for (int i = 0; i < weights.length; i++) { - double functionScore = (double) weights[i] * scores[i]; - weightSum += weights[i]; - switch(scoreMode) { - case AVG: - expectedScore += functionScore; - break; - case MAX: - expectedScore = Math.max(functionScore, expectedScore); - break; - case MIN: - expectedScore = Math.min(functionScore, expectedScore); - break; - case SUM: - expectedScore += functionScore; - break; - case MULTIPLY: - expectedScore *= functionScore; - break; - default: - throw new UnsupportedOperationException(); - } - } - if (scoreMode == FiltersFunctionScoreQuery.ScoreMode.AVG) { - expectedScore /= weightSum; - } - return expectedScore; - } - - @Test - public void simpleWeightedFunctionsTestSingleFunction() throws IOException, ExecutionException, InterruptedException { - assertAcked(prepareCreate(INDEX).addMapping( - TYPE, - MAPPING_WITH_DOUBLE_AND_GEO_POINT_AND_TEXT_FIELD)); - ensureYellow(); - - XContentBuilder doc = jsonBuilder().startObject() - .field(TEXT_FIELD, "value") - .startObject(GEO_POINT_FIELD) - .field("lat", 12) - .field("lon", 21) - .endObject() - .field(DOUBLE_FIELD, 10) - .endObject(); - index(INDEX, TYPE, "1", doc); - refresh(); - ScoreFunctionBuilder[] scoreFunctionBuilders = getScoreFunctionBuilders(); - ScoreFunctionBuilder scoreFunctionBuilder = scoreFunctionBuilders[randomInt(3)]; - float[] weights = createRandomWeights(1); - float[] scores = getScores(scoreFunctionBuilder); - FunctionScoreQueryBuilder withWeights = functionScoreQuery(constantScoreQuery(termQuery(TEXT_FIELD, "value")), scoreFunctionBuilder.setWeight(weights[0])); - - SearchResponse responseWithWeights = client().search( - searchRequest().source(searchSource().query(withWeights)) - ).actionGet(); - - assertThat( (double) scores[0] * weights[0]/ responseWithWeights.getHits().getAt(0).getScore(), closeTo(1.0, 1.e-6)); - - } - - private float[] getScores(ScoreFunctionBuilder... scoreFunctionBuilders) { - float[] scores = new float[scoreFunctionBuilders.length]; - int scorecounter = 0; - for (ScoreFunctionBuilder builder : scoreFunctionBuilders) { - SearchResponse response = client().search( - searchRequest().source( - searchSource().query( - functionScoreQuery(constantScoreQuery(termQuery(TEXT_FIELD, "value")), builder) - ))).actionGet(); - scores[scorecounter] = response.getHits().getAt(0).getScore(); - scorecounter++; - } - return scores; - } - - private float[] createRandomWeights(int size) { - float[] weights = new float[size]; - for (int i = 0; i < weights.length; i++) { - weights[i] = randomFloat() * (randomBoolean() ? 
1.0f : -1.0f) * randomInt(100) + 1.e-6f; - } - return weights; - } - - public ScoreFunctionBuilder[] getScoreFunctionBuilders() { - ScoreFunctionBuilder[] builders = new ScoreFunctionBuilder[4]; - builders[0] = gaussDecayFunction(GEO_POINT_FIELD, new GeoPoint(10, 20), "1000km"); - builders[1] = randomFunction(10); - builders[2] = fieldValueFactorFunction(DOUBLE_FIELD).modifier(FieldValueFactorFunction.Modifier.LN); - builders[3] = scriptFunction(new Script("_index['" + TEXT_FIELD + "']['value'].tf()")); - return builders; - } - - @Test - public void checkWeightOnlyCreatesBoostFunction() throws IOException { - assertAcked(prepareCreate(INDEX).addMapping( - TYPE, - MAPPING_WITH_DOUBLE_AND_GEO_POINT_AND_TEXT_FIELD)); - ensureYellow(); - - index(INDEX, TYPE, "1", SIMPLE_DOC); - refresh(); - String query =jsonBuilder().startObject() - .startObject("query") - .startObject("function_score") - .startArray("functions") - .startObject() - .field("weight",2) - .endObject() - .endArray() - .endObject() - .endObject() - .endObject().string(); - SearchResponse response = client().search( - searchRequest().source(new BytesArray(query)) - ).actionGet(); - assertSearchResponse(response); - assertThat(response.getHits().getAt(0).score(), equalTo(2.0f)); - - query =jsonBuilder().startObject() - .startObject("query") - .startObject("function_score") - .field("weight",2) - .endObject() - .endObject() - .endObject().string(); - response = client().search( - searchRequest().source(new BytesArray(query)) - ).actionGet(); - assertSearchResponse(response); - assertThat(response.getHits().getAt(0).score(), equalTo(2.0f)); - response = client().search( - searchRequest().source(searchSource().query(functionScoreQuery(new WeightBuilder().setWeight(2.0f)))) - ).actionGet(); - assertSearchResponse(response); - assertThat(response.getHits().getAt(0).score(), equalTo(2.0f)); - response = client().search( - searchRequest().source(searchSource().query(functionScoreQuery(weightFactorFunction(2.0f)))) - ).actionGet(); - assertSearchResponse(response); - assertThat(response.getHits().getAt(0).score(), equalTo(2.0f)); - } @Test public void testScriptScoresNested() throws IOException { diff --git a/plugins/lang-groovy/src/test/java/org/elasticsearch/messy/tests/package-info.java b/plugins/lang-groovy/src/test/java/org/elasticsearch/messy/tests/package-info.java index c4d93668709..a0d2c785c5b 100644 --- a/plugins/lang-groovy/src/test/java/org/elasticsearch/messy/tests/package-info.java +++ b/plugins/lang-groovy/src/test/java/org/elasticsearch/messy/tests/package-info.java @@ -80,7 +80,6 @@ renamed: core/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java -> plugins/lang-groovy/src/test/java/org/elasticsearch/messy/tests/TDigestPercentilesTests.java renamed: core/src/test/java/org/elasticsearch/search/aggregations/bucket/TopHitsIT.java -> plugins/lang-groovy/src/test/java/org/elasticsearch/messy/tests/TopHitsTests.java renamed: core/src/test/java/org/elasticsearch/index/mapper/TransformOnIndexMapperIT.java -> plugins/lang-groovy/src/test/java/org/elasticsearch/messy/tests/TransformOnIndexMapperTests.java - renamed: core/src/test/java/org/elasticsearch/update/UpdateIT.java -> plugins/lang-groovy/src/test/java/org/elasticsearch/messy/tests/UpdateTests.java renamed: core/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java -> plugins/lang-groovy/src/test/java/org/elasticsearch/messy/tests/ValueCountTests.java renamed: 
core/src/main/java/org/elasticsearch/script/groovy/GroovyScriptCompilationException.java -> plugins/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovyRestIT.java renamed: core/src/test/java/org/elasticsearch/script/GroovyScriptIT.java -> plugins/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovyScriptTests.java diff --git a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonSecurityTests.java b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonSecurityTests.java index a753ffa2e12..fd60607e2e6 100644 --- a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonSecurityTests.java +++ b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonSecurityTests.java @@ -68,7 +68,10 @@ public class PythonSecurityTests extends ESTestCase { fail("did not get expected exception"); } catch (PyException expected) { Throwable cause = expected.getCause(); - assertNotNull("null cause for exception: " + expected, cause); + // TODO: fix jython localization bugs: https://github.com/elastic/elasticsearch/issues/13967 + // this is the correct assert: + // assertNotNull("null cause for exception: " + expected, cause); + assertNotNull("null cause for exception", cause); assertTrue("unexpected exception: " + cause, cause instanceof SecurityException); } } diff --git a/plugins/pom.xml b/plugins/pom.xml index d248c7f6fa0..d080a55e82f 100644 --- a/plugins/pom.xml +++ b/plugins/pom.xml @@ -388,10 +388,10 @@ analysis-phonetic analysis-smartcn analysis-stempel - cloud-gce delete-by-query discovery-azure discovery-ec2 + discovery-gce discovery-multicast lang-expression lang-groovy diff --git a/pom.xml b/pom.xml index f966923f8c4..7242c5c7376 100644 --- a/pom.xml +++ b/pom.xml @@ -986,8 +986,7 @@ ${elasticsearch.license.headerDefinition} - src/main/java/org/elasticsearch/**/*.java - src/test/java/org/elasticsearch/**/*.java + src/**/*.java diff --git a/qa/smoke-test-plugins/pom.xml b/qa/smoke-test-plugins/pom.xml index 238ea9c7f6b..f9d4b79080a 100644 --- a/qa/smoke-test-plugins/pom.xml +++ b/qa/smoke-test-plugins/pom.xml @@ -271,7 +271,7 @@ org.elasticsearch.plugin - cloud-gce + discovery-gce ${elasticsearch.version} zip true diff --git a/qa/vagrant/pom.xml b/qa/vagrant/pom.xml index c0f75356113..e303da5b6a2 100644 --- a/qa/vagrant/pom.xml +++ b/qa/vagrant/pom.xml @@ -143,7 +143,7 @@ org.elasticsearch.plugin - cloud-gce + discovery-gce ${elasticsearch.version} zip diff --git a/qa/vagrant/src/test/resources/packaging/scripts/os_package.bash b/qa/vagrant/src/test/resources/packaging/scripts/os_package.bash index 4907cdf7ac3..f48532cb3f3 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/os_package.bash +++ b/qa/vagrant/src/test/resources/packaging/scripts/os_package.bash @@ -72,38 +72,39 @@ verify_package_installation() { getent group elasticsearch - assert_file "$ESHOME" d root 755 - assert_file "$ESHOME/bin" d root 755 - assert_file "$ESHOME/lib" d root 755 - assert_file "$ESCONFIG" d root 755 - assert_file "$ESCONFIG/elasticsearch.yml" f root 644 - assert_file "$ESCONFIG/logging.yml" f root 644 - assert_file "$ESDATA" d elasticsearch 755 - assert_file "$ESLOG" d elasticsearch 755 - assert_file "$ESPLUGINS" d elasticsearch 755 - assert_file "$ESPIDDIR" d elasticsearch 755 - assert_file "$ESHOME/NOTICE.txt" f root 644 - assert_file "$ESHOME/README.textile" f root 644 + assert_file "$ESHOME" d root root 755 + assert_file "$ESHOME/bin" d root root 755 + assert_file "$ESHOME/lib" d root root 755 + assert_file 
"$ESCONFIG" d root elasticsearch 750 + assert_file "$ESCONFIG/elasticsearch.yml" f root elasticsearch 750 + assert_file "$ESCONFIG/logging.yml" f root elasticsearch 750 + assert_file "$ESSCRIPTS" d root elasticsearch 750 + assert_file "$ESDATA" d elasticsearch elasticsearch 755 + assert_file "$ESLOG" d elasticsearch elasticsearch 755 + assert_file "$ESPLUGINS" d elasticsearch elasticsearch 755 + assert_file "$ESPIDDIR" d elasticsearch elasticsearch 755 + assert_file "$ESHOME/NOTICE.txt" f root root 644 + assert_file "$ESHOME/README.textile" f root root 644 if is_dpkg; then # Env file - assert_file "/etc/default/elasticsearch" f root 644 + assert_file "/etc/default/elasticsearch" f root root 644 # Doc files - assert_file "/usr/share/doc/elasticsearch" d root 755 - assert_file "/usr/share/doc/elasticsearch/copyright" f root 644 + assert_file "/usr/share/doc/elasticsearch" d root root 755 + assert_file "/usr/share/doc/elasticsearch/copyright" f root root 644 fi if is_rpm; then # Env file - assert_file "/etc/sysconfig/elasticsearch" f root 644 + assert_file "/etc/sysconfig/elasticsearch" f root root 644 # License file - assert_file "/usr/share/elasticsearch/LICENSE.txt" f root 644 + assert_file "/usr/share/elasticsearch/LICENSE.txt" f root root 644 fi if is_systemd; then - assert_file "/usr/lib/systemd/system/elasticsearch.service" f root 644 - assert_file "/usr/lib/tmpfiles.d/elasticsearch.conf" f root 644 - assert_file "/usr/lib/sysctl.d/elasticsearch.conf" f root 644 + assert_file "/usr/lib/systemd/system/elasticsearch.service" f root root 644 + assert_file "/usr/lib/tmpfiles.d/elasticsearch.conf" f root root 644 + assert_file "/usr/lib/sysctl.d/elasticsearch.conf" f root root 644 fi } diff --git a/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash b/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash index 7f24b8a1061..599f6bab513 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash +++ b/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash @@ -150,7 +150,8 @@ assert_file() { local file="$1" local type=$2 local user=$3 - local privileges=$4 + local group=$4 + local privileges=$5 assert_file_exist "$file" @@ -167,6 +168,11 @@ assert_file() { [ "$realuser" = "$user" ] fi + if [ "x$group" != "x" ]; then + realgroup=$(find "$file" -maxdepth 0 -printf "%g") + [ "$realgroup" = "$group" ] + fi + if [ "x$privileges" != "x" ]; then realprivileges=$(find "$file" -maxdepth 0 -printf "%m") [ "$realprivileges" = "$privileges" ] @@ -239,36 +245,13 @@ clean_before_test() { start_elasticsearch_service() { local desiredStatus=${1:-green} - if [ -f "/tmp/elasticsearch/bin/elasticsearch" ]; then - # su and the Elasticsearch init script work together to break bats. 
- # sudo isolates bats enough from the init script so everything continues - # to tick along - sudo -u elasticsearch /tmp/elasticsearch/bin/elasticsearch -d \ - -p /tmp/elasticsearch/elasticsearch.pid - elif is_systemd; then - run systemctl daemon-reload - [ "$status" -eq 0 ] - - run systemctl enable elasticsearch.service - [ "$status" -eq 0 ] - - run systemctl is-enabled elasticsearch.service - [ "$status" -eq 0 ] - - run systemctl start elasticsearch.service - [ "$status" -eq 0 ] - - elif is_sysvinit; then - run service elasticsearch start - [ "$status" -eq 0 ] - fi + run_elasticsearch_service 0 wait_for_elasticsearch_status $desiredStatus if [ -r "/tmp/elasticsearch/elasticsearch.pid" ]; then pid=$(cat /tmp/elasticsearch/elasticsearch.pid) [ "x$pid" != "x" ] && [ "$pid" -gt 0 ] - echo "Looking for elasticsearch pid...." ps $pid elif is_systemd; then @@ -284,6 +267,64 @@ start_elasticsearch_service() { fi } +# Start elasticsearch +# $1 expected status code +# $2 additional command line args +run_elasticsearch_service() { + local expectedStatus=$1 + local commandLineArgs=$2 + # Set the CONF_DIR setting in case we start as a service + if [ ! -z "$CONF_DIR" ] ; then + if is_dpkg ; then + echo "CONF_DIR=$CONF_DIR" >> /etc/default/elasticsearch; + elif is_rpm; then + echo "CONF_DIR=$CONF_DIR" >> /etc/sysconfig/elasticsearch; + fi + fi + + if [ -f "/tmp/elasticsearch/bin/elasticsearch" ]; then + if [ -z "$CONF_DIR" ]; then + local CONF_DIR="" + fi + # we need to capture the exit code to compare, so don't start as a background process when we expect a non-zero status + local background="" + local timeoutCommand="" + if [ "$expectedStatus" = 0 ]; then + background="-d" + else + timeoutCommand="timeout 60s " + fi + # su and the Elasticsearch init script work together to break bats.
+ # sudo isolates bats enough from the init script so everything continues + # to tick along + run sudo -u elasticsearch bash <> /etc/default/elasticsearch; + elif is_rpm; then + echo "CONF_FILE=$CONF_FILE" >> /etc/sysconfig/elasticsearch; + fi + + run_elasticsearch_service 1 -Des.default.config="$CONF_FILE" + + # remove settings again otherwise cleaning up before next testrun will fail + if is_dpkg ; then + sudo sed -i '/CONF_FILE/d' /etc/default/elasticsearch + elif is_rpm; then + sudo sed -i '/CONF_FILE/d' /etc/sysconfig/elasticsearch + fi +} + @test "[$GROUP] install jvm-example plugin with a custom path.plugins" { # Clean up after the last time this test was run rm -rf /tmp/plugins.* @@ -111,6 +140,9 @@ fi move_config CONF_DIR="$ESCONFIG" install_jvm_example + CONF_DIR="$ESCONFIG" start_elasticsearch_service + diff <(curl -s localhost:9200/_cat/configured_example | sed 's/ //g') <(echo "foo") + stop_elasticsearch_service CONF_DIR="$ESCONFIG" remove_jvm_example } @@ -172,7 +204,7 @@ fi } @test "[$GROUP] install gce plugin" { - install_and_check_plugin cloud gce google-api-client-*.jar + install_and_check_plugin discovery gce google-api-client-*.jar } @test "[$GROUP] install delete by query plugin" { @@ -276,7 +308,7 @@ fi } @test "[$GROUP] remove gce plugin" { - remove_plugin cloud-gce + remove_plugin discovery-gce } @test "[$GROUP] remove delete by query plugin" { @@ -357,21 +389,38 @@ fi local relativePath=${1:-$(readlink -m jvm-example-*.zip)} sudo -E -u $ESPLUGIN_COMMAND_USER "$ESHOME/bin/plugin" install "file://$relativePath" > /tmp/plugin-cli-output local loglines=$(cat /tmp/plugin-cli-output | wc -l) - [ "$loglines" = "6" ] || { - echo "Expected 6 lines but the output was:" - cat /tmp/plugin-cli-output - false - } + if [ "$GROUP" == "TAR PLUGINS" ]; then + # tar extraction does not create the plugins directory so the plugin tool will print an additional line that the directory will be created + [ "$loglines" -eq "7" ] || { + echo "Expected 7 lines but the output was:" + cat /tmp/plugin-cli-output + false + } + else + [ "$loglines" -eq "6" ] || { + echo "Expected 6 lines but the output was:" + cat /tmp/plugin-cli-output + false + } + fi remove_jvm_example local relativePath=${1:-$(readlink -m jvm-example-*.zip)} sudo -E -u $ESPLUGIN_COMMAND_USER "$ESHOME/bin/plugin" install "file://$relativePath" -Des.logger.level=DEBUG > /tmp/plugin-cli-output local loglines=$(cat /tmp/plugin-cli-output | wc -l) - [ "$loglines" -gt "6" ] || { - echo "Expected more than 6 lines but the output was:" - cat /tmp/plugin-cli-output - false - } + if [ "$GROUP" == "TAR PLUGINS" ]; then + [ "$loglines" -gt "7" ] || { + echo "Expected more than 7 lines but the output was:" + cat /tmp/plugin-cli-output + false + } + else + [ "$loglines" -gt "6" ] || { + echo "Expected more than 6 lines but the output was:" + cat /tmp/plugin-cli-output + false + } + fi remove_jvm_example } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.stats.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.stats.json index 42c13dceae1..2bccb20f36e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.stats.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.stats.json @@ -20,6 +20,10 @@ "type": "boolean", "description": "Whether to return time and byte values in human-readable format.", "default": false + }, + "timeout": { + "type" : "time", + "description" : "Explicit operation timeout" } } },
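cluster.stats above, and the node-level specs that follow (nodes.hot_threads, nodes.info, nodes.stats), all gain the same `timeout` parameter of type `time`. A hedged sketch of how a REST test could pass it (the 30s value is illustrative):

```yaml
- do:
    cluster.stats:
      timeout: 30s

- do:
    nodes.stats:
      timeout: 30s
```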
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get.json index 14fae15ff86..5c426f962a7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get.json @@ -13,7 +13,8 @@ }, "feature":{ "type":"list", - "description":"A comma-separated list of features" + "description":"A comma-separated list of features", + "options": ["_settings", "_mappings", "_warmers", "_aliases"] } }, "params":{ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.hot_threads.json b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.hot_threads.json index 5b4978216dd..854cde1a9e7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.hot_threads.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.hot_threads.json @@ -24,7 +24,7 @@ "type" : "number", "description" : "Specify the number of threads to provide information for (default: 3)" }, - "ignore_idle_threads": { + "ignore_idle_threads": { "type" : "boolean", "description" : "Don't show threads that are in known-idle places, such as waiting on a socket select or pulling from an empty task queue (default: true)" }, @@ -32,6 +32,10 @@ "type" : "enum", "options" : ["cpu", "wait", "block"], "description" : "The type to sample (default: cpu)" + }, + "timeout": { + "type" : "time", + "description" : "Explicit operation timeout" } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.info.json b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.info.json index d8044c8a5f6..43be35a5a86 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.info.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.info.json @@ -25,6 +25,10 @@ "type": "boolean", "description": "Whether to return time and byte values in human-readable format.", "default": false + }, + "timeout": { + "type" : "time", + "description" : "Explicit operation timeout" } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json index 5eef2c18d32..874294102c7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json @@ -59,6 +59,10 @@ "types" : { "type" : "list", "description" : "A comma-separated list of document types for the `indexing` index metric" + }, + "timeout": { + "type" : "time", + "description" : "Explicit operation timeout" } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json index 20fc3524283..37a04cbae28 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json @@ -82,6 +82,10 @@ "type": "enum", "options": ["internal", "force"], "description": "Specific version type" + }, + "detect_noop": { + "type": "boolean", + "description": "Specifying as true will cause Elasticsearch to check if there are changes and, if there aren't, turn the update request into a noop." } } },
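Since detect_noop asks Elasticsearch to compare the partial doc against the current source, an update that changes nothing should leave the version untouched. A hedged REST-test sketch (index, type and id are illustrative):

```yaml
- do:
    index:
      index: test_1
      type:  test
      id:    1
      body:  { foo: bar }

- do:
    update:
      index: test_1
      type:  test
      id:    1
      detect_noop: true
      body:
        doc: { foo: bar }

- match: { _version: 1 }
```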
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/35_external_version.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/35_external_version.yaml deleted file mode 100644 index 8ee11b0fa02..00000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/35_external_version.yaml +++ /dev/null @@ -1,33 +0,0 @@ ---- -"External version": - - - do: - create: - index: test_1 - type: test - id: 1 - body: { foo: bar } - version_type: external - version: 5 - - - match: { _version: 5} - - - do: - catch: conflict - create: - index: test_1 - type: test - id: 1 - body: { foo: bar } - version_type: external - version: 5 - - - do: - catch: conflict - create: - index: test_1 - type: test - id: 1 - body: { foo: bar } - version_type: external - version: 6 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/36_external_gte_version.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/36_external_gte_version.yaml deleted file mode 100644 index febb7a5065e..00000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/36_external_gte_version.yaml +++ /dev/null @@ -1,33 +0,0 @@ ---- -"External version": - - - do: - create: - index: test_1 - type: test - id: 1 - body: { foo: bar } - version_type: external_gte - version: 5 - - - match: { _version: 5} - - - do: - catch: conflict - create: - index: test_1 - type: test - id: 1 - body: { foo: bar } - version_type: external_gte - version: 5 - - - do: - catch: conflict - create: - index: test_1 - type: test - id: 1 - body: { foo: bar } - version_type: external_gte - version: 6 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/37_force_version.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/37_force_version.yaml deleted file mode 100644 index 393d16fdc70..00000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/37_force_version.yaml +++ /dev/null @@ -1,33 +0,0 @@ ---- -"External version": - - - do: - create: - index: test_1 - type: test - id: 1 - body: { foo: bar } - version_type: force - version: 5 - - - match: { _version: 5} - - - do: - catch: conflict - create: - index: test_1 - type: test - id: 1 - body: { foo: bar } - version_type: force - version: 5 - - - do: - catch: conflict - create: - index: test_1 - type: test - id: 1 - body: { foo: bar } - version_type: force - version: 6 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/30_internal_version.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/30_internal_version.yaml index 3f13b099a30..17c4806c693 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/30_internal_version.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/30_internal_version.yaml @@ -2,7 +2,7 @@ "Internal version": - do: - catch: conflict + catch: missing update: index: test_1 type: test @@ -10,7 +10,14 @@ version: 1 body: doc: { foo: baz } - upsert: { foo: bar } + + - do: + index: + index: test_1 + type: test + id: 1 + body: + doc: { foo: baz } - do: catch: conflict @@ -18,7 +25,6 @@ index: test_1 type: test id: 1 - version: 1 + version: 2 body: doc: { foo: baz } - upsert: { foo: bar }
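The deleted create/3x_*_version.yaml tests, together with the BulkTests change earlier in this diff (which drops setCreate(true) from externally versioned requests), suggest that explicit external/force versioning is no longer exercised through the create endpoint; the equivalent behaviour lives on the index endpoint. A hedged sketch (values illustrative):

```yaml
- do:
    index:
      index: test_1
      type:  test
      id:    1
      version: 5
      version_type: external
      body: { foo: bar }

- match: { _version: 5 }
```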