commit d9e11e4b39
Merge branch 'master' into immutable_map_be_gone
@@ -74,7 +74,7 @@ Then sit back and wait. There will probably be discussion about the pull request
 Contributing to the Elasticsearch codebase
 ------------------------------------------
 
-**Repository:** [https://github.com/elasticsearch/elasticsearch](https://github.com/elastic/elasticsearch)
+**Repository:** [https://github.com/elastic/elasticsearch](https://github.com/elastic/elasticsearch)
 
 Make sure you have [Maven](http://maven.apache.org) installed, as Elasticsearch uses it as its build system. Integration with IntelliJ and Eclipse should work out of the box. Eclipse users can automatically configure their IDE by running `mvn eclipse:eclipse` and then importing the project into their workspace: `File > Import > Existing project into workspace` and make sure to select `Search for nested projects...` option as Elasticsearch is a multi-module maven project. Additionally you will want to ensure that Eclipse is using 2048m of heap by modifying `eclipse.ini` accordingly to avoid GC overhead errors. Please make sure the [m2e-connector](http://marketplace.eclipse.org/content/m2e-connector-maven-dependency-plugin) is not installed in your Eclipse distribution as it will interfere with setup performed by `mvn eclipse:eclipse`.
 
Vagrantfile (vendored): 12 changed lines
@@ -32,7 +32,10 @@ Vagrant.configure(2) do |config|
   end
   config.vm.define "vivid" do |config|
     config.vm.box = "ubuntu/vivid64"
-    ubuntu_common config
+    ubuntu_common config, extra: <<-SHELL
+      # Install Jayatana so we can work around it being present.
+      [ -f /usr/share/java/jayatanaag.jar ] || install jayatana
+    SHELL
   end
   # Wheezy's backports don't contain Openjdk 8 and the backflips required to
   # get the sun jdk on there just aren't worth it. We have jessie for testing
@@ -116,11 +119,11 @@ SOURCE_PROMPT
   end
 end
 
-def ubuntu_common(config)
-  deb_common config, 'apt-add-repository -y ppa:openjdk-r/ppa > /dev/null 2>&1', 'openjdk-r-*'
+def ubuntu_common(config, extra: '')
+  deb_common config, 'apt-add-repository -y ppa:openjdk-r/ppa > /dev/null 2>&1', 'openjdk-r-*', extra: extra
 end
 
-def deb_common(config, add_openjdk_repository_command, openjdk_list)
+def deb_common(config, add_openjdk_repository_command, openjdk_list, extra: '')
   # http://foo-o-rama.com/vagrant--stdin-is-not-a-tty--fix.html
   config.vm.provision "fix-no-tty", type: "shell" do |s|
     s.privileged = false
@@ -137,6 +140,7 @@ def deb_common(config, add_openjdk_repository_command, openjdk_list)
       (echo "Importing java-8 ppa" &&
        #{add_openjdk_repository_command} &&
        apt-get update)
+    #{extra}
   SHELL
   )
 end
core/pom.xml: 11 changed lines
@@ -372,12 +372,17 @@
           <excludes>
             <!-- Guice -->
             <exclude>src/main/java/org/elasticsearch/common/inject/**</exclude>
-            <exclude>src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java</exclude>
+            <!-- Forks of Lucene classes -->
             <exclude>src/main/java/org/apache/lucene/**/X*.java</exclude>
-            <!-- t-digest -->
-            <exclude>src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestState.java</exclude>
             <!-- netty pipelining -->
             <exclude>src/main/java/org/elasticsearch/http/netty/pipelining/**</exclude>
+            <!-- Guava -->
+            <exclude>src/main/java/org/elasticsearch/common/network/InetAddresses.java</exclude>
+            <exclude>src/test/java/org/elasticsearch/common/network/InetAddressesTests.java</exclude>
+            <exclude>src/test/java/org/elasticsearch/common/collect/EvictingQueueTests.java</exclude>
+            <!-- Joda -->
+            <exclude>src/main/java/org/joda/time/base/BaseDateTime.java</exclude>
+            <exclude>src/main/java/org/joda/time/format/StrictISODateTimeFormat.java</exclude>
           </excludes>
         </configuration>
       </plugin>
@@ -1,19 +1,20 @@
 /*
- * Licensed to Elasticsearch under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. Elasticsearch licenses this
- * file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
  *
  *    http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
  */
 
 package org.apache.lucene.search.postingshighlight;
@@ -1,19 +1,20 @@
 /*
- * Licensed to Elasticsearch under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. Elasticsearch licenses this
- * file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
  *
  *    http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
  */
 
 package org.apache.lucene.search.postingshighlight;
@@ -1,19 +1,20 @@
 /*
- * Licensed to Elasticsearch under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. Elasticsearch licenses this
- * file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
  *
  *    http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
  */
 
 package org.apache.lucene.search.postingshighlight;
@@ -482,7 +482,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
         RESOURCE_NOT_FOUND_EXCEPTION(org.elasticsearch.ResourceNotFoundException.class, org.elasticsearch.ResourceNotFoundException::new, 19),
         ACTION_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ActionTransportException.class, org.elasticsearch.transport.ActionTransportException::new, 20),
         ELASTICSEARCH_GENERATION_EXCEPTION(org.elasticsearch.ElasticsearchGenerationException.class, org.elasticsearch.ElasticsearchGenerationException::new, 21),
-        CREATE_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.CreateFailedEngineException.class, org.elasticsearch.index.engine.CreateFailedEngineException::new, 22),
+        // 22 was CreateFailedEngineException
         INDEX_SHARD_STARTED_EXCEPTION(org.elasticsearch.index.shard.IndexShardStartedException.class, org.elasticsearch.index.shard.IndexShardStartedException::new, 23),
         SEARCH_CONTEXT_MISSING_EXCEPTION(org.elasticsearch.search.SearchContextMissingException.class, org.elasticsearch.search.SearchContextMissingException::new, 24),
         SCRIPT_EXCEPTION(org.elasticsearch.script.ScriptException.class, org.elasticsearch.script.ScriptException::new, 25),
@@ -514,7 +514,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
         INDEX_SHARD_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.index.IndexShardAlreadyExistsException.class, org.elasticsearch.index.IndexShardAlreadyExistsException::new, 51),
         VERSION_CONFLICT_ENGINE_EXCEPTION(org.elasticsearch.index.engine.VersionConflictEngineException.class, org.elasticsearch.index.engine.VersionConflictEngineException::new, 52),
         ENGINE_EXCEPTION(org.elasticsearch.index.engine.EngineException.class, org.elasticsearch.index.engine.EngineException::new, 53),
-        DOCUMENT_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.index.engine.DocumentAlreadyExistsException.class, org.elasticsearch.index.engine.DocumentAlreadyExistsException::new, 54),
+        // 54 was DocumentAlreadyExistsException, which is superseded by VersionConflictEngineException
         NO_SUCH_NODE_EXCEPTION(org.elasticsearch.action.NoSuchNodeException.class, org.elasticsearch.action.NoSuchNodeException::new, 55),
         SETTINGS_EXCEPTION(org.elasticsearch.common.settings.SettingsException.class, org.elasticsearch.common.settings.SettingsException::new, 56),
         INDEX_TEMPLATE_MISSING_EXCEPTION(org.elasticsearch.indices.IndexTemplateMissingException.class, org.elasticsearch.indices.IndexTemplateMissingException::new, 57),
@@ -524,7 +524,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
         ROUTING_VALIDATION_EXCEPTION(org.elasticsearch.cluster.routing.RoutingValidationException.class, org.elasticsearch.cluster.routing.RoutingValidationException::new, 61),
         NOT_SERIALIZABLE_EXCEPTION_WRAPPER(org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper.class, org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper::new, 62),
         ALIAS_FILTER_PARSING_EXCEPTION(org.elasticsearch.indices.AliasFilterParsingException.class, org.elasticsearch.indices.AliasFilterParsingException::new, 63),
-        DELETE_BY_QUERY_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.DeleteByQueryFailedEngineException.class, org.elasticsearch.index.engine.DeleteByQueryFailedEngineException::new, 64),
+        // 64 was DeleteByQueryFailedEngineException, which was removed in 3.0
         GATEWAY_EXCEPTION(org.elasticsearch.gateway.GatewayException.class, org.elasticsearch.gateway.GatewayException::new, 65),
         INDEX_SHARD_NOT_RECOVERING_EXCEPTION(org.elasticsearch.index.shard.IndexShardNotRecoveringException.class, org.elasticsearch.index.shard.IndexShardNotRecoveringException::new, 66),
         HTTP_EXCEPTION(org.elasticsearch.http.HttpException.class, org.elasticsearch.http.HttpException::new, 67),
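The three hunks above retire exception IDs 22, 54 and 64 by leaving a comment in each slot instead of renumbering the remaining entries; the numbers are part of the wire format, so a retired slot must never be reused. A minimal sketch of that registry idea, with hypothetical names and only plain JDK types:

```java
import java.util.HashMap;
import java.util.Map;
import java.util.function.Supplier;

// Hypothetical illustration: once published, an id stays reserved forever so that
// nodes on different versions still agree on what each number means on the wire.
final class ExceptionRegistry {
    private final Map<Integer, Supplier<? extends RuntimeException>> byId = new HashMap<>();

    void register(int id, Supplier<? extends RuntimeException> ctor) {
        if (byId.putIfAbsent(id, ctor) != null) {
            throw new IllegalArgumentException("id " + id + " already in use");
        }
    }

    RuntimeException instantiate(int id) {
        Supplier<? extends RuntimeException> ctor = byId.get(id);
        if (ctor == null) {
            // A retired id (e.g. 22, 54, 64 above) simply has no mapping any more.
            throw new IllegalArgumentException("unknown or retired exception id " + id);
        }
        return ctor.get();
    }
}
```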
@@ -259,6 +259,8 @@ public class Version {
     public static final Version V_2_0_0_beta1 = new Version(V_2_0_0_beta1_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
     public static final int V_2_0_0_beta2_ID = 2000002;
     public static final Version V_2_0_0_beta2 = new Version(V_2_0_0_beta2_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
+    public static final int V_2_0_0_rc1_ID = 2000051;
+    public static final Version V_2_0_0_rc1 = new Version(V_2_0_0_rc1_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
     public static final int V_2_0_0_ID = 2000099;
     public static final Version V_2_0_0 = new Version(V_2_0_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_2_1);
     public static final int V_2_1_0_ID = 2010099;
@@ -287,6 +289,8 @@ public class Version {
                 return V_2_1_0;
             case V_2_0_0_ID:
                 return V_2_0_0;
+            case V_2_0_0_rc1_ID:
+                return V_2_0_0_rc1;
             case V_2_0_0_beta2_ID:
                 return V_2_0_0_beta2;
             case V_2_0_0_beta1_ID:
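Judging from the constants visible above (2000002 for 2.0.0-beta2, 2000051 for 2.0.0-rc1, 2000099 for 2.0.0, 2010099 for 2.1.0), the numeric ID appears to pack major, minor, revision and a pre-release build number as major * 1,000,000 + minor * 10,000 + revision * 100 + build, with 99 meaning a final release. A small stand-alone sketch of that decoding, as an illustration only:

```java
// Illustration of the apparent id layout, not Elasticsearch's own code.
public final class VersionIdDecoder {
    public static String decode(int id) {
        int major = id / 1_000_000;
        int minor = (id / 10_000) % 100;
        int revision = (id / 100) % 100;
        int build = id % 100;
        String suffix = build == 99 ? "" : " (pre-release build " + build + ")";
        return major + "." + minor + "." + revision + suffix;
    }

    public static void main(String[] args) {
        System.out.println(decode(2000051)); // 2.0.0 (pre-release build 51)
        System.out.println(decode(2010099)); // 2.1.0
    }
}
```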
@@ -121,8 +121,8 @@ import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeAction;
 import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeSettingsAction;
 import org.elasticsearch.action.admin.indices.validate.query.TransportValidateQueryAction;
 import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction;
-import org.elasticsearch.action.admin.indices.validate.template.RenderSearchTemplateAction;
-import org.elasticsearch.action.admin.indices.validate.template.TransportRenderSearchTemplateAction;
+import org.elasticsearch.action.admin.cluster.validate.template.RenderSearchTemplateAction;
+import org.elasticsearch.action.admin.cluster.validate.template.TransportRenderSearchTemplateAction;
 import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerAction;
 import org.elasticsearch.action.admin.indices.warmer.delete.TransportDeleteWarmerAction;
 import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersAction;
@@ -21,6 +21,7 @@ package org.elasticsearch.action;
 
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.collect.HppcMaps;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.rest.RestStatus;
@@ -32,8 +33,8 @@ import java.io.IOException;
  */
 public class UnavailableShardsException extends ElasticsearchException {
 
-    public UnavailableShardsException(@Nullable ShardId shardId, String message) {
-        super(buildMessage(shardId, message));
+    public UnavailableShardsException(@Nullable ShardId shardId, String message, Object... args) {
+        super(buildMessage(shardId, message), args);
     }
 
     private static String buildMessage(ShardId shardId, String message) {
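The widened constructor forwards an optional argument list to ElasticsearchException, so callers can pass a message template plus values instead of concatenating strings themselves. A rough stand-alone sketch of that pattern (the real class delegates formatting to ElasticsearchException; String.format here is only a stand-in):

```java
// Simplified stand-in: a message template plus varargs, formatted in one place.
class UnavailableShardsDemoException extends RuntimeException {
    UnavailableShardsDemoException(String shardDescription, String message, Object... args) {
        super("[" + shardDescription + "] " + String.format(message, args));
    }
}

class UnavailableShardsDemo {
    public static void main(String[] args) {
        // Hypothetical usage: the caller no longer builds the string by hand.
        throw new UnavailableShardsDemoException("index[0]", "%d retries left, timeout %sms", 3, 500);
    }
}
```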
@@ -17,7 +17,7 @@
  * under the License.
  */
 
-package org.elasticsearch.action.admin.indices.validate.template;
+package org.elasticsearch.action.admin.cluster.validate.template;
 
 import org.elasticsearch.action.Action;
 import org.elasticsearch.client.ElasticsearchClient;
@@ -25,7 +25,7 @@ import org.elasticsearch.client.ElasticsearchClient;
 public class RenderSearchTemplateAction extends Action<RenderSearchTemplateRequest, RenderSearchTemplateResponse, RenderSearchTemplateRequestBuilder> {
 
     public static final RenderSearchTemplateAction INSTANCE = new RenderSearchTemplateAction();
-    public static final String NAME = "indices:admin/render/template/search";
+    public static final String NAME = "cluster:admin/render/template/search";
 
     public RenderSearchTemplateAction() {
         super(NAME);
@@ -17,7 +17,7 @@
  * under the License.
  */
 
-package org.elasticsearch.action.admin.indices.validate.template;
+package org.elasticsearch.action.admin.cluster.validate.template;
 
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestValidationException;
@@ -17,7 +17,7 @@
  * under the License.
  */
 
-package org.elasticsearch.action.admin.indices.validate.template;
+package org.elasticsearch.action.admin.cluster.validate.template;
 
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.client.ElasticsearchClient;
@@ -17,7 +17,7 @@
  * under the License.
  */
 
-package org.elasticsearch.action.admin.indices.validate.template;
+package org.elasticsearch.action.admin.cluster.validate.template;
 
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.bytes.BytesReference;
@@ -17,7 +17,7 @@
  * under the License.
  */
 
-package org.elasticsearch.action.admin.indices.validate.template;
+package org.elasticsearch.action.admin.cluster.validate.template;
 
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
@@ -19,9 +19,9 @@
 
 package org.elasticsearch.action.admin.indices.segments;
 
-import com.google.common.collect.Iterators;
 import org.elasticsearch.index.shard.ShardId;
 
+import java.util.Arrays;
 import java.util.Iterator;
 
 public class IndexShardSegments implements Iterable<ShardSegments> {
@@ -49,6 +49,6 @@ public class IndexShardSegments implements Iterable<ShardSegments> {
 
     @Override
     public Iterator<ShardSegments> iterator() {
-        return Iterators.forArray(shards);
+        return Arrays.stream(shards).iterator();
     }
 }
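This hunk, and the matching ones in IndexShardStats, IndexShardUpgradeStatus, BulkResponse, MultiGetResponse, MultiPercolateResponse and MultiSearchResponse below, swap Guava's Iterators.forArray for a plain JDK stream over the array. A small self-contained sketch of the before/after shape (array contents hypothetical):

```java
import java.util.Arrays;
import java.util.Iterator;

public class ForArrayDemo implements Iterable<String> {
    private final String[] shards = {"shard-0", "shard-1", "shard-2"}; // hypothetical data

    @Override
    public Iterator<String> iterator() {
        // Previously: return com.google.common.collect.Iterators.forArray(shards);
        // JDK-only replacement, no Guava dependency:
        return Arrays.stream(shards).iterator();
    }

    public static void main(String[] args) {
        for (String s : new ForArrayDemo()) {
            System.out.println(s);
        }
    }
}
```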
@@ -19,13 +19,13 @@
 
 package org.elasticsearch.action.admin.indices.stats;
 
-import com.google.common.collect.Iterators;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
 import org.elasticsearch.index.shard.ShardId;
 
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.Iterator;
 
 /**
@@ -57,7 +57,7 @@ public class IndexShardStats implements Iterable<ShardStats>, Streamable {
 
     @Override
     public Iterator<ShardStats> iterator() {
-        return Iterators.forArray(shards);
+        return Arrays.stream(shards).iterator();
     }
 
     private CommonStats total = null;
@@ -19,9 +19,9 @@
 
 package org.elasticsearch.action.admin.indices.upgrade.get;
 
-import com.google.common.collect.Iterators;
 import org.elasticsearch.index.shard.ShardId;
 
+import java.util.Arrays;
 import java.util.Iterator;
 
 public class IndexShardUpgradeStatus implements Iterable<ShardUpgradeStatus> {
@@ -49,7 +49,7 @@ public class IndexShardUpgradeStatus implements Iterable<ShardUpgradeStatus> {
 
     @Override
     public Iterator<ShardUpgradeStatus> iterator() {
-        return Iterators.forArray(shards);
+        return Arrays.stream(shards).iterator();
     }
 
     public long getTotalBytes() {
@@ -19,13 +19,13 @@
 
 package org.elasticsearch.action.bulk;
 
-import com.google.common.collect.Iterators;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.unit.TimeValue;
 
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.Iterator;
 
 /**
@@ -95,7 +95,7 @@ public class BulkResponse extends ActionResponse implements Iterable<BulkItemRes
 
     @Override
     public Iterator<BulkItemResponse> iterator() {
-        return Iterators.forArray(responses);
+        return Arrays.stream(responses).iterator();
     }
 
     @Override
@@ -47,7 +47,6 @@ import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.VersionType;
-import org.elasticsearch.index.engine.DocumentAlreadyExistsException;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.engine.VersionConflictEngineException;
 import org.elasticsearch.index.mapper.Mapping;
@@ -97,6 +96,7 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
     protected TransportRequestOptions transportOptions() {
         return BulkAction.INSTANCE.transportOptions(settings);
     }
 
     @Override
     protected BulkShardResponse newResponseInstance() {
         return new BulkShardResponse();
@@ -416,7 +416,7 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
         } catch (Throwable t) {
             t = ExceptionsHelper.unwrapCause(t);
             boolean retry = false;
-            if (t instanceof VersionConflictEngineException || (t instanceof DocumentAlreadyExistsException && translate.operation() == UpdateHelper.Operation.UPSERT)) {
+            if (t instanceof VersionConflictEngineException) {
                 retry = true;
             }
             return new UpdateResult(translate, indexRequest, retry, t, null);
@@ -460,20 +460,12 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
                 SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA, indexRequest.source()).index(shardId.getIndex()).type(indexRequest.type()).id(indexRequest.id())
                         .routing(indexRequest.routing()).parent(indexRequest.parent()).timestamp(indexRequest.timestamp()).ttl(indexRequest.ttl());
 
-                final Engine.IndexingOperation operation;
-                if (indexRequest.opType() == IndexRequest.OpType.INDEX) {
-                    operation = indexShard.prepareIndex(sourceToParse, indexRequest.version(), indexRequest.versionType(), Engine.Operation.Origin.REPLICA);
-                } else {
-                    assert indexRequest.opType() == IndexRequest.OpType.CREATE : indexRequest.opType();
-                    operation = indexShard.prepareCreate(sourceToParse,
-                            indexRequest.version(), indexRequest.versionType(),
-                            Engine.Operation.Origin.REPLICA);
-                }
+                final Engine.Index operation = indexShard.prepareIndex(sourceToParse, indexRequest.version(), indexRequest.versionType(), Engine.Operation.Origin.REPLICA);
                 Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
                 if (update != null) {
                     throw new RetryOnReplicaException(shardId, "Mappings are not available on the replica yet, triggered update: " + update);
                 }
-                operation.execute(indexShard);
+                indexShard.index(operation);
                 location = locationToSync(location, operation.getTranslogLocation());
             } catch (Throwable e) {
                 // if its not an ignore replica failure, we need to make sure to bubble up the failure
@@ -500,7 +492,7 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
             }
         }
 
         processAfter(request.refresh(), indexShard, location);
     }
 
     private void applyVersion(BulkItemRequest item, long version, VersionType versionType) {
@@ -174,7 +174,12 @@ public class TransportExistsAction extends TransportBroadcastAction<ExistsReques
             }
             context.preProcess();
             try {
-                boolean exists = Lucene.exists(context, context.query(), Lucene.createExistsCollector());
+                boolean exists;
+                try {
+                    exists = Lucene.exists(context.searcher(), context.query());
+                } finally {
+                    context.clearReleasables(SearchContext.Lifetime.COLLECTION);
+                }
                 return new ShardExistsResponse(request.shardId(), exists);
             } catch (Exception e) {
                 throw new QueryPhaseExecutionException(context, "failed to execute exists", e);
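The exists check now releases per-request resources in a finally block so they are freed even when the Lucene call throws, while the outer catch still wraps any failure into a QueryPhaseExecutionException. The shape of that nesting, as a plain-Java sketch with hypothetical resource and query types:

```java
// Hypothetical stand-ins for the search context resources and the query execution.
final class NestedCleanupDemo {
    static boolean runExists(AutoCloseable perRequestResources, java.util.concurrent.Callable<Boolean> query) {
        try {
            boolean exists;
            try {
                exists = query.call();          // may throw
            } finally {
                perRequestResources.close();    // always released, success or failure
            }
            return exists;
        } catch (Exception e) {
            // Outer catch plays the role of wrapping into QueryPhaseExecutionException.
            throw new RuntimeException("failed to execute exists", e);
        }
    }
}
```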
@@ -19,7 +19,6 @@
 
 package org.elasticsearch.action.get;
 
-import com.google.common.collect.Iterators;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.action.*;
 import org.elasticsearch.action.support.IndicesOptions;
@@ -27,6 +26,7 @@ import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.Iterators;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
@@ -37,10 +37,7 @@ import org.elasticsearch.index.VersionType;
 import org.elasticsearch.search.fetch.source.FetchSourceContext;
 
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Iterator;
-import java.util.List;
+import java.util.*;
 
 public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements Iterable<MultiGetRequest.Item>, CompositeIndicesRequest, RealtimeRequest {
 
@@ -498,7 +495,7 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
 
     @Override
     public Iterator<Item> iterator() {
-        return Iterators.unmodifiableIterator(items.iterator());
+        return Collections.unmodifiableCollection(items).iterator();
     }
 
     @Override
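Here the Guava call is replaced with a JDK equivalent as well: wrapping the backing list in Collections.unmodifiableCollection before asking it for an iterator yields an iterator whose remove() throws, which is the same guarantee Iterators.unmodifiableIterator gave. A small stand-alone sketch:

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;

public class UnmodifiableIteratorDemo {
    public static void main(String[] args) {
        List<String> items = new ArrayList<>(Arrays.asList("a", "b", "c")); // hypothetical items

        // Previously: com.google.common.collect.Iterators.unmodifiableIterator(items.iterator())
        Iterator<String> it = Collections.unmodifiableCollection(items).iterator();

        System.out.println(it.next()); // "a"
        try {
            it.remove();               // not supported on the unmodifiable view
        } catch (UnsupportedOperationException expected) {
            System.out.println("remove() is rejected, as with Guava's unmodifiableIterator");
        }
    }
}
```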
@@ -19,10 +19,8 @@
 
 package org.elasticsearch.action.get;
 
-import com.google.common.collect.Iterators;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.ActionResponse;
-import org.elasticsearch.action.percolate.PercolateResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
@@ -31,7 +29,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentBuilderString;
 
 import java.io.IOException;
-import java.util.Collections;
+import java.util.Arrays;
 import java.util.Iterator;
 
 public class MultiGetResponse extends ActionResponse implements Iterable<MultiGetItemResponse>, ToXContent {
@@ -126,7 +124,7 @@ public class MultiGetResponse extends ActionResponse implements Iterable<MultiGe
 
     @Override
     public Iterator<MultiGetItemResponse> iterator() {
-        return Iterators.forArray(responses);
+        return Arrays.stream(responses).iterator();
     }
 
     @Override
@@ -49,14 +49,14 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
 /**
  * Index request to index a typed JSON document into a specific index and make it searchable. Best
  * created using {@link org.elasticsearch.client.Requests#indexRequest(String)}.
- * <p>
+ *
  * The index requires the {@link #index()}, {@link #type(String)}, {@link #id(String)} and
  * {@link #source(byte[])} to be set.
- * <p>
+ *
 * The source (content to index) can be set in its bytes form using ({@link #source(byte[])}),
 * its string form ({@link #source(String)}) or using a {@link org.elasticsearch.common.xcontent.XContentBuilder}
 * ({@link #source(org.elasticsearch.common.xcontent.XContentBuilder)}).
- * <p>
+ *
 * If the {@link #id(String)} is not set, it will be automatically generated.
 *
 * @see IndexResponse
@@ -114,7 +114,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
 
     public static OpType fromString(String sOpType) {
         String lowersOpType = sOpType.toLowerCase(Locale.ROOT);
-        switch(lowersOpType){
+        switch (lowersOpType) {
             case "create":
                 return OpType.CREATE;
             case "index":
@@ -216,6 +216,14 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
         if (source == null) {
             validationException = addValidationError("source is missing", validationException);
         }
 
+        if (opType() == OpType.CREATE) {
+            if (versionType != VersionType.INTERNAL || version != Versions.MATCH_DELETED) {
+                validationException = addValidationError("create operations do not support versioning. use index instead", validationException);
+                return validationException;
+            }
+        }
+
         if (!versionType.validateVersionForWrites(version)) {
             validationException = addValidationError("illegal version value [" + version + "] for version type [" + versionType.name() + "]", validationException);
         }
@@ -370,7 +378,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
 
     /**
      * Sets the document source to index.
-     * <p>
+     *
      * Note, its preferable to either set it using {@link #source(org.elasticsearch.common.xcontent.XContentBuilder)}
      * or using the {@link #source(byte[])}.
      */
@@ -480,6 +488,10 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
      */
     public IndexRequest opType(OpType opType) {
         this.opType = opType;
+        if (opType == OpType.CREATE) {
+            version(Versions.MATCH_DELETED);
+            versionType(VersionType.INTERNAL);
+        }
         return this;
     }
 
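With these two hunks, opType(CREATE) is expressed in terms of versioning: the request is pinned to the internal version type with a "match deleted" expected version, and validate() rejects any attempt to combine create with explicit versioning. A rough stand-alone sketch of that validation rule, using hypothetical constants in place of Versions.MATCH_DELETED:

```java
// Hypothetical constants standing in for Elasticsearch's Versions/VersionType values.
final class CreateOpValidationDemo {
    enum OpType { INDEX, CREATE }
    enum VersionType { INTERNAL, EXTERNAL }

    static final long MATCH_DELETED = -4L;   // placeholder value, not the real constant

    static String validate(OpType opType, VersionType versionType, long version) {
        if (opType == OpType.CREATE && (versionType != VersionType.INTERNAL || version != MATCH_DELETED)) {
            return "create operations do not support versioning. use index instead";
        }
        return null; // no validation error
    }

    public static void main(String[] args) {
        System.out.println(validate(OpType.CREATE, VersionType.EXTERNAL, 7));              // rejected
        System.out.println(validate(OpType.CREATE, VersionType.INTERNAL, MATCH_DELETED));  // null, i.e. valid
    }
}
```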
@@ -54,7 +54,7 @@ import org.elasticsearch.transport.TransportService;
 
 /**
  * Performs the index operation.
- * <p>
+ *
  * Allows for the following settings:
  * <ul>
  * <li><b>autoCreateIndex</b>: When set to <tt>true</tt>, will automatically create an index if one does not exists.
@@ -167,6 +167,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
         IndexShard indexShard = indexService.getShard(shardRequest.shardId.id());
 
         final WriteResult<IndexResponse> result = executeIndexRequestOnPrimary(null, request, indexShard);
 
         final IndexResponse response = result.response;
         final Translog.Location location = result.location;
         processAfter(request.refresh(), indexShard, location);
@@ -180,18 +181,12 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
         SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA, request.source()).index(shardId.getIndex()).type(request.type()).id(request.id())
                 .routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl());
 
-        final Engine.IndexingOperation operation;
-        if (request.opType() == IndexRequest.OpType.INDEX) {
-            operation = indexShard.prepareIndex(sourceToParse, request.version(), request.versionType(), Engine.Operation.Origin.REPLICA);
-        } else {
-            assert request.opType() == IndexRequest.OpType.CREATE : request.opType();
-            operation = indexShard.prepareCreate(sourceToParse, request.version(), request.versionType(), Engine.Operation.Origin.REPLICA);
-        }
+        final Engine.Index operation = indexShard.prepareIndex(sourceToParse, request.version(), request.versionType(), Engine.Operation.Origin.REPLICA);
         Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
         if (update != null) {
             throw new RetryOnReplicaException(shardId, "Mappings are not available on the replica yet, triggered update: " + update);
         }
-        operation.execute(indexShard);
+        indexShard.index(operation);
         processAfter(request.refresh(), indexShard, operation.getTranslogLocation());
     }
 
@@ -18,7 +18,6 @@
  */
 package org.elasticsearch.action.percolate;
 
-import com.google.common.collect.Iterators;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.Nullable;
@@ -30,6 +29,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentBuilderString;
 
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.Iterator;
 
 /**
@@ -52,7 +52,7 @@ public class MultiPercolateResponse extends ActionResponse implements Iterable<M
 
     @Override
     public Iterator<Item> iterator() {
-        return Iterators.forArray(items);
+        return Arrays.stream(items).iterator();
     }
 
     /**
@@ -19,12 +19,12 @@
 
 package org.elasticsearch.action.search;
 
-import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.StatusToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
 import org.elasticsearch.rest.RestStatus;
 
 import java.io.IOException;
@@ -69,6 +69,8 @@ public class ClearScrollResponse extends ActionResponse implements StatusToXCont
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.field(Fields.SUCCEEDED, succeeded);
+        builder.field(Fields.NUMFREED, numFreed);
         return builder;
     }
 
@@ -85,4 +87,10 @@ public class ClearScrollResponse extends ActionResponse implements StatusToXCont
         out.writeBoolean(succeeded);
         out.writeVInt(numFreed);
     }
+
+    static final class Fields {
+        static final XContentBuilderString SUCCEEDED = new XContentBuilderString("succeeded");
+        static final XContentBuilderString NUMFREED = new XContentBuilderString("num_freed");
+    }
+
 }
@@ -19,7 +19,6 @@
 
 package org.elasticsearch.action.search;
 
-import com.google.common.collect.Iterators;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.Nullable;
@@ -32,7 +31,7 @@ import org.elasticsearch.common.xcontent.XContentBuilderString;
 import org.elasticsearch.common.xcontent.XContentFactory;
 
 import java.io.IOException;
-import java.util.Collections;
+import java.util.Arrays;
 import java.util.Iterator;
 
 /**
@@ -122,7 +121,7 @@ public class MultiSearchResponse extends ActionResponse implements Iterable<Mult
 
     @Override
     public Iterator<Item> iterator() {
-        return Iterators.forArray(items);
+        return Arrays.stream(items).iterator();
     }
 
     /**
@@ -30,6 +30,7 @@ import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.*;
@@ -95,17 +96,22 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
 
         private final NodesRequest request;
         private final String[] nodesIds;
+        private final DiscoveryNode[] nodes;
         private final ActionListener<NodesResponse> listener;
-        private final ClusterState clusterState;
         private final AtomicReferenceArray<Object> responses;
         private final AtomicInteger counter = new AtomicInteger();
 
         private AsyncAction(NodesRequest request, ActionListener<NodesResponse> listener) {
             this.request = request;
             this.listener = listener;
-            clusterState = clusterService.state();
+            ClusterState clusterState = clusterService.state();
             String[] nodesIds = resolveNodes(request, clusterState);
             this.nodesIds = filterNodeIds(clusterState.nodes(), nodesIds);
+            ImmutableOpenMap<String, DiscoveryNode> nodes = clusterState.nodes().nodes();
+            this.nodes = new DiscoveryNode[nodesIds.length];
+            for (int i = 0; i < nodesIds.length; i++) {
+                this.nodes[i] = nodes.get(nodesIds[i]);
+            }
             this.responses = new AtomicReferenceArray<>(this.nodesIds.length);
         }
 
@ -128,7 +134,7 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
|
|||||||
for (int i = 0; i < nodesIds.length; i++) {
|
for (int i = 0; i < nodesIds.length; i++) {
|
||||||
final String nodeId = nodesIds[i];
|
final String nodeId = nodesIds[i];
|
||||||
final int idx = i;
|
final int idx = i;
|
||||||
final DiscoveryNode node = clusterState.nodes().nodes().get(nodeId);
|
final DiscoveryNode node = nodes[i];
|
||||||
try {
|
try {
|
||||||
if (node == null) {
|
if (node == null) {
|
||||||
onFailure(idx, nodeId, new NoSuchNodeException(nodeId));
|
onFailure(idx, nodeId, new NoSuchNodeException(nodeId));
|
||||||
|
@@ -56,7 +56,6 @@ import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.concurrent.AbstractRunnable;
 import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
 import org.elasticsearch.index.IndexService;
-import org.elasticsearch.index.engine.DocumentAlreadyExistsException;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.engine.VersionConflictEngineException;
 import org.elasticsearch.index.mapper.Mapping;
@@ -188,9 +187,6 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
         if (cause instanceof VersionConflictEngineException) {
             return true;
         }
-        if (cause instanceof DocumentAlreadyExistsException) {
-            return true;
-        }
         return false;
     }
 
@@ -1036,22 +1032,17 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
 
     /** Utility method to create either an index or a create operation depending
      * on the {@link OpType} of the request. */
-    private final Engine.IndexingOperation prepareIndexOperationOnPrimary(BulkShardRequest shardRequest, IndexRequest request, IndexShard indexShard) {
+    private final Engine.Index prepareIndexOperationOnPrimary(BulkShardRequest shardRequest, IndexRequest request, IndexShard indexShard) {
         SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.PRIMARY, request.source()).index(request.index()).type(request.type()).id(request.id())
                 .routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl());
-        if (request.opType() == IndexRequest.OpType.INDEX) {
         return indexShard.prepareIndex(sourceToParse, request.version(), request.versionType(), Engine.Operation.Origin.PRIMARY);
-        } else {
-            assert request.opType() == IndexRequest.OpType.CREATE : request.opType();
-            return indexShard.prepareCreate(sourceToParse,
-                    request.version(), request.versionType(), Engine.Operation.Origin.PRIMARY);
-        }
     }
 
     /** Execute the given {@link IndexRequest} on a primary shard, throwing a
      * {@link RetryOnPrimaryException} if the operation needs to be re-tried. */
     protected final WriteResult<IndexResponse> executeIndexRequestOnPrimary(BulkShardRequest shardRequest, IndexRequest request, IndexShard indexShard) throws Throwable {
-        Engine.IndexingOperation operation = prepareIndexOperationOnPrimary(shardRequest, request, indexShard);
+        Engine.Index operation = prepareIndexOperationOnPrimary(shardRequest, request, indexShard);
         Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
         final ShardId shardId = indexShard.shardId();
         if (update != null) {
@@ -1064,7 +1055,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
                     "Dynamics mappings are not available on the node that holds the primary yet");
             }
         }
-        final boolean created = operation.execute(indexShard);
+        final boolean created = indexShard.index(operation);
 
         // update the version on request so it will happen on the replicas
         final long version = operation.version();
@@ -19,6 +19,7 @@
 
 package org.elasticsearch.action.support.single.instance;
 
+import org.elasticsearch.ElasticsearchTimeoutException;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.action.UnavailableShardsException;
@@ -35,6 +36,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.cluster.routing.ShardIterator;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.logging.support.LoggerMessageFormat;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.index.shard.ShardId;
@@ -42,6 +44,7 @@ import org.elasticsearch.node.NodeClosedException;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.*;
 
+import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.function.Supplier;
 
@@ -111,9 +114,8 @@ public abstract class TransportInstanceSingleOperationAction<Request extends Ins
         private volatile ClusterStateObserver observer;
         private ShardIterator shardIt;
         private DiscoveryNodes nodes;
-        private final AtomicBoolean operationStarted = new AtomicBoolean();
 
-        private AsyncSingleAction(Request request, ActionListener<Response> listener) {
+        AsyncSingleAction(Request request, ActionListener<Response> listener) {
            this.request = request;
            this.listener = listener;
        }
@@ -123,14 +125,14 @@ public abstract class TransportInstanceSingleOperationAction<Request extends Ins
            doStart();
        }
 
-        protected boolean doStart() {
+        protected void doStart() {
            nodes = observer.observedState().nodes();
            try {
                ClusterBlockException blockException = checkGlobalBlock(observer.observedState());
                if (blockException != null) {
                    if (blockException.retryable()) {
                        retry(blockException);
-                        return false;
+                        return;
                    } else {
                        throw blockException;
                    }
@@ -138,13 +140,14 @@ public abstract class TransportInstanceSingleOperationAction<Request extends Ins
                request.concreteIndex(indexNameExpressionResolver.concreteSingleIndex(observer.observedState(), request));
                // check if we need to execute, and if not, return
                if (!resolveRequest(observer.observedState(), request, listener)) {
-                    return true;
+                    listener.onFailure(new IllegalStateException(LoggerMessageFormat.format("{} request {} could not be resolved", new ShardId(request.index, request.shardId), actionName)));
+                    return;
                }
                blockException = checkRequestBlock(observer.observedState(), request);
                if (blockException != null) {
                    if (blockException.retryable()) {
                        retry(blockException);
-                        return false;
+                        return;
                    } else {
                        throw blockException;
                    }
@@ -152,13 +155,13 @@ public abstract class TransportInstanceSingleOperationAction<Request extends Ins
                shardIt = shards(observer.observedState(), request);
            } catch (Throwable e) {
                listener.onFailure(e);
-                return true;
+                return;
            }
 
            // no shardIt, might be in the case between index gateway recovery and shardIt initialization
            if (shardIt.size() == 0) {
                retry(null);
-                return false;
+                return;
            }
 
            // this transport only make sense with an iterator that returns a single shard routing (like primary)
@@ -169,11 +172,7 @@ public abstract class TransportInstanceSingleOperationAction<Request extends Ins
 
            if (!shard.active()) {
                retry(null);
-                return false;
-            }
-
-            if (!operationStarted.compareAndSet(false, true)) {
-                return true;
+                return;
            }
 
            request.shardId = shardIt.shardId().id();
@@ -197,24 +196,30 @@ public abstract class TransportInstanceSingleOperationAction<Request extends Ins
 
                @Override
                public void handleException(TransportException exp) {
+                    Throwable cause = exp.unwrapCause();
                    // if we got disconnected from the node, or the node / shard is not in the right state (being closed)
-                    if (exp.unwrapCause() instanceof ConnectTransportException || exp.unwrapCause() instanceof NodeClosedException ||
+                    if (cause instanceof ConnectTransportException || cause instanceof NodeClosedException ||
                            retryOnFailure(exp)) {
-                        operationStarted.set(false);
-                        // we already marked it as started when we executed it (removed the listener) so pass false
-                        // to re-add to the cluster listener
-                        retry(null);
+                        retry(cause);
                    } else {
                        listener.onFailure(exp);
                    }
                }
            });
-            return true;
        }
 
        void retry(final @Nullable Throwable failure) {
            if (observer.isTimedOut()) {
                // we running as a last attempt after a timeout has happened. don't retry
+                Throwable listenFailure = failure;
+                if (listenFailure == null) {
+                    if (shardIt == null) {
+                        listenFailure = new UnavailableShardsException(new ShardId(request.concreteIndex(), -1), "Timeout waiting for [{}], request: {}", request.timeout(), actionName);
+                    } else {
+                        listenFailure = new UnavailableShardsException(shardIt.shardId(), "[{}] shardIt, [{}] active : Timeout waiting for [{}], request: {}", shardIt.size(), shardIt.sizeActive(), request.timeout(), actionName);
+                    }
+                }
+                listener.onFailure(listenFailure);
                return;
            }
 
@@ -232,17 +237,7 @@ public abstract class TransportInstanceSingleOperationAction<Request extends Ins
                @Override
                public void onTimeout(TimeValue timeout) {
                    // just to be on the safe side, see if we can start it now?
-                    if (!doStart()) {
-                        Throwable listenFailure = failure;
-                        if (listenFailure == null) {
-                            if (shardIt == null) {
-                                listenFailure = new UnavailableShardsException(new ShardId(request.concreteIndex(), -1), "Timeout waiting for [" + timeout + "], request: " + request.toString());
-                            } else {
-                                listenFailure = new UnavailableShardsException(shardIt.shardId(), "[" + shardIt.size() + "] shardIt, [" + shardIt.sizeActive() + "] active : Timeout waiting for [" + timeout + "], request: " + request.toString());
-                            }
-                        }
-                        listener.onFailure(listenFailure);
-                    }
+                    doStart();
                }
            }, request.timeout());
        }
@@ -19,11 +19,11 @@
 
 package org.elasticsearch.action.termvectors;
 
-import com.google.common.collect.Iterators;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.action.*;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.Iterators;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.XContentFactory;
@@ -74,7 +74,7 @@ public class MultiTermVectorsRequest extends ActionRequest<MultiTermVectorsReque
 
     @Override
     public Iterator<TermVectorsRequest> iterator() {
-        return Iterators.unmodifiableIterator(requests.iterator());
+        return Collections.unmodifiableCollection(requests).iterator();
     }
 
     public boolean isEmpty() {
@@ -19,7 +19,6 @@
 
 package org.elasticsearch.action.termvectors;
 
-import com.google.common.collect.Iterators;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -30,6 +29,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentBuilderString;
 
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.Iterator;
 
 public class MultiTermVectorsResponse extends ActionResponse implements Iterable<MultiTermVectorsItemResponse>, ToXContent {
@@ -120,7 +120,7 @@ public class MultiTermVectorsResponse extends ActionResponse implements Iterable
 
     @Override
     public Iterator<MultiTermVectorsItemResponse> iterator() {
-        return Iterators.forArray(responses);
+        return Arrays.stream(responses).iterator();
     }
 
     @Override
@@ -48,9 +48,8 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentType;
-import org.elasticsearch.index.engine.DocumentAlreadyExistsException;
-import org.elasticsearch.index.engine.VersionConflictEngineException;
 import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.engine.VersionConflictEngineException;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.indices.IndexAlreadyExistsException;
 import org.elasticsearch.indices.IndicesService;
@@ -170,7 +169,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
         final UpdateHelper.Result result = updateHelper.prepare(request, indexShard);
         switch (result.operation()) {
             case UPSERT:
-                IndexRequest upsertRequest = new IndexRequest((IndexRequest)result.action(), request);
+                IndexRequest upsertRequest = new IndexRequest(result.action(), request);
                 // we fetch it from the index request so we don't generate the bytes twice, its already done in the index request
                 final BytesReference upsertSourceBytes = upsertRequest.source();
                 indexAction.execute(upsertRequest, new ActionListener<IndexResponse>() {
@@ -189,7 +188,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
                     @Override
                     public void onFailure(Throwable e) {
                         e = ExceptionsHelper.unwrapCause(e);
-                        if (e instanceof VersionConflictEngineException || e instanceof DocumentAlreadyExistsException) {
+                        if (e instanceof VersionConflictEngineException) {
                             if (retryCount < request.retryOnConflict()) {
                                 threadPool.executor(executor()).execute(new ActionRunnable<UpdateResponse>(listener) {
                                     @Override
@@ -205,7 +204,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
                 });
                 break;
             case INDEX:
-                IndexRequest indexRequest = new IndexRequest((IndexRequest)result.action(), request);
+                IndexRequest indexRequest = new IndexRequest(result.action(), request);
                 // we fetch it from the index request so we don't generate the bytes twice, its already done in the index request
                 final BytesReference indexSourceBytes = indexRequest.source();
                 indexAction.execute(indexRequest, new ActionListener<IndexResponse>() {
@@ -235,7 +234,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
                 });
                 break;
             case DELETE:
-                DeleteRequest deleteRequest = new DeleteRequest((DeleteRequest)result.action(), request);
+                DeleteRequest deleteRequest = new DeleteRequest(result.action(), request);
                 deleteAction.execute(deleteRequest, new ActionListener<DeleteResponse>() {
                     @Override
                     public void onResponse(DeleteResponse response) {
@@ -26,7 +26,6 @@ import org.elasticsearch.common.PidFile;
 import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.cli.CliTool;
 import org.elasticsearch.common.cli.Terminal;
-import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.inject.CreationException;
 import org.elasticsearch.common.lease.Releasables;
 import org.elasticsearch.common.logging.ESLogger;
@@ -249,13 +248,13 @@ final class Bootstrap {
 
         Environment environment = initialSettings(foreground);
         Settings settings = environment.settings();
+        setupLogging(settings, environment);
+        checkForCustomConfFile();
 
         if (environment.pidFile() != null) {
             PidFile.create(environment.pidFile(), true);
         }
 
-        setupLogging(settings, environment);
-
         if (System.getProperty("es.max-open-files", "false").equals("true")) {
             ESLogger logger = Loggers.getLogger(Bootstrap.class);
             logger.info("max_open_files [{}]", ProcessProbe.getInstance().getMaxFileDescriptorCount());
@@ -330,4 +329,21 @@ final class Bootstrap {
             System.err.flush();
         }
     }
+
+    private static void checkForCustomConfFile() {
+        String confFileSetting = System.getProperty("es.default.config");
+        checkUnsetAndMaybeExit(confFileSetting, "es.default.config");
+        confFileSetting = System.getProperty("es.config");
+        checkUnsetAndMaybeExit(confFileSetting, "es.config");
+        confFileSetting = System.getProperty("elasticsearch.config");
+        checkUnsetAndMaybeExit(confFileSetting, "elasticsearch.config");
+    }
+
+    private static void checkUnsetAndMaybeExit(String confFileSetting, String settingName) {
+        if (confFileSetting != null && confFileSetting.isEmpty() == false) {
+            ESLogger logger = Loggers.getLogger(Bootstrap.class);
+            logger.info("{} is no longer supported. elasticsearch.yml must be placed in the config directory and cannot be renamed.", settingName);
+            System.exit(1);
+        }
+    }
 }
@@ -165,7 +165,7 @@ final class Security {
         Map<String,String> m = new HashMap<>();
         m.put("repository-s3", "org.elasticsearch.plugin.repository.s3.S3RepositoryPlugin");
         m.put("discovery-ec2", "org.elasticsearch.plugin.discovery.ec2.Ec2DiscoveryPlugin");
-        m.put("cloud-gce", "org.elasticsearch.plugin.cloud.gce.CloudGcePlugin");
+        m.put("discovery-gce", "org.elasticsearch.plugin.discovery.gce.GceDiscoveryPlugin");
         m.put("lang-expression", "org.elasticsearch.script.expression.ExpressionPlugin");
         m.put("lang-groovy", "org.elasticsearch.script.groovy.GroovyPlugin");
         m.put("lang-javascript", "org.elasticsearch.plugin.javascript.JavaScriptPlugin");
@@ -21,9 +21,6 @@ package org.elasticsearch.client;
 
 import org.elasticsearch.action.ActionFuture;
 import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.action.admin.indices.validate.template.RenderSearchTemplateRequest;
-import org.elasticsearch.action.admin.indices.validate.template.RenderSearchTemplateRequestBuilder;
-import org.elasticsearch.action.admin.indices.validate.template.RenderSearchTemplateResponse;
 import org.elasticsearch.action.bulk.BulkRequest;
 import org.elasticsearch.action.bulk.BulkRequestBuilder;
 import org.elasticsearch.action.bulk.BulkResponse;
@@ -77,6 +77,9 @@ import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse;
 import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequest;
 import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequestBuilder;
 import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse;
+import org.elasticsearch.action.admin.cluster.validate.template.RenderSearchTemplateRequest;
+import org.elasticsearch.action.admin.cluster.validate.template.RenderSearchTemplateRequestBuilder;
+import org.elasticsearch.action.admin.cluster.validate.template.RenderSearchTemplateResponse;
 
 /**
  * Administrative actions/operations against indices.
@@ -423,4 +426,25 @@ public interface ClusterAdminClient extends ElasticsearchClient {
      */
     SnapshotsStatusRequestBuilder prepareSnapshotStatus();
 
+
+    /**
+     * Return the rendered search request for a given search template.
+     *
+     * @param request The request
+     * @return The result future
+     */
+    ActionFuture<RenderSearchTemplateResponse> renderSearchTemplate(RenderSearchTemplateRequest request);
+
+    /**
+     * Return the rendered search request for a given search template.
+     *
+     * @param request The request
+     * @param listener A listener to be notified of the result
+     */
+    void renderSearchTemplate(RenderSearchTemplateRequest request, ActionListener<RenderSearchTemplateResponse> listener);
+
+    /**
+     * Return the rendered search request for a given search template.
+     */
+    RenderSearchTemplateRequestBuilder prepareRenderSearchTemplate();
 }
@@ -105,9 +105,6 @@ import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeResponse;
 import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest;
 import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder;
 import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse;
-import org.elasticsearch.action.admin.indices.validate.template.RenderSearchTemplateRequest;
-import org.elasticsearch.action.admin.indices.validate.template.RenderSearchTemplateRequestBuilder;
-import org.elasticsearch.action.admin.indices.validate.template.RenderSearchTemplateResponse;
 import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerRequest;
 import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerRequestBuilder;
 import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse;
@@ -746,27 +743,6 @@ public interface IndicesAdminClient extends ElasticsearchClient {
      */
     ValidateQueryRequestBuilder prepareValidateQuery(String... indices);
 
-    /**
-     * Return the rendered search request for a given search template.
-     *
-     * @param request The request
-     * @return The result future
-     */
-    ActionFuture<RenderSearchTemplateResponse> renderSearchTemplate(RenderSearchTemplateRequest request);
-
-    /**
-     * Return the rendered search request for a given search template.
-     *
-     * @param request The request
-     * @param listener A listener to be notified of the result
-     */
-    void renderSearchTemplate(RenderSearchTemplateRequest request, ActionListener<RenderSearchTemplateResponse> listener);
-
-    /**
-     * Return the rendered search request for a given search template.
-     */
-    RenderSearchTemplateRequestBuilder prepareRenderSearchTemplate();
-
     /**
      * Puts an index search warmer to be applies when applicable.
      */
@@ -208,10 +208,10 @@ import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction
 import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest;
 import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder;
 import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse;
-import org.elasticsearch.action.admin.indices.validate.template.RenderSearchTemplateAction;
-import org.elasticsearch.action.admin.indices.validate.template.RenderSearchTemplateRequest;
-import org.elasticsearch.action.admin.indices.validate.template.RenderSearchTemplateRequestBuilder;
-import org.elasticsearch.action.admin.indices.validate.template.RenderSearchTemplateResponse;
+import org.elasticsearch.action.admin.cluster.validate.template.RenderSearchTemplateAction;
+import org.elasticsearch.action.admin.cluster.validate.template.RenderSearchTemplateRequest;
+import org.elasticsearch.action.admin.cluster.validate.template.RenderSearchTemplateRequestBuilder;
+import org.elasticsearch.action.admin.cluster.validate.template.RenderSearchTemplateResponse;
 import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerAction;
 import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerRequest;
 import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerRequestBuilder;
@@ -1142,6 +1142,21 @@ public abstract class AbstractClient extends AbstractComponent implements Client
         public SnapshotsStatusRequestBuilder prepareSnapshotStatus() {
             return new SnapshotsStatusRequestBuilder(this, SnapshotsStatusAction.INSTANCE);
         }
+
+        @Override
+        public ActionFuture<RenderSearchTemplateResponse> renderSearchTemplate(final RenderSearchTemplateRequest request) {
+            return execute(RenderSearchTemplateAction.INSTANCE, request);
+        }
+
+        @Override
+        public void renderSearchTemplate(final RenderSearchTemplateRequest request, final ActionListener<RenderSearchTemplateResponse> listener) {
+            execute(RenderSearchTemplateAction.INSTANCE, request, listener);
+        }
+
+        @Override
+        public RenderSearchTemplateRequestBuilder prepareRenderSearchTemplate() {
+            return new RenderSearchTemplateRequestBuilder(this, RenderSearchTemplateAction.INSTANCE);
+        }
     }
 
     static class IndicesAdmin implements IndicesAdminClient {
@@ -1617,21 +1632,6 @@ public abstract class AbstractClient extends AbstractComponent implements Client
             return new ValidateQueryRequestBuilder(this, ValidateQueryAction.INSTANCE).setIndices(indices);
         }
 
-        @Override
-        public ActionFuture<RenderSearchTemplateResponse> renderSearchTemplate(final RenderSearchTemplateRequest request) {
-            return execute(RenderSearchTemplateAction.INSTANCE, request);
-        }
-
-        @Override
-        public void renderSearchTemplate(final RenderSearchTemplateRequest request, final ActionListener<RenderSearchTemplateResponse> listener) {
-            execute(RenderSearchTemplateAction.INSTANCE, request, listener);
-        }
-
-        @Override
-        public RenderSearchTemplateRequestBuilder prepareRenderSearchTemplate() {
-            return new RenderSearchTemplateRequestBuilder(this, RenderSearchTemplateAction.INSTANCE);
-        }
-
         @Override
         public ActionFuture<PutWarmerResponse> putWarmer(PutWarmerRequest request) {
             return execute(PutWarmerAction.INSTANCE, request);
@@ -73,7 +73,7 @@ public class MappingUpdatedAction extends AbstractComponent {
             throw new IllegalArgumentException("_default_ mapping should not be updated");
         }
         return client.preparePutMapping(index).setType(type).setSource(mappingUpdate.toString())
                 .setMasterNodeTimeout(timeout).setTimeout(timeout);
     }
 
     public void updateMappingOnMaster(String index, String type, Mapping mappingUpdate, final TimeValue timeout, final MappingUpdateListener listener) {
@@ -21,7 +21,6 @@ package org.elasticsearch.cluster.metadata;
 
 import com.carrotsearch.hppc.cursors.ObjectCursor;
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
-import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.Diff;
 import org.elasticsearch.cluster.Diffable;
@@ -29,8 +28,6 @@ import org.elasticsearch.cluster.DiffableUtils;
 import org.elasticsearch.cluster.block.ClusterBlock;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.node.DiscoveryNodeFilters;
-import org.elasticsearch.cluster.routing.HashFunction;
-import org.elasticsearch.cluster.routing.Murmur3HashFunction;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
@@ -167,16 +164,12 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
     public static final String SETTING_PRIORITY = "index.priority";
     public static final String SETTING_CREATION_DATE_STRING = "index.creation_date_string";
     public static final String SETTING_INDEX_UUID = "index.uuid";
-    public static final String SETTING_LEGACY_ROUTING_HASH_FUNCTION = "index.legacy.routing.hash.type";
-    public static final String SETTING_LEGACY_ROUTING_USE_TYPE = "index.legacy.routing.use_type";
    public static final String SETTING_DATA_PATH = "index.data_path";
    public static final String SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE = "index.shared_filesystem.recover_on_any_node";
    public static final String INDEX_UUID_NA_VALUE = "_na_";
 
 
-    // hard-coded hash function as of 2.0
-    // older indices will read which hash function to use in their index settings
-    private static final HashFunction MURMUR3_HASH_FUNCTION = new Murmur3HashFunction();
 
    private final String index;
    private final long version;
@@ -200,8 +193,6 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
     private final Version indexCreatedVersion;
     private final Version indexUpgradedVersion;
     private final org.apache.lucene.util.Version minimumCompatibleLuceneVersion;
-    private final HashFunction routingHashFunction;
-    private final boolean useTypeForRouting;
 
     private IndexMetaData(String index, long version, State state, Settings settings, ImmutableOpenMap<String, MappingMetaData> mappings, ImmutableOpenMap<String, AliasMetaData> aliases, ImmutableOpenMap<String, Custom> customs) {
         if (settings.getAsInt(SETTING_NUMBER_OF_SHARDS, null) == null) {
@@ -249,23 +240,6 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
         } else {
             this.minimumCompatibleLuceneVersion = null;
         }
-        final String hashFunction = settings.get(SETTING_LEGACY_ROUTING_HASH_FUNCTION);
-        if (hashFunction == null) {
-            routingHashFunction = MURMUR3_HASH_FUNCTION;
-        } else {
-            final Class<? extends HashFunction> hashFunctionClass;
-            try {
-                hashFunctionClass = Class.forName(hashFunction).asSubclass(HashFunction.class);
-            } catch (ClassNotFoundException|NoClassDefFoundError e) {
-                throw new ElasticsearchException("failed to load custom hash function [" + hashFunction + "]", e);
-            }
-            try {
-                routingHashFunction = hashFunctionClass.newInstance();
-            } catch (InstantiationException | IllegalAccessException e) {
-                throw new IllegalStateException("Cannot instantiate hash function", e);
-            }
-        }
-        useTypeForRouting = settings.getAsBoolean(SETTING_LEGACY_ROUTING_USE_TYPE, false);
     }
 
     public String index() {
@@ -335,29 +309,6 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
         return minimumCompatibleLuceneVersion;
     }
 
-    /**
-     * Return the {@link HashFunction} that should be used for routing.
-     */
-    public HashFunction routingHashFunction() {
-        return routingHashFunction;
-    }
-
-    public HashFunction getRoutingHashFunction() {
-        return routingHashFunction();
-    }
-
-    /**
-     * Return whether routing should use the _type in addition to the _id in
-     * order to decide which shard a document should go to.
-     */
-    public boolean routingUseType() {
-        return useTypeForRouting;
-    }
-
-    public boolean getRoutingUseType() {
-        return routingUseType();
-    }
-
     public long creationDate() {
         return settings.getAsLong(SETTING_CREATION_DATE, -1l);
     }
@@ -21,11 +21,7 @@ package org.elasticsearch.cluster.metadata;
 import com.carrotsearch.hppc.cursors.ObjectCursor;
 
 import org.apache.lucene.analysis.Analyzer;
-import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
-import org.elasticsearch.cluster.routing.DjbHashFunction;
-import org.elasticsearch.cluster.routing.HashFunction;
-import org.elasticsearch.cluster.routing.SimpleHashFunction;
 import org.elasticsearch.cluster.routing.UnassignedInfo;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
@@ -34,8 +30,7 @@ import org.elasticsearch.index.Index;
 import org.elasticsearch.index.analysis.AnalysisService;
 import org.elasticsearch.index.analysis.NamedAnalyzer;
 import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.similarity.SimilarityLookupService;
-import org.elasticsearch.index.store.IndexStoreModule;
+import org.elasticsearch.index.similarity.SimilarityService;
 import org.elasticsearch.script.ScriptService;
 
 import java.util.Locale;
@@ -54,47 +49,12 @@ import static org.elasticsearch.common.util.set.Sets.newHashSet;
  */
 public class MetaDataIndexUpgradeService extends AbstractComponent {
 
-    private static final String DEPRECATED_SETTING_ROUTING_HASH_FUNCTION = "cluster.routing.operation.hash.type";
-    private static final String DEPRECATED_SETTING_ROUTING_USE_TYPE = "cluster.routing.operation.use_type";
-
-    private final Class<? extends HashFunction> pre20HashFunction;
-    private final Boolean pre20UseType;
     private final ScriptService scriptService;
 
     @Inject
     public MetaDataIndexUpgradeService(Settings settings, ScriptService scriptService) {
         super(settings);
         this.scriptService = scriptService;
-        final String pre20HashFunctionName = settings.get(DEPRECATED_SETTING_ROUTING_HASH_FUNCTION, null);
-        final boolean hasCustomPre20HashFunction = pre20HashFunctionName != null;
-        // the hash function package has changed we replace the two hash functions if their fully qualified name is used.
-        if (hasCustomPre20HashFunction) {
-            switch (pre20HashFunctionName) {
-                case "Simple":
-                case "simple":
-                case "org.elasticsearch.cluster.routing.operation.hash.simple.SimpleHashFunction":
-                    pre20HashFunction = SimpleHashFunction.class;
-                    break;
-                case "Djb":
-                case "djb":
-                case "org.elasticsearch.cluster.routing.operation.hash.djb.DjbHashFunction":
-                    pre20HashFunction = DjbHashFunction.class;
-                    break;
-                default:
-                    try {
-                        pre20HashFunction = Class.forName(pre20HashFunctionName).asSubclass(HashFunction.class);
-                    } catch (ClassNotFoundException|NoClassDefFoundError e) {
-                        throw new ElasticsearchException("failed to load custom hash function [" + pre20HashFunctionName + "]", e);
-                    }
-            }
-        } else {
-            pre20HashFunction = DjbHashFunction.class;
-        }
-        pre20UseType = settings.getAsBoolean(DEPRECATED_SETTING_ROUTING_USE_TYPE, null);
-        if (hasCustomPre20HashFunction || pre20UseType != null) {
-            logger.warn("Settings [{}] and [{}] are deprecated. Index settings from your old indices have been updated to record the fact that they "
-                    + "used some custom routing logic, you can now remove these settings from your `elasticsearch.yml` file", DEPRECATED_SETTING_ROUTING_HASH_FUNCTION, DEPRECATED_SETTING_ROUTING_USE_TYPE);
-        }
     }
 
     /**
@@ -110,68 +70,29 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
             return indexMetaData;
         }
         checkSupportedVersion(indexMetaData);
-        IndexMetaData newMetaData = upgradeLegacyRoutingSettings(indexMetaData);
+        IndexMetaData newMetaData = indexMetaData;
         newMetaData = addDefaultUnitsIfNeeded(newMetaData);
         checkMappingsCompatibility(newMetaData);
-        newMetaData = upgradeSettings(newMetaData);
        newMetaData = markAsUpgraded(newMetaData);
        return newMetaData;
    }
 
-    IndexMetaData upgradeSettings(IndexMetaData indexMetaData) {
-        final String storeType = indexMetaData.getSettings().get(IndexStoreModule.STORE_TYPE);
-        if (storeType != null) {
-            final String upgradeStoreType;
-            switch (storeType.toLowerCase(Locale.ROOT)) {
-                case "nio_fs":
-                case "niofs":
-                    upgradeStoreType = "niofs";
-                    break;
-                case "mmap_fs":
-                case "mmapfs":
-                    upgradeStoreType = "mmapfs";
-                    break;
-                case "simple_fs":
-                case "simplefs":
-                    upgradeStoreType = "simplefs";
-                    break;
-                case "default":
-                    upgradeStoreType = "default";
-                    break;
-                case "fs":
-                    upgradeStoreType = "fs";
-                    break;
-                default:
-                    upgradeStoreType = storeType;
-            }
-            if (storeType.equals(upgradeStoreType) == false) {
-                Settings indexSettings = Settings.builder().put(indexMetaData.settings())
-                        .put(IndexStoreModule.STORE_TYPE, upgradeStoreType)
-                        .build();
-                return IndexMetaData.builder(indexMetaData)
-                        .version(indexMetaData.version())
-                        .settings(indexSettings)
-                        .build();
-            }
-        }
-        return indexMetaData;
-    }
 
     /**
      * Checks if the index was already opened by this version of Elasticsearch and doesn't require any additional checks.
      */
     private boolean isUpgraded(IndexMetaData indexMetaData) {
-        return indexMetaData.upgradeVersion().onOrAfter(Version.V_2_0_0_beta1);
+        return indexMetaData.upgradeVersion().onOrAfter(Version.V_3_0_0);
     }
 
     /**
-     * Elasticsearch 2.0 no longer supports indices with pre Lucene v4.0 (Elasticsearch v 0.90.0) segments. All indices
-     * that were created before Elasticsearch v0.90.0 should be upgraded using upgrade plugin before they can
+     * Elasticsearch 3.0 no longer supports indices with pre Lucene v5.0 (Elasticsearch v2.0.0.beta1) segments. All indices
+     * that were created before Elasticsearch v2.0.0.beta1 should be upgraded using upgrade API before they can
     * be open by this version of elasticsearch.
     */
    private void checkSupportedVersion(IndexMetaData indexMetaData) {
        if (indexMetaData.getState() == IndexMetaData.State.OPEN && isSupportedVersion(indexMetaData) == false) {
-            throw new IllegalStateException("The index [" + indexMetaData.getIndex() + "] was created before v0.90.0 and wasn't upgraded."
+            throw new IllegalStateException("The index [" + indexMetaData.getIndex() + "] was created before v2.0.0.beta1 and wasn't upgraded."
                + " This index should be open using a version before " + Version.CURRENT.minimumCompatibilityVersion()
                + " and upgraded using the upgrade API.");
        }
@@ -181,44 +102,18 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
     * Returns true if this index can be supported by the current version of elasticsearch
     */
    private static boolean isSupportedVersion(IndexMetaData indexMetaData) {
-        if (indexMetaData.creationVersion().onOrAfter(Version.V_0_90_0_Beta1)) {
-            // The index was created with elasticsearch that was using Lucene 4.0
+        if (indexMetaData.creationVersion().onOrAfter(Version.V_2_0_0_beta1)) {
+            // The index was created with elasticsearch that was using Lucene 5.2.1
            return true;
        }
        if (indexMetaData.getMinimumCompatibleVersion() != null &&
-                indexMetaData.getMinimumCompatibleVersion().onOrAfter(org.apache.lucene.util.Version.LUCENE_4_0_0)) {
+                indexMetaData.getMinimumCompatibleVersion().onOrAfter(org.apache.lucene.util.Version.LUCENE_5_0_0)) {
            //The index was upgraded we can work with it
            return true;
        }
        return false;
    }
 
-    /**
-     * Elasticsearch 2.0 deprecated custom routing hash functions. So what we do here is that for old indices, we
-     * move this old and deprecated node setting to an index setting so that we can keep things backward compatible.
-     */
-    private IndexMetaData upgradeLegacyRoutingSettings(IndexMetaData indexMetaData) {
-        if (indexMetaData.settings().get(IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION) == null
-                && indexMetaData.getCreationVersion().before(Version.V_2_0_0_beta1)) {
-            // these settings need an upgrade
-            Settings indexSettings = Settings.builder().put(indexMetaData.settings())
-                    .put(IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION, pre20HashFunction)
-                    .put(IndexMetaData.SETTING_LEGACY_ROUTING_USE_TYPE, pre20UseType == null ? false : pre20UseType)
-                    .build();
-            return IndexMetaData.builder(indexMetaData)
-                    .version(indexMetaData.version())
-                    .settings(indexSettings)
-                    .build();
-        } else if (indexMetaData.getCreationVersion().onOrAfter(Version.V_2_0_0_beta1)) {
-            if (indexMetaData.getSettings().get(IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION) != null
-                    || indexMetaData.getSettings().get(IndexMetaData.SETTING_LEGACY_ROUTING_USE_TYPE) != null) {
-                throw new IllegalStateException("Index [" + indexMetaData.getIndex() + "] created on or after 2.0 should NOT contain [" + IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION
-                        + "] + or [" + IndexMetaData.SETTING_LEGACY_ROUTING_USE_TYPE + "] in its index settings");
-            }
-        }
-        return indexMetaData;
-    }
-
     /** All known byte-sized settings for an index. */
     public static final Set<String> INDEX_BYTES_SIZE_SETTINGS = unmodifiableSet(newHashSet(
             "index.merge.policy.floor_segment",
@@ -322,11 +217,11 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
         Index index = new Index(indexMetaData.getIndex());
         Settings settings = indexMetaData.settings();
         try {
-            SimilarityLookupService similarityLookupService = new SimilarityLookupService(index, settings);
+            SimilarityService similarityService = new SimilarityService(index, settings);
             // We cannot instantiate real analysis server at this point because the node might not have
             // been started yet. However, we don't really need real analyzers at this stage - so we can fake it
             try (AnalysisService analysisService = new FakeAnalysisService(index, settings)) {
-                try (MapperService mapperService = new MapperService(index, settings, analysisService, similarityLookupService, scriptService)) {
+                try (MapperService mapperService = new MapperService(index, settings, analysisService, similarityService, scriptService)) {
                     for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
                         MappingMetaData mappingMetaData = cursor.value;
                         mapperService.merge(mappingMetaData.type(), mappingMetaData.source(), false, false);
@@ -19,7 +19,7 @@
 
 package org.elasticsearch.cluster.metadata;
|
package org.elasticsearch.cluster.metadata;
|
||||||
|
|
||||||
import org.elasticsearch.cluster.routing.DjbHashFunction;
|
import org.elasticsearch.cluster.routing.Murmur3HashFunction;
|
||||||
import org.elasticsearch.common.component.AbstractComponent;
|
import org.elasticsearch.common.component.AbstractComponent;
|
||||||
import org.elasticsearch.common.inject.Inject;
|
import org.elasticsearch.common.inject.Inject;
|
||||||
import org.elasticsearch.common.math.MathUtils;
|
import org.elasticsearch.common.math.MathUtils;
|
||||||
@ -43,6 +43,6 @@ public class MetaDataService extends AbstractComponent {
|
|||||||
}
|
}
|
||||||
|
|
||||||
public Semaphore indexMetaDataLock(String index) {
|
public Semaphore indexMetaDataLock(String index) {
|
||||||
return indexMdLocks[MathUtils.mod(DjbHashFunction.DJB_HASH(index), indexMdLocks.length)];
|
return indexMdLocks[MathUtils.mod(Murmur3HashFunction.hash(index), indexMdLocks.length)];
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1,70 +0,0 @@
|
|||||||
/*
|
|
||||||
* Licensed to Elasticsearch under one or more contributor
|
|
||||||
* license agreements. See the NOTICE file distributed with
|
|
||||||
* this work for additional information regarding copyright
|
|
||||||
* ownership. Elasticsearch licenses this file to you under
|
|
||||||
* the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
* not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing,
|
|
||||||
* software distributed under the License is distributed on an
|
|
||||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
|
||||||
* KIND, either express or implied. See the License for the
|
|
||||||
* specific language governing permissions and limitations
|
|
||||||
* under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package org.elasticsearch.cluster.routing;
|
|
||||||
|
|
||||||
import org.elasticsearch.cluster.routing.HashFunction;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This class implements the efficient hash function
|
|
||||||
* developed by <i>Daniel J. Bernstein</i>.
|
|
||||||
*/
|
|
||||||
public class DjbHashFunction implements HashFunction {
|
|
||||||
|
|
||||||
public static int DJB_HASH(String value) {
|
|
||||||
long hash = 5381;
|
|
||||||
|
|
||||||
for (int i = 0; i < value.length(); i++) {
|
|
||||||
hash = ((hash << 5) + hash) + value.charAt(i);
|
|
||||||
}
|
|
||||||
|
|
||||||
return (int) hash;
|
|
||||||
}
|
|
||||||
|
|
||||||
public static int DJB_HASH(byte[] value, int offset, int length) {
|
|
||||||
long hash = 5381;
|
|
||||||
|
|
||||||
final int end = offset + length;
|
|
||||||
for (int i = offset; i < end; i++) {
|
|
||||||
hash = ((hash << 5) + hash) + value[i];
|
|
||||||
}
|
|
||||||
|
|
||||||
return (int) hash;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public int hash(String routing) {
|
|
||||||
return DJB_HASH(routing);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public int hash(String type, String id) {
|
|
||||||
long hash = 5381;
|
|
||||||
|
|
||||||
for (int i = 0; i < type.length(); i++) {
|
|
||||||
hash = ((hash << 5) + hash) + type.charAt(i);
|
|
||||||
}
|
|
||||||
|
|
||||||
for (int i = 0; i < id.length(); i++) {
|
|
||||||
hash = ((hash << 5) + hash) + id.charAt(i);
|
|
||||||
}
|
|
||||||
|
|
||||||
return (int) hash;
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,42 +0,0 @@
|
|||||||
/*
|
|
||||||
* Licensed to Elasticsearch under one or more contributor
|
|
||||||
* license agreements. See the NOTICE file distributed with
|
|
||||||
* this work for additional information regarding copyright
|
|
||||||
* ownership. Elasticsearch licenses this file to you under
|
|
||||||
* the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
* not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing,
|
|
||||||
* software distributed under the License is distributed on an
|
|
||||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
|
||||||
* KIND, either express or implied. See the License for the
|
|
||||||
* specific language governing permissions and limitations
|
|
||||||
* under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package org.elasticsearch.cluster.routing;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Simple hash function interface used for shard routing.
|
|
||||||
*/
|
|
||||||
public interface HashFunction {
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Calculate a hash value for routing
|
|
||||||
* @param routing String to calculate the hash value from
|
|
||||||
* @return hash value of the given routing string
|
|
||||||
*/
|
|
||||||
int hash(String routing);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Calculate a hash value for routing and its type
|
|
||||||
* @param type types name
|
|
||||||
* @param id String to calculate the hash value from
|
|
||||||
* @return hash value of the given type and routing string
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
int hash(String type, String id);
|
|
||||||
}
|
|
@ -20,15 +20,17 @@
|
|||||||
package org.elasticsearch.cluster.routing;
|
package org.elasticsearch.cluster.routing;
|
||||||
|
|
||||||
import org.apache.lucene.util.StringHelper;
|
import org.apache.lucene.util.StringHelper;
|
||||||
import org.elasticsearch.cluster.routing.HashFunction;
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Hash function based on the Murmur3 algorithm, which is the default as of Elasticsearch 2.0.
|
* Hash function based on the Murmur3 algorithm, which is the default as of Elasticsearch 2.0.
|
||||||
*/
|
*/
|
||||||
public class Murmur3HashFunction implements HashFunction {
|
public final class Murmur3HashFunction {
|
||||||
|
|
||||||
@Override
|
private Murmur3HashFunction() {
|
||||||
public int hash(String routing) {
|
//no instance
|
||||||
|
}
|
||||||
|
|
||||||
|
public static int hash(String routing) {
|
||||||
final byte[] bytesToHash = new byte[routing.length() * 2];
|
final byte[] bytesToHash = new byte[routing.length() * 2];
|
||||||
for (int i = 0; i < routing.length(); ++i) {
|
for (int i = 0; i < routing.length(); ++i) {
|
||||||
final char c = routing.charAt(i);
|
final char c = routing.charAt(i);
|
||||||
@ -37,12 +39,10 @@ public class Murmur3HashFunction implements HashFunction {
|
|||||||
bytesToHash[i * 2] = b1;
|
bytesToHash[i * 2] = b1;
|
||||||
bytesToHash[i * 2 + 1] = b2;
|
bytesToHash[i * 2 + 1] = b2;
|
||||||
}
|
}
|
||||||
return StringHelper.murmurhash3_x86_32(bytesToHash, 0, bytesToHash.length, 0);
|
return hash(bytesToHash, 0, bytesToHash.length);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
public static int hash(byte[] bytes, int offset, int length) {
|
||||||
public int hash(String type, String id) {
|
return StringHelper.murmurhash3_x86_32(bytes, offset, length, 0);
|
||||||
throw new UnsupportedOperationException();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -19,7 +19,6 @@
|
|||||||
|
|
||||||
package org.elasticsearch.cluster.routing;
|
package org.elasticsearch.cluster.routing;
|
||||||
|
|
||||||
import org.elasticsearch.Version;
|
|
||||||
import org.elasticsearch.cluster.ClusterState;
|
import org.elasticsearch.cluster.ClusterState;
|
||||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||||
@ -47,7 +46,6 @@ import java.util.Set;
|
|||||||
public class OperationRouting extends AbstractComponent {
|
public class OperationRouting extends AbstractComponent {
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
private final AwarenessAllocationDecider awarenessAllocationDecider;
|
private final AwarenessAllocationDecider awarenessAllocationDecider;
|
||||||
|
|
||||||
@Inject
|
@Inject
|
||||||
@ -196,9 +194,9 @@ public class OperationRouting extends AbstractComponent {
|
|||||||
// if not, then use it as the index
|
// if not, then use it as the index
|
||||||
String[] awarenessAttributes = awarenessAllocationDecider.awarenessAttributes();
|
String[] awarenessAttributes = awarenessAllocationDecider.awarenessAttributes();
|
||||||
if (awarenessAttributes.length == 0) {
|
if (awarenessAttributes.length == 0) {
|
||||||
return indexShard.activeInitializingShardsIt(DjbHashFunction.DJB_HASH(preference));
|
return indexShard.activeInitializingShardsIt(Murmur3HashFunction.hash(preference));
|
||||||
} else {
|
} else {
|
||||||
return indexShard.preferAttributesActiveInitializingShardsIt(awarenessAttributes, nodes, DjbHashFunction.DJB_HASH(preference));
|
return indexShard.preferAttributesActiveInitializingShardsIt(awarenessAttributes, nodes, Murmur3HashFunction.hash(preference));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -237,37 +235,13 @@ public class OperationRouting extends AbstractComponent {
|
|||||||
@SuppressForbidden(reason = "Math#abs is trappy")
|
@SuppressForbidden(reason = "Math#abs is trappy")
|
||||||
private int shardId(ClusterState clusterState, String index, String type, String id, @Nullable String routing) {
|
private int shardId(ClusterState clusterState, String index, String type, String id, @Nullable String routing) {
|
||||||
final IndexMetaData indexMetaData = indexMetaData(clusterState, index);
|
final IndexMetaData indexMetaData = indexMetaData(clusterState, index);
|
||||||
final Version createdVersion = indexMetaData.getCreationVersion();
|
|
||||||
final HashFunction hashFunction = indexMetaData.getRoutingHashFunction();
|
|
||||||
final boolean useType = indexMetaData.getRoutingUseType();
|
|
||||||
|
|
||||||
final int hash;
|
final int hash;
|
||||||
if (routing == null) {
|
if (routing == null) {
|
||||||
if (!useType) {
|
hash = Murmur3HashFunction.hash(id);
|
||||||
hash = hash(hashFunction, id);
|
|
||||||
} else {
|
|
||||||
hash = hash(hashFunction, type, id);
|
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
hash = hash(hashFunction, routing);
|
hash = Murmur3HashFunction.hash(routing);
|
||||||
}
|
}
|
||||||
if (createdVersion.onOrAfter(Version.V_2_0_0_beta1)) {
|
return MathUtils.mod(hash, indexMetaData.numberOfShards());
|
||||||
return MathUtils.mod(hash, indexMetaData.numberOfShards());
|
|
||||||
} else {
|
|
||||||
return Math.abs(hash % indexMetaData.numberOfShards());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
protected int hash(HashFunction hashFunction, String routing) {
|
|
||||||
return hashFunction.hash(routing);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Deprecated
|
|
||||||
protected int hash(HashFunction hashFunction, String type, String id) {
|
|
||||||
if (type == null || "_all".equals(type)) {
|
|
||||||
throw new IllegalArgumentException("Can't route an operation with no type and having type part of the routing (for backward comp)");
|
|
||||||
}
|
|
||||||
return hashFunction.hash(type, id);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
private void ensureNodeIdExists(DiscoveryNodes nodes, String nodeId) {
|
private void ensureNodeIdExists(DiscoveryNodes nodes, String nodeId) {
|
||||||
|
@ -19,13 +19,10 @@
|
|||||||
|
|
||||||
package org.elasticsearch.cluster.routing;
|
package org.elasticsearch.cluster.routing;
|
||||||
|
|
||||||
import com.google.common.collect.Iterators;
|
|
||||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||||
|
import org.elasticsearch.common.collect.Iterators;
|
||||||
|
|
||||||
import java.util.ArrayList;
|
import java.util.*;
|
||||||
import java.util.Collection;
|
|
||||||
import java.util.Iterator;
|
|
||||||
import java.util.List;
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* A {@link RoutingNode} represents a cluster node associated with a single {@link DiscoveryNode} including all shards
|
* A {@link RoutingNode} represents a cluster node associated with a single {@link DiscoveryNode} including all shards
|
||||||
@ -51,7 +48,7 @@ public class RoutingNode implements Iterable<ShardRouting> {
|
|||||||
|
|
||||||
@Override
|
@Override
|
||||||
public Iterator<ShardRouting> iterator() {
|
public Iterator<ShardRouting> iterator() {
|
||||||
return Iterators.unmodifiableIterator(shards.iterator());
|
return Collections.unmodifiableCollection(shards).iterator();
|
||||||
}
|
}
|
||||||
|
|
||||||
Iterator<ShardRouting> mutableIterator() {
|
Iterator<ShardRouting> mutableIterator() {
|
||||||
|
@ -21,7 +21,6 @@ package org.elasticsearch.cluster.routing;
|
|||||||
|
|
||||||
import com.carrotsearch.hppc.ObjectIntHashMap;
|
import com.carrotsearch.hppc.ObjectIntHashMap;
|
||||||
import com.carrotsearch.hppc.cursors.ObjectCursor;
|
import com.carrotsearch.hppc.cursors.ObjectCursor;
|
||||||
import com.google.common.collect.Iterators;
|
|
||||||
|
|
||||||
import org.apache.lucene.util.CollectionUtil;
|
import org.apache.lucene.util.CollectionUtil;
|
||||||
import org.elasticsearch.cluster.ClusterState;
|
import org.elasticsearch.cluster.ClusterState;
|
||||||
@ -153,7 +152,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
|
|||||||
|
|
||||||
@Override
|
@Override
|
||||||
public Iterator<RoutingNode> iterator() {
|
public Iterator<RoutingNode> iterator() {
|
||||||
return Iterators.unmodifiableIterator(nodesToShards.values().iterator());
|
return Collections.unmodifiableCollection(nodesToShards.values()).iterator();
|
||||||
}
|
}
|
||||||
|
|
||||||
public RoutingTable routingTable() {
|
public RoutingTable routingTable() {
|
||||||
|
690
core/src/main/java/org/elasticsearch/common/cache/Cache.java
vendored
Normal file
690
core/src/main/java/org/elasticsearch/common/cache/Cache.java
vendored
Normal file
@ -0,0 +1,690 @@
|
|||||||
|
/*
|
||||||
|
* Licensed to Elasticsearch under one or more contributor
|
||||||
|
* license agreements. See the NOTICE file distributed with
|
||||||
|
* this work for additional information regarding copyright
|
||||||
|
* ownership. Elasticsearch licenses this file to you under
|
||||||
|
* the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
* not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing,
|
||||||
|
* software distributed under the License is distributed on an
|
||||||
|
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
* KIND, either express or implied. See the License for the
|
||||||
|
* specific language governing permissions and limitations
|
||||||
|
* under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.elasticsearch.common.cache;
|
||||||
|
|
||||||
|
import org.elasticsearch.common.collect.Tuple;
|
||||||
|
import org.elasticsearch.common.util.concurrent.ReleasableLock;
|
||||||
|
|
||||||
|
import java.util.*;
|
||||||
|
import java.util.concurrent.ExecutionException;
|
||||||
|
import java.util.concurrent.atomic.LongAdder;
|
||||||
|
import java.util.concurrent.locks.ReadWriteLock;
|
||||||
|
import java.util.concurrent.locks.ReentrantLock;
|
||||||
|
import java.util.concurrent.locks.ReentrantReadWriteLock;
|
||||||
|
import java.util.function.ToLongBiFunction;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* A simple concurrent cache.
|
||||||
|
* <p>
|
||||||
|
* Cache is a simple concurrent cache that supports time-based and weight-based evictions, with notifications for all
|
||||||
|
* evictions. The design goals for this cache were simplicity and read performance. This means that we are willing to
|
||||||
|
* accept reduced write performance in exchange for easy-to-understand code. Cache statistics for hits, misses and
|
||||||
|
* evictions are exposed.
|
||||||
|
* <p>
|
||||||
|
* The design of the cache is relatively simple. The cache is segmented into 256 segments which are backed by HashMaps.
|
||||||
|
* Each segment is protected by a re-entrant read/write lock. The read/write locks permit multiple concurrent readers
|
||||||
|
* without contention, and the segments gives us write throughput without impacting readers (so readers are blocked only
|
||||||
|
* if they are reading a segment that a writer is writing to).
|
||||||
|
* <p>
|
||||||
|
* The LRU functionality is backed by a single doubly-linked list chaining the entries in order of insertion. This
|
||||||
|
* LRU list is protected by a lock that serializes all writes to it. There are opportunities for improvements
|
||||||
|
* here if write throughput is a concern.
|
||||||
|
* <ol>
|
||||||
|
* <li>LRU list mutations could be inserted into a blocking queue that a single thread is reading from
|
||||||
|
* and applying to the LRU list.</li>
|
||||||
|
* <li>Promotions could be deferred for entries that were "recently" promoted.</li>
|
||||||
|
* <li>Locks on the list could be taken per node being modified instead of globally.</li>
|
||||||
|
* </ol>
|
||||||
|
* <p>
|
||||||
|
* Evictions only occur after a mutation to the cache (meaning an entry promotion, a cache insertion, or a manual
|
||||||
|
* invalidation) or an explicit call to {@link #refresh()}.
|
||||||
|
*
|
||||||
|
* @param <K> The type of the keys
|
||||||
|
* @param <V> The type of the values
|
||||||
|
*/
|
||||||
|
public class Cache<K, V> {
|
||||||
|
// positive if entries have an expiration
|
||||||
|
private long expireAfterAccess = -1;
|
||||||
|
|
||||||
|
// true if entries can expire after access
|
||||||
|
private boolean entriesExpireAfterAccess;
|
||||||
|
|
||||||
|
// positive if entries have an expiration after write
|
||||||
|
private long expireAfterWrite = -1;
|
||||||
|
|
||||||
|
// true if entries can expire after initial insertion
|
||||||
|
private boolean entriesExpireAfterWrite;
|
||||||
|
|
||||||
|
// the number of entries in the cache
|
||||||
|
private int count = 0;
|
||||||
|
|
||||||
|
// the weight of the entries in the cache
|
||||||
|
private long weight = 0;
|
||||||
|
|
||||||
|
// the maximum weight that this cache supports
|
||||||
|
private long maximumWeight = -1;
|
||||||
|
|
||||||
|
// the weigher of entries
|
||||||
|
private ToLongBiFunction<K, V> weigher = (k, v) -> 1;
|
||||||
|
|
||||||
|
// the removal callback
|
||||||
|
private RemovalListener<K, V> removalListener = notification -> {
|
||||||
|
};
|
||||||
|
|
||||||
|
// use CacheBuilder to construct
|
||||||
|
Cache() {
|
||||||
|
}
|
||||||
|
|
||||||
|
void setExpireAfterAccess(long expireAfterAccess) {
|
||||||
|
if (expireAfterAccess <= 0) {
|
||||||
|
throw new IllegalArgumentException("expireAfterAccess <= 0");
|
||||||
|
}
|
||||||
|
this.expireAfterAccess = expireAfterAccess;
|
||||||
|
this.entriesExpireAfterAccess = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
void setExpireAfterWrite(long expireAfterWrite) {
|
||||||
|
if (expireAfterWrite <= 0) {
|
||||||
|
throw new IllegalArgumentException("expireAfterWrite <= 0");
|
||||||
|
}
|
||||||
|
this.expireAfterWrite = expireAfterWrite;
|
||||||
|
this.entriesExpireAfterWrite = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
void setMaximumWeight(long maximumWeight) {
|
||||||
|
if (maximumWeight < 0) {
|
||||||
|
throw new IllegalArgumentException("maximumWeight < 0");
|
||||||
|
}
|
||||||
|
this.maximumWeight = maximumWeight;
|
||||||
|
}
|
||||||
|
|
||||||
|
void setWeigher(ToLongBiFunction<K, V> weigher) {
|
||||||
|
Objects.requireNonNull(weigher);
|
||||||
|
this.weigher = weigher;
|
||||||
|
}
|
||||||
|
|
||||||
|
void setRemovalListener(RemovalListener<K, V> removalListener) {
|
||||||
|
Objects.requireNonNull(removalListener);
|
||||||
|
this.removalListener = removalListener;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The relative time used to track time-based evictions.
|
||||||
|
*
|
||||||
|
* @return the current relative time
|
||||||
|
*/
|
||||||
|
protected long now() {
|
||||||
|
// System.nanoTime takes non-negligible time, so we only use it if we need it
|
||||||
|
// use System.nanoTime because we want relative time, not absolute time
|
||||||
|
return entriesExpireAfterAccess || entriesExpireAfterWrite ? System.nanoTime() : 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
// the state of an entry in the LRU list
|
||||||
|
enum State {
|
||||||
|
NEW, EXISTING, DELETED
|
||||||
|
}
|
||||||
|
|
||||||
|
static class Entry<K, V> {
|
||||||
|
final K key;
|
||||||
|
final V value;
|
||||||
|
long writeTime;
|
||||||
|
volatile long accessTime;
|
||||||
|
Entry<K, V> before;
|
||||||
|
Entry<K, V> after;
|
||||||
|
State state = State.NEW;
|
||||||
|
|
||||||
|
public Entry(K key, V value, long writeTime) {
|
||||||
|
this.key = key;
|
||||||
|
this.value = value;
|
||||||
|
this.writeTime = this.accessTime = writeTime;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* A cache segment.
|
||||||
|
* <p>
|
||||||
|
* A CacheSegment is backed by a HashMap and is protected by a read/write lock.
|
||||||
|
*
|
||||||
|
* @param <K> the type of the keys
|
||||||
|
* @param <V> the type of the values
|
||||||
|
*/
|
||||||
|
private static class CacheSegment<K, V> {
|
||||||
|
// read/write lock protecting mutations to the segment
|
||||||
|
ReadWriteLock segmentLock = new ReentrantReadWriteLock();
|
||||||
|
|
||||||
|
ReleasableLock readLock = new ReleasableLock(segmentLock.readLock());
|
||||||
|
ReleasableLock writeLock = new ReleasableLock(segmentLock.writeLock());
|
||||||
|
|
||||||
|
Map<K, Entry<K, V>> map = new HashMap<>();
|
||||||
|
SegmentStats segmentStats = new SegmentStats();
|
||||||
|
|
||||||
|
/**
|
||||||
|
* get an entry from the segment
|
||||||
|
*
|
||||||
|
* @param key the key of the entry to get from the cache
|
||||||
|
* @param now the access time of this entry
|
||||||
|
* @return the entry if there was one, otherwise null
|
||||||
|
*/
|
||||||
|
Entry<K, V> get(K key, long now) {
|
||||||
|
Entry<K, V> entry;
|
||||||
|
try (ReleasableLock ignored = readLock.acquire()) {
|
||||||
|
entry = map.get(key);
|
||||||
|
}
|
||||||
|
if (entry != null) {
|
||||||
|
segmentStats.hit();
|
||||||
|
entry.accessTime = now;
|
||||||
|
} else {
|
||||||
|
segmentStats.miss();
|
||||||
|
}
|
||||||
|
return entry;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* put an entry into the segment
|
||||||
|
*
|
||||||
|
* @param key the key of the entry to add to the cache
|
||||||
|
* @param value the value of the entry to add to the cache
|
||||||
|
* @param now the access time of this entry
|
||||||
|
* @return a tuple of the new entry and the existing entry, if there was one otherwise null
|
||||||
|
*/
|
||||||
|
Tuple<Entry<K, V>, Entry<K, V>> put(K key, V value, long now) {
|
||||||
|
Entry<K, V> entry = new Entry<>(key, value, now);
|
||||||
|
Entry<K, V> existing;
|
||||||
|
try (ReleasableLock ignored = writeLock.acquire()) {
|
||||||
|
existing = map.put(key, entry);
|
||||||
|
}
|
||||||
|
return Tuple.tuple(entry, existing);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* remove an entry from the segment
|
||||||
|
*
|
||||||
|
* @param key the key of the entry to remove from the cache
|
||||||
|
* @return the removed entry if there was one, otherwise null
|
||||||
|
*/
|
||||||
|
Entry<K, V> remove(K key) {
|
||||||
|
Entry<K, V> entry;
|
||||||
|
try (ReleasableLock ignored = writeLock.acquire()) {
|
||||||
|
entry = map.remove(key);
|
||||||
|
}
|
||||||
|
if (entry != null) {
|
||||||
|
segmentStats.eviction();
|
||||||
|
}
|
||||||
|
return entry;
|
||||||
|
}
|
||||||
|
|
||||||
|
private static class SegmentStats {
|
||||||
|
private final LongAdder hits = new LongAdder();
|
||||||
|
private final LongAdder misses = new LongAdder();
|
||||||
|
private final LongAdder evictions = new LongAdder();
|
||||||
|
|
||||||
|
void hit() {
|
||||||
|
hits.increment();
|
||||||
|
}
|
||||||
|
|
||||||
|
void miss() {
|
||||||
|
misses.increment();
|
||||||
|
}
|
||||||
|
|
||||||
|
void eviction() {
|
||||||
|
evictions.increment();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public static final int NUMBER_OF_SEGMENTS = 256;
|
||||||
|
private final CacheSegment<K, V>[] segments = new CacheSegment[NUMBER_OF_SEGMENTS];
|
||||||
|
|
||||||
|
{
|
||||||
|
for (int i = 0; i < segments.length; i++) {
|
||||||
|
segments[i] = new CacheSegment<>();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Entry<K, V> head;
|
||||||
|
Entry<K, V> tail;
|
||||||
|
|
||||||
|
// lock protecting mutations to the LRU list
|
||||||
|
private ReleasableLock lruLock = new ReleasableLock(new ReentrantLock());
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the value to which the specified key is mapped, or null if this map contains no mapping for the key.
|
||||||
|
*
|
||||||
|
* @param key the key whose associated value is to be returned
|
||||||
|
* @return the value to which the specified key is mapped, or null if this map contains no mapping for the key
|
||||||
|
*/
|
||||||
|
public V get(K key) {
|
||||||
|
return get(key, now());
|
||||||
|
}
|
||||||
|
|
||||||
|
private V get(K key, long now) {
|
||||||
|
CacheSegment<K, V> segment = getCacheSegment(key);
|
||||||
|
Entry<K, V> entry = segment.get(key, now);
|
||||||
|
if (entry == null || isExpired(entry, now)) {
|
||||||
|
return null;
|
||||||
|
} else {
|
||||||
|
promote(entry, now);
|
||||||
|
return entry.value;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* If the specified key is not already associated with a value (or is mapped to null), attempts to compute its
|
||||||
|
* value using the given mapping function and enters it into this map unless null.
|
||||||
|
*
|
||||||
|
* @param key the key whose associated value is to be returned or computed for if non-existant
|
||||||
|
* @param loader the function to compute a value given a key
|
||||||
|
* @return the current (existing or computed) value associated with the specified key, or null if the computed
|
||||||
|
* value is null
|
||||||
|
* @throws ExecutionException thrown if loader throws an exception
|
||||||
|
*/
|
||||||
|
public V computeIfAbsent(K key, CacheLoader<K, V> loader) throws ExecutionException {
|
||||||
|
long now = now();
|
||||||
|
V value = get(key, now);
|
||||||
|
if (value == null) {
|
||||||
|
CacheSegment<K, V> segment = getCacheSegment(key);
|
||||||
|
// we synchronize against the segment lock; this is to avoid a scenario where another thread is inserting
|
||||||
|
// a value for the same key via put which would not be observed on this thread without a mechanism
|
||||||
|
// synchronizing the two threads; it is possible that the segment lock will be too expensive here (it blocks
|
||||||
|
// readers too!) so consider this as a possible place to optimize should contention be observed
|
||||||
|
try (ReleasableLock ignored = segment.writeLock.acquire()) {
|
||||||
|
value = get(key, now);
|
||||||
|
if (value == null) {
|
||||||
|
try {
|
||||||
|
value = loader.load(key);
|
||||||
|
} catch (Exception e) {
|
||||||
|
throw new ExecutionException(e);
|
||||||
|
}
|
||||||
|
if (value == null) {
|
||||||
|
throw new ExecutionException(new NullPointerException("loader returned a null value"));
|
||||||
|
}
|
||||||
|
put(key, value, now);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return value;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Associates the specified value with the specified key in this map. If the map previously contained a mapping for
|
||||||
|
* the key, the old value is replaced.
|
||||||
|
*
|
||||||
|
* @param key key with which the specified value is to be associated
|
||||||
|
* @param value value to be associated with the specified key
|
||||||
|
*/
|
||||||
|
public void put(K key, V value) {
|
||||||
|
long now = now();
|
||||||
|
put(key, value, now);
|
||||||
|
}
|
||||||
|
|
||||||
|
private void put(K key, V value, long now) {
|
||||||
|
CacheSegment<K, V> segment = getCacheSegment(key);
|
||||||
|
Tuple<Entry<K, V>, Entry<K, V>> tuple = segment.put(key, value, now);
|
||||||
|
boolean replaced = false;
|
||||||
|
try (ReleasableLock ignored = lruLock.acquire()) {
|
||||||
|
if (tuple.v2() != null && tuple.v2().state == State.EXISTING) {
|
||||||
|
if (unlink(tuple.v2())) {
|
||||||
|
replaced = true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
promote(tuple.v1(), now);
|
||||||
|
}
|
||||||
|
if (replaced) {
|
||||||
|
removalListener.onRemoval(new RemovalNotification(tuple.v2().key, tuple.v2().value, RemovalNotification.RemovalReason.REPLACED));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Invalidate the association for the specified key. A removal notification will be issued for invalidated
|
||||||
|
* entries with {@link org.elasticsearch.common.cache.RemovalNotification.RemovalReason} INVALIDATED.
|
||||||
|
*
|
||||||
|
* @param key the key whose mapping is to be invalidated from the cache
|
||||||
|
*/
|
||||||
|
public void invalidate(K key) {
|
||||||
|
CacheSegment<K, V> segment = getCacheSegment(key);
|
||||||
|
Entry<K, V> entry = segment.remove(key);
|
||||||
|
if (entry != null) {
|
||||||
|
try (ReleasableLock ignored = lruLock.acquire()) {
|
||||||
|
delete(entry, RemovalNotification.RemovalReason.INVALIDATED);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Invalidate all cache entries. A removal notification will be issued for invalidated entries with
|
||||||
|
* {@link org.elasticsearch.common.cache.RemovalNotification.RemovalReason} INVALIDATED.
|
||||||
|
*/
|
||||||
|
public void invalidateAll() {
|
||||||
|
Entry<K, V> h;
|
||||||
|
|
||||||
|
boolean[] haveSegmentLock = new boolean[NUMBER_OF_SEGMENTS];
|
||||||
|
try {
|
||||||
|
for (int i = 0; i < NUMBER_OF_SEGMENTS; i++) {
|
||||||
|
segments[i].segmentLock.writeLock().lock();
|
||||||
|
haveSegmentLock[i] = true;
|
||||||
|
}
|
||||||
|
try (ReleasableLock ignored = lruLock.acquire()) {
|
||||||
|
h = head;
|
||||||
|
Arrays.stream(segments).forEach(segment -> segment.map = new HashMap<>());
|
||||||
|
Entry<K, V> current = head;
|
||||||
|
while (current != null) {
|
||||||
|
current.state = State.DELETED;
|
||||||
|
current = current.after;
|
||||||
|
}
|
||||||
|
head = tail = null;
|
||||||
|
count = 0;
|
||||||
|
weight = 0;
|
||||||
|
}
|
||||||
|
} finally {
|
||||||
|
for (int i = NUMBER_OF_SEGMENTS - 1; i >= 0; i--) {
|
||||||
|
if (haveSegmentLock[i]) {
|
||||||
|
segments[i].segmentLock.writeLock().unlock();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
while (h != null) {
|
||||||
|
removalListener.onRemoval(new RemovalNotification<>(h.key, h.value, RemovalNotification.RemovalReason.INVALIDATED));
|
||||||
|
h = h.after;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Force any outstanding size-based and time-based evictions to occur
|
||||||
|
*/
|
||||||
|
public void refresh() {
|
||||||
|
long now = now();
|
||||||
|
try (ReleasableLock ignored = lruLock.acquire()) {
|
||||||
|
evict(now);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The number of entries in the cache.
|
||||||
|
*
|
||||||
|
* @return the number of entries in the cache
|
||||||
|
*/
|
||||||
|
public int count() {
|
||||||
|
return count;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The weight of the entries in the cache.
|
||||||
|
*
|
||||||
|
* @return the weight of the entries in the cache
|
||||||
|
*/
|
||||||
|
public long weight() {
|
||||||
|
return weight;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* An LRU sequencing of the keys in the cache that supports removal. This sequence is not protected from mutations
|
||||||
|
* to the cache (except for {@link Iterator#remove()}. The result of iteration under any other mutation is
|
||||||
|
* undefined.
|
||||||
|
*
|
||||||
|
* @return an LRU-ordered {@link Iterable} over the keys in the cache
|
||||||
|
*/
|
||||||
|
public Iterable<K> keys() {
|
||||||
|
return () -> new Iterator<K>() {
|
||||||
|
private CacheIterator iterator = new CacheIterator(head);
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean hasNext() {
|
||||||
|
return iterator.hasNext();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public K next() {
|
||||||
|
return iterator.next().key;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void remove() {
|
||||||
|
iterator.remove();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* An LRU sequencing of the values in the cache. This sequence is not protected from mutations
|
||||||
|
* to the cache. The result of iteration under mutation is undefined.
|
||||||
|
*
|
||||||
|
* @return an LRU-ordered {@link Iterable} over the values in the cache
|
||||||
|
*/
|
||||||
|
public Iterable<V> values() {
|
||||||
|
return () -> new Iterator<V>() {
|
||||||
|
private CacheIterator iterator = new CacheIterator(head);
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean hasNext() {
|
||||||
|
return iterator.hasNext();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public V next() {
|
||||||
|
return iterator.next().value;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
private class CacheIterator implements Iterator<Entry<K, V>> {
|
||||||
|
private Entry<K, V> current;
|
||||||
|
private Entry<K, V> next;
|
||||||
|
|
||||||
|
CacheIterator(Entry<K, V> head) {
|
||||||
|
current = null;
|
||||||
|
next = head;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean hasNext() {
|
||||||
|
return next != null;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Entry<K, V> next() {
|
||||||
|
current = next;
|
||||||
|
next = next.after;
|
||||||
|
return current;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void remove() {
|
||||||
|
Entry<K, V> entry = current;
|
||||||
|
if (entry != null) {
|
||||||
|
CacheSegment<K, V> segment = getCacheSegment(entry.key);
|
||||||
|
segment.remove(entry.key);
|
||||||
|
try (ReleasableLock ignored = lruLock.acquire()) {
|
||||||
|
current = null;
|
||||||
|
delete(entry, RemovalNotification.RemovalReason.INVALIDATED);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The cache statistics tracking hits, misses and evictions. These are taken on a best-effort basis meaning that
|
||||||
|
* they could be out-of-date mid-flight.
|
||||||
|
*
|
||||||
|
* @return the current cache statistics
|
||||||
|
*/
|
||||||
|
public CacheStats stats() {
|
||||||
|
long hits = 0;
|
||||||
|
long misses = 0;
|
||||||
|
long evictions = 0;
|
||||||
|
for (int i = 0; i < segments.length; i++) {
|
||||||
|
hits += segments[i].segmentStats.hits.longValue();
|
||||||
|
misses += segments[i].segmentStats.misses.longValue();
|
||||||
|
evictions += segments[i].segmentStats.evictions.longValue();
|
||||||
|
}
|
||||||
|
return new CacheStats(hits, misses, evictions);
|
||||||
|
}
|
||||||
|
|
||||||
|
public static class CacheStats {
|
||||||
|
private long hits;
|
||||||
|
private long misses;
|
||||||
|
private long evictions;
|
||||||
|
|
||||||
|
public CacheStats(long hits, long misses, long evictions) {
|
||||||
|
this.hits = hits;
|
||||||
|
this.misses = misses;
|
||||||
|
this.evictions = evictions;
|
||||||
|
}
|
||||||
|
|
||||||
|
public long getHits() {
|
||||||
|
return hits;
|
||||||
|
}
|
||||||
|
|
||||||
|
public long getMisses() {
|
||||||
|
return misses;
|
||||||
|
}
|
||||||
|
|
||||||
|
public long getEvictions() {
|
||||||
|
return evictions;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private boolean promote(Entry<K, V> entry, long now) {
|
||||||
|
boolean promoted = true;
|
||||||
|
try (ReleasableLock ignored = lruLock.acquire()) {
|
||||||
|
switch (entry.state) {
|
||||||
|
case DELETED:
|
||||||
|
promoted = false;
|
||||||
|
break;
|
||||||
|
case EXISTING:
|
||||||
|
relinkAtHead(entry);
|
||||||
|
break;
|
||||||
|
case NEW:
|
||||||
|
linkAtHead(entry);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
if (promoted) {
|
||||||
|
evict(now);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return promoted;
|
||||||
|
}
|
||||||
|
|
||||||
|
private void evict(long now) {
|
||||||
|
assert lruLock.isHeldByCurrentThread();
|
||||||
|
|
||||||
|
while (tail != null && shouldPrune(tail, now)) {
|
||||||
|
CacheSegment<K, V> segment = getCacheSegment(tail.key);
|
||||||
|
Entry<K, V> entry = tail;
|
||||||
|
if (segment != null) {
|
||||||
|
segment.remove(tail.key);
|
||||||
|
}
|
||||||
|
delete(entry, RemovalNotification.RemovalReason.EVICTED);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private void delete(Entry<K, V> entry, RemovalNotification.RemovalReason removalReason) {
|
||||||
|
assert lruLock.isHeldByCurrentThread();
|
||||||
|
|
||||||
|
if (unlink(entry)) {
|
||||||
|
removalListener.onRemoval(new RemovalNotification<>(entry.key, entry.value, removalReason));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private boolean shouldPrune(Entry<K, V> entry, long now) {
|
||||||
|
return exceedsWeight() || isExpired(entry, now);
|
||||||
|
}
|
||||||
|
|
||||||
|
private boolean exceedsWeight() {
|
||||||
|
return maximumWeight != -1 && weight > maximumWeight;
|
||||||
|
}
|
||||||
|
|
||||||
|
private boolean isExpired(Entry<K, V> entry, long now) {
|
||||||
|
return (entriesExpireAfterAccess && now - entry.accessTime > expireAfterAccess) ||
|
||||||
|
(entriesExpireAfterWrite && now - entry.writeTime > expireAfterWrite);
|
||||||
|
}
|
||||||
|
|
||||||
|
private boolean unlink(Entry<K, V> entry) {
|
||||||
|
assert lruLock.isHeldByCurrentThread();
|
||||||
|
|
||||||
|
if (entry.state == State.EXISTING) {
|
||||||
|
final Entry<K, V> before = entry.before;
|
||||||
|
final Entry<K, V> after = entry.after;
|
||||||
|
|
||||||
|
if (before == null) {
|
||||||
|
// removing the head
|
||||||
|
assert head == entry;
|
||||||
|
head = after;
|
||||||
|
if (head != null) {
|
||||||
|
head.before = null;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// removing inner element
|
||||||
|
before.after = after;
|
||||||
|
entry.before = null;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (after == null) {
|
||||||
|
// removing tail
|
||||||
|
assert tail == entry;
|
||||||
|
tail = before;
|
||||||
|
if (tail != null) {
|
||||||
|
tail.after = null;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// removing inner element
|
||||||
|
after.before = before;
|
||||||
|
entry.after = null;
|
||||||
|
}
|
||||||
|
|
||||||
|
count--;
|
||||||
|
weight -= weigher.applyAsLong(entry.key, entry.value);
|
||||||
|
entry.state = State.DELETED;
|
||||||
|
return true;
|
||||||
|
} else {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private void linkAtHead(Entry<K, V> entry) {
|
||||||
|
assert lruLock.isHeldByCurrentThread();
|
||||||
|
|
||||||
|
Entry<K, V> h = head;
|
||||||
|
entry.before = null;
|
||||||
|
entry.after = head;
|
||||||
|
head = entry;
|
||||||
|
if (h == null) {
|
||||||
|
tail = entry;
|
||||||
|
} else {
|
||||||
|
h.before = entry;
|
||||||
|
}
|
||||||
|
|
||||||
|
count++;
|
||||||
|
weight += weigher.applyAsLong(entry.key, entry.value);
|
||||||
|
entry.state = State.EXISTING;
|
||||||
|
}
|
||||||
|
|
||||||
|
private void relinkAtHead(Entry<K, V> entry) {
|
||||||
|
assert lruLock.isHeldByCurrentThread();
|
||||||
|
|
||||||
|
if (head != entry) {
|
||||||
|
unlink(entry);
|
||||||
|
linkAtHead(entry);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private CacheSegment<K, V> getCacheSegment(K key) {
|
||||||
|
return segments[key.hashCode() & 0xff];
|
||||||
|
}
|
||||||
|
}
|
94
core/src/main/java/org/elasticsearch/common/cache/CacheBuilder.java
vendored
Normal file
94
core/src/main/java/org/elasticsearch/common/cache/CacheBuilder.java
vendored
Normal file
@ -0,0 +1,94 @@
|
|||||||
|
/*
|
||||||
|
* Licensed to Elasticsearch under one or more contributor
|
||||||
|
* license agreements. See the NOTICE file distributed with
|
||||||
|
* this work for additional information regarding copyright
|
||||||
|
* ownership. Elasticsearch licenses this file to you under
|
||||||
|
* the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
* not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing,
|
||||||
|
* software distributed under the License is distributed on an
|
||||||
|
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
* KIND, either express or implied. See the License for the
|
||||||
|
* specific language governing permissions and limitations
|
||||||
|
* under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.elasticsearch.common.cache;
|
||||||
|
|
||||||
|
import java.util.Objects;
|
||||||
|
import java.util.function.ToLongBiFunction;
|
||||||
|
|
||||||
|
public class CacheBuilder<K, V> {
|
||||||
|
private long maximumWeight = -1;
|
||||||
|
private long expireAfterAccess = -1;
|
||||||
|
private long expireAfterWrite = -1;
|
||||||
|
private ToLongBiFunction<K, V> weigher;
|
||||||
|
private RemovalListener<K, V> removalListener;
|
||||||
|
|
||||||
|
public static <K, V> CacheBuilder<K, V> builder() {
|
||||||
|
return new CacheBuilder<>();
|
||||||
|
}
|
||||||
|
|
||||||
|
private CacheBuilder() {
|
||||||
|
}
|
||||||
|
|
||||||
|
public CacheBuilder<K, V> setMaximumWeight(long maximumWeight) {
|
||||||
|
if (maximumWeight < 0) {
|
||||||
|
throw new IllegalArgumentException("maximumWeight < 0");
|
||||||
|
}
|
||||||
|
this.maximumWeight = maximumWeight;
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
public CacheBuilder<K, V> setExpireAfterAccess(long expireAfterAccess) {
|
||||||
|
if (expireAfterAccess <= 0) {
|
||||||
|
throw new IllegalArgumentException("expireAfterAccess <= 0");
|
||||||
|
}
|
||||||
|
this.expireAfterAccess = expireAfterAccess;
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
public CacheBuilder<K, V> setExpireAfterWrite(long expireAfterWrite) {
|
||||||
|
if (expireAfterWrite <= 0) {
|
||||||
|
throw new IllegalArgumentException("expireAfterWrite <= 0");
|
||||||
|
}
|
||||||
|
this.expireAfterWrite = expireAfterWrite;
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
public CacheBuilder<K, V> weigher(ToLongBiFunction<K, V> weigher) {
|
||||||
|
Objects.requireNonNull(weigher);
|
||||||
|
this.weigher = weigher;
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
public CacheBuilder<K, V> removalListener(RemovalListener<K, V> removalListener) {
|
||||||
|
Objects.requireNonNull(removalListener);
|
||||||
|
this.removalListener = removalListener;
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
public Cache<K, V> build() {
|
||||||
|
Cache<K, V> cache = new Cache();
|
||||||
|
if (maximumWeight != -1) {
|
||||||
|
cache.setMaximumWeight(maximumWeight);
|
||||||
|
}
|
||||||
|
if (expireAfterAccess != -1) {
|
||||||
|
cache.setExpireAfterAccess(expireAfterAccess);
|
||||||
|
}
|
||||||
|
if (expireAfterWrite != -1) {
|
||||||
|
cache.setExpireAfterWrite(expireAfterWrite);
|
||||||
|
}
|
||||||
|
if (weigher != null) {
|
||||||
|
cache.setWeigher(weigher);
|
||||||
|
}
|
||||||
|
if (removalListener != null) {
|
||||||
|
cache.setRemovalListener(removalListener);
|
||||||
|
}
|
||||||
|
return cache;
|
||||||
|
}
|
||||||
|
}
|
@ -17,20 +17,9 @@
|
|||||||
* under the License.
|
* under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
package org.elasticsearch.cluster.routing;
|
package org.elasticsearch.common.cache;
|
||||||
|
|
||||||
/**
|
@FunctionalInterface
|
||||||
* This class implements a simple hash function based on Java Build-In {@link Object#hashCode()}
|
public interface CacheLoader<K, V> {
|
||||||
*/
|
V load(K key) throws Exception;
|
||||||
public class SimpleHashFunction implements HashFunction {
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public int hash(String routing) {
|
|
||||||
return routing.hashCode();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public int hash(String type, String id) {
|
|
||||||
return type.hashCode() + 31 * id.hashCode();
|
|
||||||
}
|
|
||||||
}
|
}
|
@ -17,23 +17,9 @@
|
|||||||
* under the License.
|
* under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
package org.elasticsearch.index;
|
package org.elasticsearch.common.cache;
|
||||||
|
|
||||||
import org.elasticsearch.common.inject.AbstractModule;
|
@FunctionalInterface
|
||||||
|
public interface RemovalListener<K, V> {
|
||||||
/**
|
void onRemoval(RemovalNotification<K, V> notification);
|
||||||
*
|
}
|
||||||
*/
|
|
||||||
public class LocalNodeIdModule extends AbstractModule {
|
|
||||||
|
|
||||||
private final String localNodeId;
|
|
||||||
|
|
||||||
public LocalNodeIdModule(String localNodeId) {
|
|
||||||
this.localNodeId = localNodeId;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
protected void configure() {
|
|
||||||
bind(String.class).annotatedWith(LocalNodeId.class).toInstance(localNodeId);
|
|
||||||
}
|
|
||||||
}
|
|
@ -17,24 +17,30 @@
|
|||||||
* under the License.
|
* under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
package org.elasticsearch.index;
|
package org.elasticsearch.common.cache;
|
||||||
|
|
||||||
import org.elasticsearch.common.inject.BindingAnnotation;
|
public class RemovalNotification<K, V> {
|
||||||
|
public enum RemovalReason {REPLACED, INVALIDATED, EVICTED}
|
||||||
|
|
||||||
import java.lang.annotation.Documented;
|
private final K key;
|
||||||
import java.lang.annotation.Retention;
|
private final V value;
|
||||||
import java.lang.annotation.Target;
|
private final RemovalReason removalReason;
|
||||||
|
|
||||||
import static java.lang.annotation.ElementType.FIELD;
|
public RemovalNotification(K key, V value, RemovalReason removalReason) {
|
||||||
import static java.lang.annotation.ElementType.PARAMETER;
|
this.key = key;
|
||||||
import static java.lang.annotation.RetentionPolicy.RUNTIME;
|
this.value = value;
|
||||||
|
this.removalReason = removalReason;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
public K getKey() {
|
||||||
*
|
return key;
|
||||||
*/
|
}
|
||||||
@BindingAnnotation
|
|
||||||
@Target({FIELD, PARAMETER})
|
public V getValue() {
|
||||||
@Retention(RUNTIME)
|
return value;
|
||||||
@Documented
|
}
|
||||||
public @interface LocalNodeId {
|
|
||||||
|
public RemovalReason getRemovalReason() {
|
||||||
|
return removalReason;
|
||||||
|
}
|
||||||
}
|
}
|
@ -0,0 +1,176 @@
|
|||||||
|
/*
|
||||||
|
* Licensed to Elasticsearch under one or more contributor
|
||||||
|
* license agreements. See the NOTICE file distributed with
|
||||||
|
* this work for additional information regarding copyright
|
||||||
|
* ownership. Elasticsearch licenses this file to you under
|
||||||
|
* the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
* not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing,
|
||||||
|
* software distributed under the License is distributed on an
|
||||||
|
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
* KIND, either express or implied. See the License for the
|
||||||
|
* specific language governing permissions and limitations
|
||||||
|
* under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.elasticsearch.common.collect;
|
||||||
|
|
||||||
|
import java.util.ArrayDeque;
|
||||||
|
import java.util.Collection;
|
||||||
|
import java.util.Iterator;
|
||||||
|
import java.util.Queue;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* An {@code EvictingQueue} is a non-blocking queue which is limited to a maximum size; when new elements are added to a
|
||||||
|
* full queue, elements are evicted from the head of the queue to accommodate the new elements.
|
||||||
|
*
|
||||||
|
* @param <T> The type of elements in the queue.
|
||||||
|
*/
|
||||||
|
public class EvictingQueue<T> implements Queue<T> {
|
||||||
|
private final int maximumSize;
|
||||||
|
private final ArrayDeque<T> queue;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Construct a new {@code EvictingQueue} that holds {@code maximumSize} elements.
|
||||||
|
*
|
||||||
|
* @param maximumSize The maximum number of elements that the queue can hold
|
||||||
|
* @throws IllegalArgumentException if {@code maximumSize} is less than zero
|
||||||
|
*/
|
||||||
|
public EvictingQueue(int maximumSize) {
|
||||||
|
if (maximumSize < 0) {
|
||||||
|
throw new IllegalArgumentException("maximumSize < 0");
|
||||||
|
}
|
||||||
|
this.maximumSize = maximumSize;
|
||||||
|
this.queue = new ArrayDeque<>(maximumSize);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @return the number of additional elements that the queue can accommodate before evictions occur
|
||||||
|
*/
|
||||||
|
public int remainingCapacity() {
|
||||||
|
return this.maximumSize - this.size();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Add the given element to the queue, possibly forcing an eviction from the head if {@link #remainingCapacity()} is
|
||||||
|
* zero.
|
||||||
|
*
|
||||||
|
* @param t the element to add
|
||||||
|
* @return true if the element was added (always the case for {@code EvictingQueue}
|
||||||
|
*/
|
||||||
|
@Override
|
||||||
|
public boolean add(T t) {
|
||||||
|
if (maximumSize == 0) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
if (queue.size() == maximumSize) {
|
||||||
|
queue.remove();
|
||||||
|
}
|
||||||
|
queue.add(t);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @see #add(Object)
|
||||||
|
*/
|
||||||
|
@Override
|
||||||
|
public boolean offer(T t) {
|
||||||
|
return add(t);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public T remove() {
|
||||||
|
return queue.remove();
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public T poll() {
|
||||||
|
return queue.poll();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public T element() {
|
||||||
|
return queue.element();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public T peek() {
|
||||||
|
return queue.peek();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public int size() {
|
||||||
|
return queue.size();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean isEmpty() {
|
||||||
|
return queue.isEmpty();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean contains(Object o) {
|
||||||
|
return queue.contains(o);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Iterator<T> iterator() {
|
||||||
|
return queue.iterator();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Object[] toArray() {
|
||||||
|
return queue.toArray();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public <T1> T1[] toArray(T1[] a) {
|
||||||
|
return queue.toArray(a);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean remove(Object o) {
|
||||||
|
return queue.remove(o);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean containsAll(Collection<?> c) {
|
||||||
|
return queue.containsAll(c);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Add the given elements to the queue, possibly forcing evictions from the head if {@link #remainingCapacity()} is
|
||||||
|
* zero or becomes zero during the execution of this method.
|
||||||
|
*
|
||||||
|
* @param c the collection of elements to add
|
||||||
|
* @return true if any elements were added to the queue
|
||||||
|
*/
|
||||||
|
@Override
|
||||||
|
public boolean addAll(Collection<? extends T> c) {
|
||||||
|
boolean modified = false;
|
||||||
|
for (T e : c)
|
||||||
|
if (add(e))
|
||||||
|
modified = true;
|
||||||
|
return modified;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean removeAll(Collection<?> c) {
|
||||||
|
return queue.removeAll(c);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean retainAll(Collection<?> c) {
|
||||||
|
return queue.retainAll(c);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void clear() {
|
||||||
|
queue.clear();
|
||||||
|
}
|
||||||
|
}
|
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.collect;
+
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+
+public class Iterators {
+    public static <T> Iterator<T> concat(Iterator<? extends T>... iterators) {
+        if (iterators == null) {
+            throw new NullPointerException("iterators");
+        }
+
+        return new ConcatenatedIterator<>(iterators);
+    }
+
+    static class ConcatenatedIterator<T> implements Iterator<T> {
+        private final Iterator<? extends T>[] iterators;
+        private int index = 0;
+
+        public ConcatenatedIterator(Iterator<? extends T>... iterators) {
+            if (iterators == null) {
+                throw new NullPointerException("iterators");
+            }
+            for (int i = 0; i < iterators.length; i++) {
+                if (iterators[i] == null) {
+                    throw new NullPointerException("iterators[" + i + "]");
+                }
+            }
+            this.iterators = iterators;
+        }
+
+        @Override
+        public boolean hasNext() {
+            boolean hasNext = false;
+            while (index < iterators.length && !(hasNext = iterators[index].hasNext())) {
+                index++;
+            }
+
+            return hasNext;
+        }
+
+        @Override
+        public T next() {
+            if (!hasNext()) {
+                throw new NoSuchElementException();
+            }
+            return iterators[index].next();
+        }
+    }
+}
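A short usage sketch of the new concat helper (the demo class below is ours; only `Iterators.concat` comes from the change):

```java
import org.elasticsearch.common.collect.Iterators;

import java.util.Arrays;
import java.util.Iterator;

public class ConcatSketch {
    public static void main(String[] args) {
        // The second iterator is consumed only after the first is exhausted.
        Iterator<String> it = Iterators.concat(
                Arrays.asList("a", "b").iterator(),
                Arrays.asList("c").iterator());
        while (it.hasNext()) {
            System.out.println(it.next()); // prints a, b, c
        }
    }
}
```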
|
@@ -19,12 +19,6 @@
 
 package org.elasticsearch.common.geo;
 
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Writeable;
-
-import java.io.IOException;
-
 import org.apache.lucene.util.BitUtil;
 import org.apache.lucene.util.XGeoHashUtils;
 import org.apache.lucene.util.XGeoUtils;
@@ -32,15 +26,12 @@ import org.apache.lucene.util.XGeoUtils;
 /**
  *
  */
-public final class GeoPoint implements Writeable<GeoPoint> {
+public final class GeoPoint {
 
     private double lat;
     private double lon;
     private final static double TOLERANCE = XGeoUtils.TOLERANCE;
 
-    // for serialization purposes
-    private static final GeoPoint PROTOTYPE = new GeoPoint(Double.NaN, Double.NaN);
-
     public GeoPoint() {
     }
 
@@ -179,21 +170,4 @@ public final class GeoPoint implements Writeable<GeoPoint> {
     public static GeoPoint fromIndexLong(long indexLong) {
         return new GeoPoint().resetFromIndexHash(indexLong);
     }
-
-    @Override
-    public GeoPoint readFrom(StreamInput in) throws IOException {
-        double lat = in.readDouble();
-        double lon = in.readDouble();
-        return new GeoPoint(lat, lon);
-    }
-
-    public static GeoPoint readGeoPointFrom(StreamInput in) throws IOException {
-        return PROTOTYPE.readFrom(in);
-    }
-
-    @Override
-    public void writeTo(StreamOutput out) throws IOException {
-        out.writeDouble(lat);
-        out.writeDouble(lon);
-    }
 }
|
@@ -1,3 +1,19 @@
+/*
+ * Copyright (C) 2010 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 package org.elasticsearch.common.inject.internal;
 
 import java.lang.annotation.Annotation;
|
@@ -19,9 +19,8 @@
 
 package org.elasticsearch.common.io;
 
-import com.google.common.collect.Iterators;
-
 import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.common.collect.Iterators;
 import org.elasticsearch.common.logging.ESLogger;
 
 import java.io.BufferedReader;
@@ -35,6 +34,7 @@ import java.nio.file.*;
 import java.nio.file.attribute.BasicFileAttributes;
 import java.util.Arrays;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.stream.StreamSupport;
 
 import static java.nio.file.FileVisitResult.CONTINUE;
 import static java.nio.file.FileVisitResult.SKIP_SUBTREE;
@@ -328,7 +328,7 @@ public final class FileSystemUtils {
      */
     public static Path[] files(Path from, DirectoryStream.Filter<Path> filter) throws IOException {
         try (DirectoryStream<Path> stream = Files.newDirectoryStream(from, filter)) {
-            return Iterators.toArray(stream.iterator(), Path.class);
+            return toArray(stream);
         }
     }
 
@@ -337,7 +337,7 @@ public final class FileSystemUtils {
      */
     public static Path[] files(Path directory) throws IOException {
         try (DirectoryStream<Path> stream = Files.newDirectoryStream(directory)) {
-            return Iterators.toArray(stream.iterator(), Path.class);
+            return toArray(stream);
         }
     }
 
@@ -346,8 +346,12 @@ public final class FileSystemUtils {
      */
     public static Path[] files(Path directory, String glob) throws IOException {
         try (DirectoryStream<Path> stream = Files.newDirectoryStream(directory, glob)) {
-            return Iterators.toArray(stream.iterator(), Path.class);
+            return toArray(stream);
         }
     }
 
+    private static Path[] toArray(DirectoryStream<Path> stream) {
+        return StreamSupport.stream(stream.spliterator(), false).toArray(length -> new Path[length]);
+    }
+
 }
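The new private `toArray` helper relies only on the JDK: a `DirectoryStream` is an `Iterable`, so `StreamSupport` can materialize it into a typed array without Guava. The same pattern shown standalone on an ordinary `Iterable` (the demo names below are ours):

```java
import java.util.Arrays;
import java.util.stream.StreamSupport;

public class ToArraySketch {
    public static void main(String[] args) {
        Iterable<String> iterable = Arrays.asList("a", "b", "c");
        // Same shape as the toArray(DirectoryStream<Path>) helper above, but for String.
        String[] array = StreamSupport.stream(iterable.spliterator(), false)
                .toArray(length -> new String[length]);
        System.out.println(array.length); // 3
    }
}
```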
|
@@ -31,6 +31,7 @@ import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.geo.GeoPoint;
 import org.elasticsearch.common.text.StringAndBytesText;
 import org.elasticsearch.common.text.Text;
 import org.elasticsearch.index.query.QueryBuilder;
@@ -336,19 +337,6 @@ public abstract class StreamInput extends InputStream {
     @Override
     public abstract void close() throws IOException;
 
-//    // IS
-//
-//    @Override public int read() throws IOException {
-//        return readByte();
-//    }
-//
-//    // Here, we assume that we always can read the full byte array
-//
-//    @Override public int read(byte[] b, int off, int len) throws IOException {
-//        readBytes(b, off, len);
-//        return len;
-//    }
-
     public String[] readStringArray() throws IOException {
         int size = readVInt();
         if (size == 0) {
@@ -449,11 +437,20 @@ public abstract class StreamInput extends InputStream {
                 return readDoubleArray();
             case 21:
                 return readBytesRef();
+            case 22:
+                return readGeoPoint();
             default:
                 throw new IOException("Can't read unknown type [" + type + "]");
         }
     }
 
+    /**
+     * Reads a {@link GeoPoint} from this stream input
+     */
+    public GeoPoint readGeoPoint() throws IOException {
+        return new GeoPoint(readDouble(), readDouble());
+    }
+
     public int[] readIntArray() throws IOException {
         int length = readVInt();
         int[] values = new int[length];
|
@@ -30,6 +30,7 @@ import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.geo.GeoPoint;
 import org.elasticsearch.common.text.Text;
 import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;
@@ -422,6 +423,9 @@ public abstract class StreamOutput extends OutputStream {
         } else if (value instanceof BytesRef) {
             writeByte((byte) 21);
             writeBytesRef((BytesRef) value);
+        } else if (type == GeoPoint.class) {
+            writeByte((byte) 22);
+            writeGeoPoint((GeoPoint) value);
         } else {
             throw new IOException("Can't write type [" + type + "]");
         }
@@ -467,14 +471,6 @@ public abstract class StreamOutput extends OutputStream {
         }
     }
 
-    private static int parseIntSafe(String val, int defaultVal) {
-        try {
-            return Integer.parseInt(val);
-        } catch (NumberFormatException ex) {
-            return defaultVal;
-        }
-    }
-
     public void writeThrowable(Throwable throwable) throws IOException {
         if (throwable == null) {
             writeBoolean(false);
@@ -596,4 +592,12 @@ public abstract class StreamOutput extends OutputStream {
     public void writeScoreFunction(ScoreFunctionBuilder<?> scoreFunctionBuilder) throws IOException {
         writeNamedWriteable(scoreFunctionBuilder);
     }
+
+    /**
+     * Writes the given {@link GeoPoint} to the stream
+     */
+    public void writeGeoPoint(GeoPoint geoPoint) throws IOException {
+        writeDouble(geoPoint.lat());
+        writeDouble(geoPoint.lon());
+    }
 }
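`writeGeoPoint`/`readGeoPoint` above encode a point as two doubles, latitude first, then longitude. A standalone illustration of that field order using plain JDK streams (the byte-level double encoding of StreamOutput is not necessarily the same as DataOutputStream; only the ordering is the point here):

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class GeoPointOrderSketch {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
            out.writeDouble(48.8566); // lat, written first as in writeGeoPoint above
            out.writeDouble(2.3522);  // lon, written second
        }
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            double lat = in.readDouble(); // read back in the same order as readGeoPoint
            double lon = in.readDouble();
            System.out.println(lat + ", " + lon);
        }
    }
}
```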
|
@ -46,14 +46,11 @@ import org.elasticsearch.common.util.iterable.Iterables;
|
|||||||
import org.elasticsearch.index.analysis.AnalyzerScope;
|
import org.elasticsearch.index.analysis.AnalyzerScope;
|
||||||
import org.elasticsearch.index.analysis.NamedAnalyzer;
|
import org.elasticsearch.index.analysis.NamedAnalyzer;
|
||||||
import org.elasticsearch.index.fielddata.IndexFieldData;
|
import org.elasticsearch.index.fielddata.IndexFieldData;
|
||||||
import org.elasticsearch.search.internal.SearchContext;
|
|
||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.text.ParseException;
|
import java.text.ParseException;
|
||||||
import java.util.*;
|
import java.util.*;
|
||||||
|
|
||||||
import static org.elasticsearch.common.lucene.search.NoopCollector.NOOP_COLLECTOR;
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
@ -229,27 +226,6 @@ public class Lucene {
|
|||||||
}.run();
|
}.run();
|
||||||
}
|
}
|
||||||
|
|
||||||
public static long count(IndexSearcher searcher, Query query) throws IOException {
|
|
||||||
return searcher.count(query);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Performs a count on the <code>searcher</code> for <code>query</code>. Terminates
|
|
||||||
* early when the count has reached <code>terminateAfter</code>
|
|
||||||
*/
|
|
||||||
public static long count(IndexSearcher searcher, Query query, int terminateAfterCount) throws IOException {
|
|
||||||
EarlyTerminatingCollector countCollector = createCountBasedEarlyTerminatingCollector(terminateAfterCount);
|
|
||||||
countWithEarlyTermination(searcher, query, countCollector);
|
|
||||||
return countCollector.count();
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates count based early termination collector with a threshold of <code>maxCountHits</code>
|
|
||||||
*/
|
|
||||||
public final static EarlyTerminatingCollector createCountBasedEarlyTerminatingCollector(int maxCountHits) {
|
|
||||||
return new EarlyTerminatingCollector(maxCountHits);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Wraps <code>delegate</code> with count based early termination collector with a threshold of <code>maxCountHits</code>
|
* Wraps <code>delegate</code> with count based early termination collector with a threshold of <code>maxCountHits</code>
|
||||||
*/
|
*/
|
||||||
@ -265,99 +241,27 @@ public class Lucene {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Performs an exists (count > 0) query on the <code>searcher</code> for <code>query</code>
|
* Check whether there is one or more documents matching the provided query.
|
||||||
* with <code>filter</code> using the given <code>collector</code>
|
|
||||||
*
|
|
||||||
* The <code>collector</code> can be instantiated using <code>Lucene.createExistsCollector()</code>
|
|
||||||
*/
|
*/
|
||||||
public static boolean exists(IndexSearcher searcher, Query query, Filter filter,
|
public static boolean exists(IndexSearcher searcher, Query query) throws IOException {
|
||||||
EarlyTerminatingCollector collector) throws IOException {
|
final Weight weight = searcher.createNormalizedWeight(query, false);
|
||||||
collector.reset();
|
// the scorer API should be more efficient at stopping after the first
|
||||||
countWithEarlyTermination(searcher, filter, query, collector);
|
// match than the bulk scorer API
|
||||||
return collector.exists();
|
for (LeafReaderContext context : searcher.getIndexReader().leaves()) {
|
||||||
}
|
final Scorer scorer = weight.scorer(context);
|
||||||
|
if (scorer == null) {
|
||||||
|
continue;
|
||||||
/**
|
}
|
||||||
* Performs an exists (count > 0) query on the <code>searcher</code> for <code>query</code>
|
final Bits liveDocs = context.reader().getLiveDocs();
|
||||||
* using the given <code>collector</code>
|
for (int doc = scorer.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = scorer.nextDoc()) {
|
||||||
*
|
if (liveDocs == null || liveDocs.get(doc)) {
|
||||||
* The <code>collector</code> can be instantiated using <code>Lucene.createExistsCollector()</code>
|
return true;
|
||||||
*/
|
}
|
||||||
public static boolean exists(IndexSearcher searcher, Query query, EarlyTerminatingCollector collector) throws IOException {
|
|
||||||
collector.reset();
|
|
||||||
countWithEarlyTermination(searcher, query, collector);
|
|
||||||
return collector.exists();
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Calls <code>countWithEarlyTermination(searcher, null, query, collector)</code>
|
|
||||||
*/
|
|
||||||
public static boolean countWithEarlyTermination(IndexSearcher searcher, Query query,
|
|
||||||
EarlyTerminatingCollector collector) throws IOException {
|
|
||||||
return countWithEarlyTermination(searcher, null, query, collector);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Performs a count on <code>query</code> and <code>filter</code> with early termination using <code>searcher</code>.
|
|
||||||
* The early termination threshold is specified by the provided <code>collector</code>
|
|
||||||
*/
|
|
||||||
public static boolean countWithEarlyTermination(IndexSearcher searcher, Filter filter, Query query,
|
|
||||||
EarlyTerminatingCollector collector) throws IOException {
|
|
||||||
try {
|
|
||||||
if (filter == null) {
|
|
||||||
searcher.search(query, collector);
|
|
||||||
} else {
|
|
||||||
searcher.search(query, filter, collector);
|
|
||||||
}
|
}
|
||||||
} catch (EarlyTerminationException e) {
|
|
||||||
// early termination
|
|
||||||
return true;
|
|
||||||
}
|
}
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* Performs an exists (count > 0) query on the searcher from the <code>searchContext</code> for <code>query</code>
|
|
||||||
* using the given <code>collector</code>
|
|
||||||
*
|
|
||||||
* The <code>collector</code> can be instantiated using <code>Lucene.createExistsCollector()</code>
|
|
||||||
*/
|
|
||||||
public static boolean exists(SearchContext searchContext, Query query, EarlyTerminatingCollector collector) throws IOException {
|
|
||||||
collector.reset();
|
|
||||||
try {
|
|
||||||
searchContext.searcher().search(query, collector);
|
|
||||||
} catch (EarlyTerminationException e) {
|
|
||||||
// ignore, just early termination...
|
|
||||||
} finally {
|
|
||||||
searchContext.clearReleasables(SearchContext.Lifetime.COLLECTION);
|
|
||||||
}
|
|
||||||
return collector.exists();
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates an {@link org.elasticsearch.common.lucene.Lucene.EarlyTerminatingCollector}
|
|
||||||
* with a threshold of <code>1</code>
|
|
||||||
*/
|
|
||||||
public final static EarlyTerminatingCollector createExistsCollector() {
|
|
||||||
return createCountBasedEarlyTerminatingCollector(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Closes the index writer, returning <tt>false</tt> if it failed to close.
|
|
||||||
*/
|
|
||||||
public static boolean safeClose(IndexWriter writer) {
|
|
||||||
if (writer == null) {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
try {
|
|
||||||
writer.close();
|
|
||||||
return true;
|
|
||||||
} catch (Throwable e) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
public static TopDocs readTopDocs(StreamInput in) throws IOException {
|
public static TopDocs readTopDocs(StreamInput in) throws IOException {
|
||||||
if (in.readBoolean()) {
|
if (in.readBoolean()) {
|
||||||
int totalHits = in.readVInt();
|
int totalHits = in.readVInt();
|
||||||
@ -612,19 +516,11 @@ public class Lucene {
|
|||||||
private int count = 0;
|
private int count = 0;
|
||||||
private LeafCollector leafCollector;
|
private LeafCollector leafCollector;
|
||||||
|
|
||||||
EarlyTerminatingCollector(int maxCountHits) {
|
|
||||||
this.maxCountHits = maxCountHits;
|
|
||||||
this.delegate = NOOP_COLLECTOR;
|
|
||||||
}
|
|
||||||
|
|
||||||
EarlyTerminatingCollector(final Collector delegate, int maxCountHits) {
|
EarlyTerminatingCollector(final Collector delegate, int maxCountHits) {
|
||||||
this.maxCountHits = maxCountHits;
|
this.maxCountHits = maxCountHits;
|
||||||
this.delegate = (delegate == null) ? NOOP_COLLECTOR : delegate;
|
this.delegate = Objects.requireNonNull(delegate);
|
||||||
}
|
}
|
||||||
|
|
||||||
public void reset() {
|
|
||||||
count = 0;
|
|
||||||
}
|
|
||||||
public int count() {
|
public int count() {
|
||||||
return count;
|
return count;
|
||||||
}
|
}
|
||||||
|
@@ -1,51 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.common.lucene.search;
-
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.SimpleCollector;
-
-import java.io.IOException;
-
-/**
- *
- */
-public class NoopCollector extends SimpleCollector {
-
-    public static final NoopCollector NOOP_COLLECTOR = new NoopCollector();
-
-    @Override
-    public void setScorer(Scorer scorer) throws IOException {
-    }
-
-    @Override
-    public void collect(int doc) throws IOException {
-    }
-
-    @Override
-    protected void doSetNextReader(LeafReaderContext context) throws IOException {
-    }
-
-    @Override
-    public boolean needsScores() {
-        return false;
-    }
-}
|
@@ -33,10 +33,24 @@ import java.util.concurrent.ConcurrentMap;
 /** Utility class to resolve the Lucene doc ID and version for a given uid. */
 public class Versions {
 
-    public static final long MATCH_ANY = -3L; // Version was not specified by the user
+    /** used to indicate the write operation should succeed regardless of current version **/
+    public static final long MATCH_ANY = -3L;
+
+    /** indicates that the current document was not found in lucene and in the version map */
     public static final long NOT_FOUND = -1L;
+
+    /**
+     * used when the document is old and doesn't contain any version information in the index
+     * see {@link PerThreadIDAndVersionLookup#lookup(org.apache.lucene.util.BytesRef)}
+     */
     public static final long NOT_SET = -2L;
+
+    /**
+     * used to indicate that the write operation should be executed if the document is currently deleted
+     * i.e., not found in the index and/or found as deleted (with version) in the version map
+     */
+    public static final long MATCH_DELETED = -4L;
 
     // TODO: is there somewhere else we can store these?
     private static final ConcurrentMap<IndexReader, CloseableThreadLocal<PerThreadIDAndVersionLookup>> lookupStates = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency();
 
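The new MATCH_DELETED sentinel pairs with the `deleted` flag added to the conflict checks further down in this diff (VersionType). A standalone restatement of the INTERNAL rules, not the production code:

```java
public class VersionConflictSketch {
    static final long MATCH_ANY = -3L;       // mirrors Versions.MATCH_ANY
    static final long NOT_SET = -2L;         // mirrors Versions.NOT_SET
    static final long MATCH_DELETED = -4L;   // mirrors Versions.MATCH_DELETED

    static boolean isConflict(long currentVersion, long expectedVersion, boolean deleted) {
        if (currentVersion == NOT_SET) {
            return false;                    // legacy document without version info: never a conflict
        }
        if (expectedVersion == MATCH_ANY) {
            return false;                    // caller did not specify a version
        }
        if (expectedVersion == MATCH_DELETED) {
            return deleted == false;         // create-style write: conflict only if the doc still exists
        }
        return currentVersion != expectedVersion;
    }

    public static void main(String[] args) {
        System.out.println(isConflict(5L, MATCH_DELETED, false)); // true: document already exists
        System.out.println(isConflict(5L, MATCH_DELETED, true));  // false: deleted, the write may proceed
    }
}
```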
|
@ -0,0 +1,357 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (C) 2008 The Guava Authors
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.elasticsearch.common.network;
|
||||||
|
|
||||||
|
import java.net.Inet4Address;
|
||||||
|
import java.net.Inet6Address;
|
||||||
|
import java.net.InetAddress;
|
||||||
|
import java.net.UnknownHostException;
|
||||||
|
import java.nio.ByteBuffer;
|
||||||
|
import java.util.Arrays;
|
||||||
|
import java.util.Locale;
|
||||||
|
|
||||||
|
public class InetAddresses {
|
||||||
|
private static int IPV4_PART_COUNT = 4;
|
||||||
|
private static int IPV6_PART_COUNT = 8;
|
||||||
|
|
||||||
|
public static boolean isInetAddress(String ipString) {
|
||||||
|
return ipStringToBytes(ipString) != null;
|
||||||
|
}
|
||||||
|
|
||||||
|
private static byte[] ipStringToBytes(String ipString) {
|
||||||
|
// Make a first pass to categorize the characters in this string.
|
||||||
|
boolean hasColon = false;
|
||||||
|
boolean hasDot = false;
|
||||||
|
for (int i = 0; i < ipString.length(); i++) {
|
||||||
|
char c = ipString.charAt(i);
|
||||||
|
if (c == '.') {
|
||||||
|
hasDot = true;
|
||||||
|
} else if (c == ':') {
|
||||||
|
if (hasDot) {
|
||||||
|
return null; // Colons must not appear after dots.
|
||||||
|
}
|
||||||
|
hasColon = true;
|
||||||
|
} else if (Character.digit(c, 16) == -1) {
|
||||||
|
return null; // Everything else must be a decimal or hex digit.
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Now decide which address family to parse.
|
||||||
|
if (hasColon) {
|
||||||
|
if (hasDot) {
|
||||||
|
ipString = convertDottedQuadToHex(ipString);
|
||||||
|
if (ipString == null) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return textToNumericFormatV6(ipString);
|
||||||
|
} else if (hasDot) {
|
||||||
|
return textToNumericFormatV4(ipString);
|
||||||
|
}
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
private static String convertDottedQuadToHex(String ipString) {
|
||||||
|
int lastColon = ipString.lastIndexOf(':');
|
||||||
|
String initialPart = ipString.substring(0, lastColon + 1);
|
||||||
|
String dottedQuad = ipString.substring(lastColon + 1);
|
||||||
|
byte[] quad = textToNumericFormatV4(dottedQuad);
|
||||||
|
if (quad == null) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
String penultimate = Integer.toHexString(((quad[0] & 0xff) << 8) | (quad[1] & 0xff));
|
||||||
|
String ultimate = Integer.toHexString(((quad[2] & 0xff) << 8) | (quad[3] & 0xff));
|
||||||
|
return initialPart + penultimate + ":" + ultimate;
|
||||||
|
}
|
||||||
|
|
||||||
|
private static byte[] textToNumericFormatV4(String ipString) {
|
||||||
|
String[] address = ipString.split("\\.", IPV4_PART_COUNT + 1);
|
||||||
|
if (address.length != IPV4_PART_COUNT) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
byte[] bytes = new byte[IPV4_PART_COUNT];
|
||||||
|
try {
|
||||||
|
for (int i = 0; i < bytes.length; i++) {
|
||||||
|
bytes[i] = parseOctet(address[i]);
|
||||||
|
}
|
||||||
|
} catch (NumberFormatException ex) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
return bytes;
|
||||||
|
}
|
||||||
|
|
||||||
|
private static byte parseOctet(String ipPart) {
|
||||||
|
// Note: we already verified that this string contains only hex digits.
|
||||||
|
int octet = Integer.parseInt(ipPart);
|
||||||
|
// Disallow leading zeroes, because no clear standard exists on
|
||||||
|
// whether these should be interpreted as decimal or octal.
|
||||||
|
if (octet > 255 || (ipPart.startsWith("0") && ipPart.length() > 1)) {
|
||||||
|
throw new NumberFormatException();
|
||||||
|
}
|
||||||
|
return (byte) octet;
|
||||||
|
}
|
||||||
|
|
||||||
|
private static byte[] textToNumericFormatV6(String ipString) {
|
||||||
|
// An address can have [2..8] colons, and N colons make N+1 parts.
|
||||||
|
String[] parts = ipString.split(":", IPV6_PART_COUNT + 2);
|
||||||
|
if (parts.length < 3 || parts.length > IPV6_PART_COUNT + 1) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Disregarding the endpoints, find "::" with nothing in between.
|
||||||
|
// This indicates that a run of zeroes has been skipped.
|
||||||
|
int skipIndex = -1;
|
||||||
|
for (int i = 1; i < parts.length - 1; i++) {
|
||||||
|
if (parts[i].length() == 0) {
|
||||||
|
if (skipIndex >= 0) {
|
||||||
|
return null; // Can't have more than one ::
|
||||||
|
}
|
||||||
|
skipIndex = i;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
int partsHi; // Number of parts to copy from above/before the "::"
|
||||||
|
int partsLo; // Number of parts to copy from below/after the "::"
|
||||||
|
if (skipIndex >= 0) {
|
||||||
|
// If we found a "::", then check if it also covers the endpoints.
|
||||||
|
partsHi = skipIndex;
|
||||||
|
partsLo = parts.length - skipIndex - 1;
|
||||||
|
if (parts[0].length() == 0 && --partsHi != 0) {
|
||||||
|
return null; // ^: requires ^::
|
||||||
|
}
|
||||||
|
if (parts[parts.length - 1].length() == 0 && --partsLo != 0) {
|
||||||
|
return null; // :$ requires ::$
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Otherwise, allocate the entire address to partsHi. The endpoints
|
||||||
|
// could still be empty, but parseHextet() will check for that.
|
||||||
|
partsHi = parts.length;
|
||||||
|
partsLo = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we found a ::, then we must have skipped at least one part.
|
||||||
|
// Otherwise, we must have exactly the right number of parts.
|
||||||
|
int partsSkipped = IPV6_PART_COUNT - (partsHi + partsLo);
|
||||||
|
if (!(skipIndex >= 0 ? partsSkipped >= 1 : partsSkipped == 0)) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Now parse the hextets into a byte array.
|
||||||
|
ByteBuffer rawBytes = ByteBuffer.allocate(2 * IPV6_PART_COUNT);
|
||||||
|
try {
|
||||||
|
for (int i = 0; i < partsHi; i++) {
|
||||||
|
rawBytes.putShort(parseHextet(parts[i]));
|
||||||
|
}
|
||||||
|
for (int i = 0; i < partsSkipped; i++) {
|
||||||
|
rawBytes.putShort((short) 0);
|
||||||
|
}
|
||||||
|
for (int i = partsLo; i > 0; i--) {
|
||||||
|
rawBytes.putShort(parseHextet(parts[parts.length - i]));
|
||||||
|
}
|
||||||
|
} catch (NumberFormatException ex) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
return rawBytes.array();
|
||||||
|
}
|
||||||
|
|
||||||
|
private static short parseHextet(String ipPart) {
|
||||||
|
// Note: we already verified that this string contains only hex digits.
|
||||||
|
int hextet = Integer.parseInt(ipPart, 16);
|
||||||
|
if (hextet > 0xffff) {
|
||||||
|
throw new NumberFormatException();
|
||||||
|
}
|
||||||
|
return (short) hextet;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the string representation of an {@link InetAddress} suitable
|
||||||
|
* for inclusion in a URI.
|
||||||
|
*
|
||||||
|
* <p>For IPv4 addresses, this is identical to
|
||||||
|
* {@link InetAddress#getHostAddress()}, but for IPv6 addresses it
|
||||||
|
* compresses zeroes and surrounds the text with square brackets; for example
|
||||||
|
* {@code "[2001:db8::1]"}.
|
||||||
|
*
|
||||||
|
* <p>Per section 3.2.2 of
|
||||||
|
* <a target="_parent"
|
||||||
|
* href="http://tools.ietf.org/html/rfc3986#section-3.2.2"
|
||||||
|
* >http://tools.ietf.org/html/rfc3986</a>,
|
||||||
|
* a URI containing an IPv6 string literal is of the form
|
||||||
|
* {@code "http://[2001:db8::1]:8888/index.html"}.
|
||||||
|
*
|
||||||
|
* <p>Use of either {@link InetAddresses#toAddrString},
|
||||||
|
* {@link InetAddress#getHostAddress()}, or this method is recommended over
|
||||||
|
* {@link InetAddress#toString()} when an IP address string literal is
|
||||||
|
* desired. This is because {@link InetAddress#toString()} prints the
|
||||||
|
* hostname and the IP address string joined by a "/".
|
||||||
|
*
|
||||||
|
* @param ip {@link InetAddress} to be converted to URI string literal
|
||||||
|
* @return {@code String} containing URI-safe string literal
|
||||||
|
*/
|
||||||
|
public static String toUriString(InetAddress ip) {
|
||||||
|
if (ip instanceof Inet6Address) {
|
||||||
|
return "[" + toAddrString(ip) + "]";
|
||||||
|
}
|
||||||
|
return toAddrString(ip);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the string representation of an {@link InetAddress}.
|
||||||
|
*
|
||||||
|
* <p>For IPv4 addresses, this is identical to
|
||||||
|
* {@link InetAddress#getHostAddress()}, but for IPv6 addresses, the output
|
||||||
|
* follows <a href="http://tools.ietf.org/html/rfc5952">RFC 5952</a>
|
||||||
|
* section 4. The main difference is that this method uses "::" for zero
|
||||||
|
* compression, while Java's version uses the uncompressed form.
|
||||||
|
*
|
||||||
|
* <p>This method uses hexadecimal for all IPv6 addresses, including
|
||||||
|
* IPv4-mapped IPv6 addresses such as "::c000:201". The output does not
|
||||||
|
* include a Scope ID.
|
||||||
|
*
|
||||||
|
* @param ip {@link InetAddress} to be converted to an address string
|
||||||
|
* @return {@code String} containing the text-formatted IP address
|
||||||
|
* @since 10.0
|
||||||
|
*/
|
||||||
|
public static String toAddrString(InetAddress ip) {
|
||||||
|
if (ip == null) {
|
||||||
|
throw new NullPointerException("ip");
|
||||||
|
}
|
||||||
|
if (ip instanceof Inet4Address) {
|
||||||
|
// For IPv4, Java's formatting is good enough.
|
||||||
|
byte[] bytes = ip.getAddress();
|
||||||
|
return (bytes[0] & 0xff) + "." + (bytes[1] & 0xff) + "." + (bytes[2] & 0xff) + "." + (bytes[3] & 0xff);
|
||||||
|
}
|
||||||
|
if (!(ip instanceof Inet6Address)) {
|
||||||
|
throw new IllegalArgumentException("ip");
|
||||||
|
}
|
||||||
|
byte[] bytes = ip.getAddress();
|
||||||
|
int[] hextets = new int[IPV6_PART_COUNT];
|
||||||
|
for (int i = 0; i < hextets.length; i++) {
|
||||||
|
hextets[i] = (bytes[2 * i] & 255) << 8 | bytes[2 * i + 1] & 255;
|
||||||
|
}
|
||||||
|
compressLongestRunOfZeroes(hextets);
|
||||||
|
return hextetsToIPv6String(hextets);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Identify and mark the longest run of zeroes in an IPv6 address.
|
||||||
|
*
|
||||||
|
* <p>Only runs of two or more hextets are considered. In case of a tie, the
|
||||||
|
* leftmost run wins. If a qualifying run is found, its hextets are replaced
|
||||||
|
* by the sentinel value -1.
|
||||||
|
*
|
||||||
|
* @param hextets {@code int[]} mutable array of eight 16-bit hextets
|
||||||
|
*/
|
||||||
|
private static void compressLongestRunOfZeroes(int[] hextets) {
|
||||||
|
int bestRunStart = -1;
|
||||||
|
int bestRunLength = -1;
|
||||||
|
int runStart = -1;
|
||||||
|
for (int i = 0; i < hextets.length + 1; i++) {
|
||||||
|
if (i < hextets.length && hextets[i] == 0) {
|
||||||
|
if (runStart < 0) {
|
||||||
|
runStart = i;
|
||||||
|
}
|
||||||
|
} else if (runStart >= 0) {
|
||||||
|
int runLength = i - runStart;
|
||||||
|
if (runLength > bestRunLength) {
|
||||||
|
bestRunStart = runStart;
|
||||||
|
bestRunLength = runLength;
|
||||||
|
}
|
||||||
|
runStart = -1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (bestRunLength >= 2) {
|
||||||
|
Arrays.fill(hextets, bestRunStart, bestRunStart + bestRunLength, -1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Convert a list of hextets into a human-readable IPv6 address.
|
||||||
|
*
|
||||||
|
* <p>In order for "::" compression to work, the input should contain negative
|
||||||
|
* sentinel values in place of the elided zeroes.
|
||||||
|
*
|
||||||
|
* @param hextets {@code int[]} array of eight 16-bit hextets, or -1s
|
||||||
|
*/
|
||||||
|
private static String hextetsToIPv6String(int[] hextets) {
|
||||||
|
/*
|
||||||
|
* While scanning the array, handle these state transitions:
|
||||||
|
* start->num => "num" start->gap => "::"
|
||||||
|
* num->num => ":num" num->gap => "::"
|
||||||
|
* gap->num => "num" gap->gap => ""
|
||||||
|
*/
|
||||||
|
StringBuilder buf = new StringBuilder(39);
|
||||||
|
boolean lastWasNumber = false;
|
||||||
|
for (int i = 0; i < hextets.length; i++) {
|
||||||
|
boolean thisIsNumber = hextets[i] >= 0;
|
||||||
|
if (thisIsNumber) {
|
||||||
|
if (lastWasNumber) {
|
||||||
|
buf.append(':');
|
||||||
|
}
|
||||||
|
buf.append(Integer.toHexString(hextets[i]));
|
||||||
|
} else {
|
||||||
|
if (i == 0 || lastWasNumber) {
|
||||||
|
buf.append("::");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
lastWasNumber = thisIsNumber;
|
||||||
|
}
|
||||||
|
return buf.toString();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the {@link InetAddress} having the given string representation.
|
||||||
|
*
|
||||||
|
* <p>This deliberately avoids all nameservice lookups (e.g. no DNS).
|
||||||
|
*
|
||||||
|
* @param ipString {@code String} containing an IPv4 or IPv6 string literal, e.g.
|
||||||
|
* {@code "192.168.0.1"} or {@code "2001:db8::1"}
|
||||||
|
* @return {@link InetAddress} representing the argument
|
||||||
|
* @throws IllegalArgumentException if the argument is not a valid IP string literal
|
||||||
|
*/
|
||||||
|
public static InetAddress forString(String ipString) {
|
||||||
|
byte[] addr = ipStringToBytes(ipString);
|
||||||
|
|
||||||
|
// The argument was malformed, i.e. not an IP string literal.
|
||||||
|
if (addr == null) {
|
||||||
|
throw new IllegalArgumentException(String.format(Locale.ROOT, "'%s' is not an IP string literal.", ipString));
|
||||||
|
}
|
||||||
|
|
||||||
|
return bytesToInetAddress(addr);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Convert a byte array into an InetAddress.
|
||||||
|
*
|
||||||
|
* {@link InetAddress#getByAddress} is documented as throwing a checked
|
||||||
|
* exception "if IP address is of illegal length." We replace it with
|
||||||
|
* an unchecked exception, for use by callers who already know that addr
|
||||||
|
* is an array of length 4 or 16.
|
||||||
|
*
|
||||||
|
* @param addr the raw 4-byte or 16-byte IP address in big-endian order
|
||||||
|
* @return an InetAddress object created from the raw IP address
|
||||||
|
*/
|
||||||
|
private static InetAddress bytesToInetAddress(byte[] addr) {
|
||||||
|
try {
|
||||||
|
return InetAddress.getByAddress(addr);
|
||||||
|
} catch (UnknownHostException e) {
|
||||||
|
throw new AssertionError(e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
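A small usage sketch of the new InetAddresses helpers above (the demo class is ours; the methods shown are the public ones added in this file):

```java
import org.elasticsearch.common.network.InetAddresses;

import java.net.InetAddress;

public class InetAddressesSketch {
    public static void main(String[] args) {
        // Parses without any DNS lookup, per the forString javadoc above.
        InetAddress v6 = InetAddresses.forString("2001:db8::1");
        System.out.println(InetAddresses.toAddrString(v6)); // 2001:db8::1 (RFC 5952 zero compression)
        System.out.println(InetAddresses.toUriString(v6));  // [2001:db8::1] (bracketed for URIs)
        System.out.println(InetAddresses.isInetAddress("not an ip")); // false
    }
}
```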
|
@@ -19,8 +19,6 @@
 
 package org.elasticsearch.common.network;
 
-import com.google.common.net.InetAddresses;
-
 import org.elasticsearch.common.SuppressForbidden;
 
 import java.net.Inet6Address;
@@ -27,7 +27,6 @@ import org.elasticsearch.common.unit.TimeValue;
 
 import java.io.IOException;
 import java.net.InetAddress;
-import java.net.UnknownHostException;
 import java.util.List;
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.TimeUnit;
@@ -73,7 +72,7 @@ public class NetworkService extends AbstractComponent {
         /**
          * Resolves a custom value handling, return <tt>null</tt> if can't handle it.
         */
-        InetAddress[] resolveIfPossible(String value);
+        InetAddress[] resolveIfPossible(String value) throws IOException;
     }
 
     private final List<CustomNameResolver> customNameResolvers = new CopyOnWriteArrayList<>();
@@ -162,7 +161,7 @@ public class NetworkService extends AbstractComponent {
         return address;
     }
 
-    private InetAddress[] resolveInetAddress(String host) throws UnknownHostException, IOException {
+    private InetAddress[] resolveInetAddress(String host) throws IOException {
         if ((host.startsWith("#") && host.endsWith("#")) || (host.startsWith("_") && host.endsWith("_"))) {
             host = host.substring(1, host.length() - 1);
             // allow custom resolvers to have special names
|
@@ -23,16 +23,12 @@ import com.carrotsearch.hppc.DoubleArrayList;
 import com.carrotsearch.hppc.FloatArrayList;
 import com.carrotsearch.hppc.LongArrayList;
 import com.carrotsearch.hppc.ObjectArrayList;
-import com.google.common.collect.Iterators;
-
 import org.apache.lucene.util.*;
 
 import java.util.*;
 
 /** Collections-related utility methods. */
-public enum CollectionUtils {
-    CollectionUtils;
+public class CollectionUtils {
 
     public static void sort(LongArrayList list) {
         sort(list.buffer, list.size());
     }
@@ -367,13 +363,6 @@ public enum CollectionUtils {
 
     }
 
-    /**
-     * Combines multiple iterators into a single iterator.
-     */
-    public static <T> Iterator<T> concat(Iterator<? extends T>... iterators) {
-        return Iterators.<T>concat(iterators);
-    }
-
     public static <E> ArrayList<E> iterableAsArrayList(Iterable<? extends E> elements) {
         if (elements == null) {
             throw new NullPointerException("elements");
|
@@ -16,6 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
+
 package org.elasticsearch.index;
 
 import org.elasticsearch.common.Nullable;
@@ -34,6 +35,7 @@ import org.elasticsearch.index.termvectors.TermVectorsService;
 import org.elasticsearch.indices.IndicesLifecycle;
 import org.elasticsearch.indices.IndicesWarmer;
 import org.elasticsearch.indices.cache.query.IndicesQueryCache;
+import org.elasticsearch.indices.memory.IndexingMemoryController;
 import org.elasticsearch.threadpool.ThreadPool;
 
 /**
@@ -58,9 +60,10 @@ public final class IndexServicesProvider {
     private final EngineFactory factory;
     private final BigArrays bigArrays;
     private final IndexSearcherWrapper indexSearcherWrapper;
+    private final IndexingMemoryController indexingMemoryController;
 
     @Inject
-    public IndexServicesProvider(IndicesLifecycle indicesLifecycle, ThreadPool threadPool, MapperService mapperService, IndexQueryParserService queryParserService, IndexCache indexCache, IndexAliasesService indexAliasesService, IndicesQueryCache indicesQueryCache, CodecService codecService, TermVectorsService termVectorsService, IndexFieldDataService indexFieldDataService, @Nullable IndicesWarmer warmer, SimilarityService similarityService, EngineFactory factory, BigArrays bigArrays, @Nullable IndexSearcherWrapper indexSearcherWrapper) {
+    public IndexServicesProvider(IndicesLifecycle indicesLifecycle, ThreadPool threadPool, MapperService mapperService, IndexQueryParserService queryParserService, IndexCache indexCache, IndexAliasesService indexAliasesService, IndicesQueryCache indicesQueryCache, CodecService codecService, TermVectorsService termVectorsService, IndexFieldDataService indexFieldDataService, @Nullable IndicesWarmer warmer, SimilarityService similarityService, EngineFactory factory, BigArrays bigArrays, @Nullable IndexSearcherWrapper indexSearcherWrapper, IndexingMemoryController indexingMemoryController) {
         this.indicesLifecycle = indicesLifecycle;
         this.threadPool = threadPool;
         this.mapperService = mapperService;
@@ -76,6 +79,7 @@ public final class IndexServicesProvider {
         this.factory = factory;
         this.bigArrays = bigArrays;
         this.indexSearcherWrapper = indexSearcherWrapper;
+        this.indexingMemoryController = indexingMemoryController;
     }
 
     public IndicesLifecycle getIndicesLifecycle() {
@@ -134,5 +138,11 @@ public final class IndexServicesProvider {
         return bigArrays;
     }
 
-    public IndexSearcherWrapper getIndexSearcherWrapper() { return indexSearcherWrapper; }
+    public IndexSearcherWrapper getIndexSearcherWrapper() {
+        return indexSearcherWrapper;
+    }
+
+    public IndexingMemoryController getIndexingMemoryController() {
+        return indexingMemoryController;
+    }
 }
|
@ -31,24 +31,37 @@ import java.io.IOException;
|
|||||||
public enum VersionType implements Writeable<VersionType> {
|
public enum VersionType implements Writeable<VersionType> {
|
||||||
INTERNAL((byte) 0) {
|
INTERNAL((byte) 0) {
|
||||||
@Override
|
@Override
|
||||||
public boolean isVersionConflictForWrites(long currentVersion, long expectedVersion) {
|
public boolean isVersionConflictForWrites(long currentVersion, long expectedVersion, boolean deleted) {
|
||||||
return isVersionConflict(currentVersion, expectedVersion);
|
return isVersionConflict(currentVersion, expectedVersion, deleted);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String explainConflictForWrites(long currentVersion, long expectedVersion, boolean deleted) {
|
||||||
|
if (expectedVersion == Versions.MATCH_DELETED) {
|
||||||
|
return "document already exists (current version [" + currentVersion + "])";
|
||||||
|
}
|
||||||
|
return "current version [" + currentVersion + "] is different than the one provided [" + expectedVersion + "]";
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public boolean isVersionConflictForReads(long currentVersion, long expectedVersion) {
|
public boolean isVersionConflictForReads(long currentVersion, long expectedVersion) {
|
||||||
return isVersionConflict(currentVersion, expectedVersion);
|
return isVersionConflict(currentVersion, expectedVersion, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
private boolean isVersionConflict(long currentVersion, long expectedVersion) {
|
@Override
|
||||||
|
public String explainConflictForReads(long currentVersion, long expectedVersion) {
|
||||||
|
return "current version [" + currentVersion + "] is different than the one provided [" + expectedVersion + "]";
|
||||||
|
}
|
||||||
|
|
||||||
|
private boolean isVersionConflict(long currentVersion, long expectedVersion, boolean deleted) {
|
||||||
if (currentVersion == Versions.NOT_SET) {
|
if (currentVersion == Versions.NOT_SET) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
if (expectedVersion == Versions.MATCH_ANY) {
|
if (expectedVersion == Versions.MATCH_ANY) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
if (currentVersion == Versions.NOT_FOUND) {
|
if (expectedVersion == Versions.MATCH_DELETED) {
|
||||||
return true;
|
return deleted == false;
|
||||||
}
|
}
|
||||||
if (currentVersion != expectedVersion) {
|
if (currentVersion != expectedVersion) {
|
||||||
return true;
|
return true;
|
||||||
@ -63,8 +76,7 @@ public enum VersionType implements Writeable<VersionType> {
|
|||||||
|
|
||||||
@Override
|
@Override
|
||||||
public boolean validateVersionForWrites(long version) {
|
public boolean validateVersionForWrites(long version) {
|
||||||
// not allowing Versions.NOT_FOUND as it is not a valid input value.
|
return version > 0L || version == Versions.MATCH_ANY || version == Versions.MATCH_DELETED;
|
||||||
return version > 0L || version == Versions.MATCH_ANY;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
@ -82,7 +94,7 @@ public enum VersionType implements Writeable<VersionType> {
|
|||||||
},
|
},
|
||||||
EXTERNAL((byte) 1) {
|
EXTERNAL((byte) 1) {
|
||||||
@Override
|
@Override
|
||||||
public boolean isVersionConflictForWrites(long currentVersion, long expectedVersion) {
|
public boolean isVersionConflictForWrites(long currentVersion, long expectedVersion, boolean deleted) {
|
||||||
if (currentVersion == Versions.NOT_SET) {
|
if (currentVersion == Versions.NOT_SET) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
@ -98,6 +110,11 @@ public enum VersionType implements Writeable<VersionType> {
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String explainConflictForWrites(long currentVersion, long expectedVersion, boolean deleted) {
|
||||||
|
return "current version [" + currentVersion + "] is higher or equal to the one provided [" + expectedVersion + "]";
|
||||||
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public boolean isVersionConflictForReads(long currentVersion, long expectedVersion) {
|
public boolean isVersionConflictForReads(long currentVersion, long expectedVersion) {
|
||||||
if (currentVersion == Versions.NOT_SET) {
|
if (currentVersion == Versions.NOT_SET) {
|
||||||
@ -115,6 +132,11 @@ public enum VersionType implements Writeable<VersionType> {
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String explainConflictForReads(long currentVersion, long expectedVersion) {
|
||||||
|
return "current version [" + currentVersion + "] is different than the one provided [" + expectedVersion + "]";
|
||||||
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public long updateVersion(long currentVersion, long expectedVersion) {
|
public long updateVersion(long currentVersion, long expectedVersion) {
|
||||||
return expectedVersion;
|
return expectedVersion;
|
||||||
@ -133,7 +155,7 @@ public enum VersionType implements Writeable<VersionType> {
|
|||||||
},
|
},
|
||||||
EXTERNAL_GTE((byte) 2) {
|
EXTERNAL_GTE((byte) 2) {
|
||||||
@Override
|
@Override
|
||||||
public boolean isVersionConflictForWrites(long currentVersion, long expectedVersion) {
|
public boolean isVersionConflictForWrites(long currentVersion, long expectedVersion, boolean deleted) {
|
||||||
if (currentVersion == Versions.NOT_SET) {
|
if (currentVersion == Versions.NOT_SET) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
@ -149,6 +171,11 @@ public enum VersionType implements Writeable<VersionType> {
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String explainConflictForWrites(long currentVersion, long expectedVersion, boolean deleted) {
|
||||||
|
return "current version [" + currentVersion + "] is higher than the one provided [" + expectedVersion + "]";
|
||||||
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public boolean isVersionConflictForReads(long currentVersion, long expectedVersion) {
|
public boolean isVersionConflictForReads(long currentVersion, long expectedVersion) {
|
||||||
if (currentVersion == Versions.NOT_SET) {
|
if (currentVersion == Versions.NOT_SET) {
|
||||||
@ -166,6 +193,11 @@ public enum VersionType implements Writeable<VersionType> {
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String explainConflictForReads(long currentVersion, long expectedVersion) {
|
||||||
|
return "current version [" + currentVersion + "] is different than the one provided [" + expectedVersion + "]";
|
||||||
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public long updateVersion(long currentVersion, long expectedVersion) {
|
public long updateVersion(long currentVersion, long expectedVersion) {
|
||||||
return expectedVersion;
|
return expectedVersion;
|
||||||
@ -187,7 +219,7 @@ public enum VersionType implements Writeable<VersionType> {
|
|||||||
*/
|
*/
|
||||||
FORCE((byte) 3) {
|
FORCE((byte) 3) {
|
||||||
@Override
|
@Override
|
||||||
public boolean isVersionConflictForWrites(long currentVersion, long expectedVersion) {
|
public boolean isVersionConflictForWrites(long currentVersion, long expectedVersion, boolean deleted) {
|
||||||
if (currentVersion == Versions.NOT_SET) {
|
if (currentVersion == Versions.NOT_SET) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
@ -195,16 +227,26 @@ public enum VersionType implements Writeable<VersionType> {
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
if (expectedVersion == Versions.MATCH_ANY) {
|
if (expectedVersion == Versions.MATCH_ANY) {
|
||||||
return true;
|
throw new IllegalStateException("you must specify a version when use VersionType.FORCE");
|
||||||
}
|
}
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String explainConflictForWrites(long currentVersion, long expectedVersion, boolean deleted) {
|
||||||
|
throw new AssertionError("VersionType.FORCE should never result in a write conflict");
|
||||||
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public boolean isVersionConflictForReads(long currentVersion, long expectedVersion) {
|
public boolean isVersionConflictForReads(long currentVersion, long expectedVersion) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String explainConflictForReads(long currentVersion, long expectedVersion) {
|
||||||
|
throw new AssertionError("VersionType.FORCE should never result in a read conflict");
|
||||||
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public long updateVersion(long currentVersion, long expectedVersion) {
|
public long updateVersion(long currentVersion, long expectedVersion) {
|
||||||
return expectedVersion;
|
return expectedVersion;
|
||||||
@ -237,17 +279,46 @@ public enum VersionType implements Writeable<VersionType> {
|
|||||||
/**
|
/**
|
||||||
* Checks whether the current version conflicts with the expected version, based on the current version type.
|
* Checks whether the current version conflicts with the expected version, based on the current version type.
|
||||||
*
|
*
|
||||||
|
* @param currentVersion the current version for the document
|
||||||
|
* @param expectedVersion the version specified for the write operation
|
||||||
|
* @param deleted true if the document is currently deleted (note that #currentVersion will typically be
|
||||||
|
* {@link Versions#NOT_FOUND}, but may be something else if the document was recently deleted)
|
||||||
* @return true if versions conflict false o.w.
|
* @return true if versions conflict false o.w.
|
||||||
*/
|
*/
|
||||||
public abstract boolean isVersionConflictForWrites(long currentVersion, long expectedVersion);
|
public abstract boolean isVersionConflictForWrites(long currentVersion, long expectedVersion, boolean deleted);
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns a human readable explanation for a version conflict on write.
|
||||||
|
*
|
||||||
|
* Note that this method is only called if {@link #isVersionConflictForWrites(long, long, boolean)} returns true;
|
||||||
|
*
|
||||||
|
* @param currentVersion the current version for the document
|
||||||
|
* @param expectedVersion the version specified for the write operation
|
||||||
|
* @param deleted true if the document is currently deleted (note that #currentVersion will typically be
|
||||||
|
* {@link Versions#NOT_FOUND}, but may be something else if the document was recently deleted)
|
||||||
|
*/
|
||||||
|
public abstract String explainConflictForWrites(long currentVersion, long expectedVersion, boolean deleted);
|
||||||
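// --- Illustrative sketch, not part of this change: how a write path might combine the two
// abstract methods declared above. The helper name and the exception type are assumptions.
static void rejectConflictingWrite(VersionType versionType, long currentVersion,
                                   long expectedVersion, boolean deleted) {
    if (versionType.isVersionConflictForWrites(currentVersion, expectedVersion, deleted)) {
        // explainConflictForWrites is only meaningful once a conflict has been detected.
        throw new IllegalArgumentException(
                versionType.explainConflictForWrites(currentVersion, expectedVersion, deleted));
    }
}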
|
|
||||||
/**
|
/**
|
||||||
* Checks whether the current version conflicts with the expected version, based on the current version type.
|
* Checks whether the current version conflicts with the expected version, based on the current version type.
|
||||||
*
|
*
|
||||||
|
* @param currentVersion the current version for the document
|
||||||
|
* @param expectedVersion the version specified for the read operation
|
||||||
* @return true if versions conflict false o.w.
|
* @return true if versions conflict false o.w.
|
||||||
*/
|
*/
|
||||||
public abstract boolean isVersionConflictForReads(long currentVersion, long expectedVersion);
|
public abstract boolean isVersionConflictForReads(long currentVersion, long expectedVersion);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns a human readable explanation for a version conflict on read.
|
||||||
|
*
|
||||||
|
* Note that this method is only called if {@link #isVersionConflictForReads(long, long)} returns true;
|
||||||
|
*
|
||||||
|
* @param currentVersion the current version for the document
|
||||||
|
* @param expectedVersion the version specified for the read operation
|
||||||
|
*/
|
||||||
|
public abstract String explainConflictForReads(long currentVersion, long expectedVersion);
|
||||||
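// --- Illustrative sketch, not part of this change: the read-side counterpart. Engine#get
// (later in this commit) uses the same pattern, throwing VersionConflictEngineException.
static void rejectConflictingRead(VersionType versionType, long currentVersion, long expectedVersion) {
    if (versionType.isVersionConflictForReads(currentVersion, expectedVersion)) {
        throw new IllegalArgumentException(
                versionType.explainConflictForReads(currentVersion, expectedVersion));
    }
}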
|
|
||||||
/**
|
/**
|
||||||
* Returns the new version for a document, based on its current one and the specified in the request
|
* Returns the new version for a document, based on its current one and the specified in the request
|
||||||
*
|
*
|
||||||
|
@ -19,11 +19,6 @@
|
|||||||
|
|
||||||
package org.elasticsearch.index.cache.bitset;
|
package org.elasticsearch.index.cache.bitset;
|
||||||
|
|
||||||
import com.google.common.cache.Cache;
|
|
||||||
import com.google.common.cache.CacheBuilder;
|
|
||||||
import com.google.common.cache.RemovalListener;
|
|
||||||
import com.google.common.cache.RemovalNotification;
|
|
||||||
|
|
||||||
import org.apache.lucene.index.IndexReaderContext;
|
import org.apache.lucene.index.IndexReaderContext;
|
||||||
import org.apache.lucene.index.LeafReader;
|
import org.apache.lucene.index.LeafReader;
|
||||||
import org.apache.lucene.index.LeafReaderContext;
|
import org.apache.lucene.index.LeafReaderContext;
|
||||||
@ -38,6 +33,10 @@ import org.apache.lucene.util.BitDocIdSet;
|
|||||||
import org.apache.lucene.util.BitSet;
|
import org.apache.lucene.util.BitSet;
|
||||||
import org.elasticsearch.ExceptionsHelper;
|
import org.elasticsearch.ExceptionsHelper;
|
||||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||||
|
import org.elasticsearch.common.cache.Cache;
|
||||||
|
import org.elasticsearch.common.cache.CacheBuilder;
|
||||||
|
import org.elasticsearch.common.cache.RemovalListener;
|
||||||
|
import org.elasticsearch.common.cache.RemovalNotification;
|
||||||
import org.elasticsearch.common.inject.Inject;
|
import org.elasticsearch.common.inject.Inject;
|
||||||
import org.elasticsearch.common.lucene.search.Queries;
|
import org.elasticsearch.common.lucene.search.Queries;
|
||||||
import org.elasticsearch.common.settings.Settings;
|
import org.elasticsearch.common.settings.Settings;
|
||||||
@ -58,10 +57,11 @@ import org.elasticsearch.threadpool.ThreadPool;
|
|||||||
import java.io.Closeable;
|
import java.io.Closeable;
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.util.HashSet;
|
import java.util.HashSet;
|
||||||
import java.util.Map;
|
|
||||||
import java.util.Objects;
|
import java.util.Objects;
|
||||||
import java.util.Set;
|
import java.util.Set;
|
||||||
import java.util.concurrent.*;
|
import java.util.concurrent.CountDownLatch;
|
||||||
|
import java.util.concurrent.ExecutionException;
|
||||||
|
import java.util.concurrent.Executor;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* This is a cache for {@link BitDocIdSet} based filters and is unbounded by size or time.
|
* This is a cache for {@link BitDocIdSet} based filters and is unbounded by size or time.
|
||||||
@ -94,10 +94,11 @@ public class BitsetFilterCache extends AbstractIndexComponent implements LeafRea
|
|||||||
public BitsetFilterCache(Index index, @IndexSettings Settings indexSettings) {
|
public BitsetFilterCache(Index index, @IndexSettings Settings indexSettings) {
|
||||||
super(index, indexSettings);
|
super(index, indexSettings);
|
||||||
this.loadRandomAccessFiltersEagerly = indexSettings.getAsBoolean(LOAD_RANDOM_ACCESS_FILTERS_EAGERLY, true);
|
this.loadRandomAccessFiltersEagerly = indexSettings.getAsBoolean(LOAD_RANDOM_ACCESS_FILTERS_EAGERLY, true);
|
||||||
this.loadedFilters = CacheBuilder.newBuilder().removalListener(this).build();
|
this.loadedFilters = CacheBuilder.<Object, Cache<Query, Value>>builder().removalListener(this).build();
|
||||||
this.warmer = new BitSetProducerWarmer();
|
this.warmer = new BitSetProducerWarmer();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@Inject(optional = true)
|
@Inject(optional = true)
|
||||||
public void setIndicesWarmer(IndicesWarmer indicesWarmer) {
|
public void setIndicesWarmer(IndicesWarmer indicesWarmer) {
|
||||||
this.indicesWarmer = indicesWarmer;
|
this.indicesWarmer = indicesWarmer;
|
||||||
@ -144,14 +145,12 @@ public class BitsetFilterCache extends AbstractIndexComponent implements LeafRea
|
|||||||
private BitSet getAndLoadIfNotPresent(final Query query, final LeafReaderContext context) throws IOException, ExecutionException {
|
private BitSet getAndLoadIfNotPresent(final Query query, final LeafReaderContext context) throws IOException, ExecutionException {
|
||||||
final Object coreCacheReader = context.reader().getCoreCacheKey();
|
final Object coreCacheReader = context.reader().getCoreCacheKey();
|
||||||
final ShardId shardId = ShardUtils.extractShardId(context.reader());
|
final ShardId shardId = ShardUtils.extractShardId(context.reader());
|
||||||
Cache<Query, Value> filterToFbs = loadedFilters.get(coreCacheReader, new Callable<Cache<Query, Value>>() {
|
Cache<Query, Value> filterToFbs = loadedFilters.computeIfAbsent(coreCacheReader, key -> {
|
||||||
@Override
|
context.reader().addCoreClosedListener(BitsetFilterCache.this);
|
||||||
public Cache<Query, Value> call() throws Exception {
|
return CacheBuilder.<Query, Value>builder().build();
|
||||||
context.reader().addCoreClosedListener(BitsetFilterCache.this);
|
|
||||||
return CacheBuilder.newBuilder().build();
|
|
||||||
}
|
|
||||||
});
|
});
|
||||||
return filterToFbs.get(query, () -> {
|
|
||||||
|
return filterToFbs.computeIfAbsent(query, key -> {
|
||||||
final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context);
|
final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context);
|
||||||
final IndexSearcher searcher = new IndexSearcher(topLevelContext);
|
final IndexSearcher searcher = new IndexSearcher(topLevelContext);
|
||||||
searcher.setQueryCache(null);
|
searcher.setQueryCache(null);
|
||||||
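// --- Illustrative sketch, not part of this change: the lookup-or-load idiom using the new
// org.elasticsearch.common.cache API that replaces Guava's Cache in this file. The loader
// body is a placeholder; loadValueFor is hypothetical.
static BitSet lookupOrLoad(Cache<Query, Value> cache, Query query) throws ExecutionException {
    Value value = cache.computeIfAbsent(query, key -> {
        // expensive work runs at most once per key; concurrent callers wait for the result
        return loadValueFor(key);   // hypothetical loader
    });
    return value.bitset;
}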
@ -172,8 +171,7 @@ public class BitsetFilterCache extends AbstractIndexComponent implements LeafRea
|
|||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void onRemoval(RemovalNotification<Object, Cache<Query, Value>> notification) {
|
public void onRemoval(RemovalNotification<Object, Cache<Query, Value>> notification) {
|
||||||
Object key = notification.getKey();
|
if (notification.getKey() == null) {
|
||||||
if (key == null) {
|
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -182,7 +180,7 @@ public class BitsetFilterCache extends AbstractIndexComponent implements LeafRea
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
for (Value value : valueCache.asMap().values()) {
|
for (Value value : valueCache.values()) {
|
||||||
listener.onRemoval(value.shardId, value.bitset);
|
listener.onRemoval(value.shardId, value.bitset);
|
||||||
// if null then this means the shard has already been removed and the stats are 0 anyway for the shard this key belongs to
|
// if null then this means the shard has already been removed and the stats are 0 anyway for the shard this key belongs to
|
||||||
}
|
}
|
||||||
|
@ -19,10 +19,8 @@
|
|||||||
|
|
||||||
package org.elasticsearch.index.cache.request;
|
package org.elasticsearch.index.cache.request;
|
||||||
|
|
||||||
import com.google.common.cache.RemovalListener;
|
import org.elasticsearch.common.cache.RemovalListener;
|
||||||
import com.google.common.cache.RemovalNotification;
|
import org.elasticsearch.common.cache.RemovalNotification;
|
||||||
|
|
||||||
import org.elasticsearch.common.inject.Inject;
|
|
||||||
import org.elasticsearch.common.metrics.CounterMetric;
|
import org.elasticsearch.common.metrics.CounterMetric;
|
||||||
import org.elasticsearch.common.settings.Settings;
|
import org.elasticsearch.common.settings.Settings;
|
||||||
import org.elasticsearch.index.settings.IndexSettings;
|
import org.elasticsearch.index.settings.IndexSettings;
|
||||||
@ -61,7 +59,7 @@ public class ShardRequestCache extends AbstractIndexShardComponent implements Re
|
|||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void onRemoval(RemovalNotification<IndicesRequestCache.Key, IndicesRequestCache.Value> removalNotification) {
|
public void onRemoval(RemovalNotification<IndicesRequestCache.Key, IndicesRequestCache.Value> removalNotification) {
|
||||||
if (removalNotification.wasEvicted()) {
|
if (removalNotification.getRemovalReason() == RemovalNotification.RemovalReason.EVICTED) {
|
||||||
evictionsMetric.inc();
|
evictionsMetric.inc();
|
||||||
}
|
}
|
||||||
long dec = 0;
|
long dec = 0;
|
||||||
|
@ -1,440 +0,0 @@
|
|||||||
/*
|
|
||||||
* Licensed to Elasticsearch under one or more contributor
|
|
||||||
* license agreements. See the NOTICE file distributed with
|
|
||||||
* this work for additional information regarding copyright
|
|
||||||
* ownership. Elasticsearch licenses this file to you under
|
|
||||||
* the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
* not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing,
|
|
||||||
* software distributed under the License is distributed on an
|
|
||||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
|
||||||
* KIND, either express or implied. See the License for the
|
|
||||||
* specific language governing permissions and limitations
|
|
||||||
* under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package org.elasticsearch.index.codec.postingsformat;
|
|
||||||
|
|
||||||
import org.apache.lucene.codecs.*;
|
|
||||||
import org.apache.lucene.index.*;
|
|
||||||
import org.apache.lucene.search.DocIdSetIterator;
|
|
||||||
import org.apache.lucene.store.*;
|
|
||||||
import org.apache.lucene.util.*;
|
|
||||||
import org.elasticsearch.common.util.BloomFilter;
|
|
||||||
|
|
||||||
import java.io.IOException;
|
|
||||||
import java.util.*;
|
|
||||||
import java.util.Map.Entry;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* <p>
|
|
||||||
* A {@link PostingsFormat} useful for low doc-frequency fields such as primary
|
|
||||||
* keys. Bloom filters are maintained in a ".blm" file which offers "fast-fail"
|
|
||||||
* for reads in segments known to have no record of the key. A choice of
|
|
||||||
* delegate PostingsFormat is used to record all other Postings data.
|
|
||||||
* </p>
|
|
||||||
* <p>
|
|
||||||
* This is a special bloom filter version, based on {@link org.elasticsearch.common.util.BloomFilter} and inspired
|
|
||||||
* by Lucene {@code org.apache.lucene.codecs.bloom.BloomFilteringPostingsFormat}.
|
|
||||||
* @deprecated only for reading old segments
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public class BloomFilterPostingsFormat extends PostingsFormat {
|
|
||||||
|
|
||||||
public static final String BLOOM_CODEC_NAME = "XBloomFilter"; // the Lucene one is named BloomFilter
|
|
||||||
public static final int BLOOM_CODEC_VERSION = 1;
|
|
||||||
public static final int BLOOM_CODEC_VERSION_CHECKSUM = 2;
|
|
||||||
public static final int BLOOM_CODEC_VERSION_CURRENT = BLOOM_CODEC_VERSION_CHECKSUM;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Extension of Bloom Filters file
|
|
||||||
*/
|
|
||||||
static final String BLOOM_EXTENSION = "blm";
|
|
||||||
|
|
||||||
private BloomFilter.Factory bloomFilterFactory = BloomFilter.Factory.DEFAULT;
|
|
||||||
private PostingsFormat delegatePostingsFormat;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates Bloom filters for a selection of fields created in the index. This
|
|
||||||
* is recorded as a set of Bitsets held as a segment summary in an additional
|
|
||||||
* "blm" file. This PostingsFormat delegates to a choice of delegate
|
|
||||||
* PostingsFormat for encoding all other postings data.
|
|
||||||
*
|
|
||||||
* @param delegatePostingsFormat The PostingsFormat that records all the non-bloom filter data i.e.
|
|
||||||
* postings info.
|
|
||||||
* @param bloomFilterFactory The {@link org.elasticsearch.common.util.BloomFilter.Factory} responsible for sizing BloomFilters
|
|
||||||
* appropriately
|
|
||||||
*/
|
|
||||||
public BloomFilterPostingsFormat(PostingsFormat delegatePostingsFormat,
|
|
||||||
BloomFilter.Factory bloomFilterFactory) {
|
|
||||||
super(BLOOM_CODEC_NAME);
|
|
||||||
this.delegatePostingsFormat = delegatePostingsFormat;
|
|
||||||
this.bloomFilterFactory = bloomFilterFactory;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Used only by core Lucene at read-time via Service Provider instantiation -
|
|
||||||
// do not use at Write-time in application code.
|
|
||||||
public BloomFilterPostingsFormat() {
|
|
||||||
super(BLOOM_CODEC_NAME);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public BloomFilteredFieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
|
|
||||||
throw new UnsupportedOperationException("this codec can only be used for reading");
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public BloomFilteredFieldsProducer fieldsProducer(SegmentReadState state)
|
|
||||||
throws IOException {
|
|
||||||
return new BloomFilteredFieldsProducer(state);
|
|
||||||
}
|
|
||||||
|
|
||||||
public PostingsFormat getDelegate() {
|
|
||||||
return delegatePostingsFormat;
|
|
||||||
}
|
|
||||||
|
|
||||||
private final class LazyBloomLoader implements Accountable {
|
|
||||||
private final long offset;
|
|
||||||
private final IndexInput indexInput;
|
|
||||||
private BloomFilter filter;
|
|
||||||
|
|
||||||
private LazyBloomLoader(long offset, IndexInput origial) {
|
|
||||||
this.offset = offset;
|
|
||||||
this.indexInput = origial.clone();
|
|
||||||
}
|
|
||||||
|
|
||||||
synchronized BloomFilter get() throws IOException {
|
|
||||||
if (filter == null) {
|
|
||||||
try (final IndexInput input = indexInput) {
|
|
||||||
input.seek(offset);
|
|
||||||
this.filter = BloomFilter.deserialize(input);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return filter;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public long ramBytesUsed() {
|
|
||||||
return filter == null ? 0l : filter.getSizeInBytes();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public Collection<Accountable> getChildResources() {
|
|
||||||
return Collections.singleton(Accountables.namedAccountable("bloom", ramBytesUsed()));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
public final class BloomFilteredFieldsProducer extends FieldsProducer {
|
|
||||||
private FieldsProducer delegateFieldsProducer;
|
|
||||||
HashMap<String, LazyBloomLoader> bloomsByFieldName = new HashMap<>();
|
|
||||||
private final int version;
|
|
||||||
private final IndexInput data;
|
|
||||||
|
|
||||||
// for internal use only
|
|
||||||
FieldsProducer getDelegate() {
|
|
||||||
return delegateFieldsProducer;
|
|
||||||
}
|
|
||||||
|
|
||||||
public BloomFilteredFieldsProducer(SegmentReadState state)
|
|
||||||
throws IOException {
|
|
||||||
|
|
||||||
final String bloomFileName = IndexFileNames.segmentFileName(
|
|
||||||
state.segmentInfo.name, state.segmentSuffix, BLOOM_EXTENSION);
|
|
||||||
final Directory directory = state.directory;
|
|
||||||
IndexInput dataInput = directory.openInput(bloomFileName, state.context);
|
|
||||||
try {
|
|
||||||
ChecksumIndexInput bloomIn = new BufferedChecksumIndexInput(dataInput.clone());
|
|
||||||
version = CodecUtil.checkHeader(bloomIn, BLOOM_CODEC_NAME, BLOOM_CODEC_VERSION,
|
|
||||||
BLOOM_CODEC_VERSION_CURRENT);
|
|
||||||
// // Load the hash function used in the BloomFilter
|
|
||||||
// hashFunction = HashFunction.forName(bloomIn.readString());
|
|
||||||
// Load the delegate postings format
|
|
||||||
final String delegatePostings = bloomIn.readString();
|
|
||||||
this.delegateFieldsProducer = PostingsFormat.forName(delegatePostings)
|
|
||||||
.fieldsProducer(state);
|
|
||||||
this.data = dataInput;
|
|
||||||
dataInput = null; // null it out such that we don't close it
|
|
||||||
} finally {
|
|
||||||
IOUtils.closeWhileHandlingException(dataInput);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public Iterator<String> iterator() {
|
|
||||||
return delegateFieldsProducer.iterator();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void close() throws IOException {
|
|
||||||
IOUtils.close(data, delegateFieldsProducer);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public Terms terms(String field) throws IOException {
|
|
||||||
LazyBloomLoader filter = bloomsByFieldName.get(field);
|
|
||||||
if (filter == null) {
|
|
||||||
return delegateFieldsProducer.terms(field);
|
|
||||||
} else {
|
|
||||||
Terms result = delegateFieldsProducer.terms(field);
|
|
||||||
if (result == null) {
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
return new BloomFilteredTerms(result, filter.get());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public int size() {
|
|
||||||
return delegateFieldsProducer.size();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public long ramBytesUsed() {
|
|
||||||
long size = delegateFieldsProducer.ramBytesUsed();
|
|
||||||
for (LazyBloomLoader bloomFilter : bloomsByFieldName.values()) {
|
|
||||||
size += bloomFilter.ramBytesUsed();
|
|
||||||
}
|
|
||||||
return size;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public Collection<Accountable> getChildResources() {
|
|
||||||
List<Accountable> resources = new ArrayList<>();
|
|
||||||
resources.addAll(Accountables.namedAccountables("field", bloomsByFieldName));
|
|
||||||
if (delegateFieldsProducer != null) {
|
|
||||||
resources.add(Accountables.namedAccountable("delegate", delegateFieldsProducer));
|
|
||||||
}
|
|
||||||
return Collections.unmodifiableList(resources);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void checkIntegrity() throws IOException {
|
|
||||||
delegateFieldsProducer.checkIntegrity();
|
|
||||||
if (version >= BLOOM_CODEC_VERSION_CHECKSUM) {
|
|
||||||
CodecUtil.checksumEntireFile(data);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public FieldsProducer getMergeInstance() throws IOException {
|
|
||||||
return delegateFieldsProducer.getMergeInstance();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
public static final class BloomFilteredTerms extends FilterLeafReader.FilterTerms {
|
|
||||||
private BloomFilter filter;
|
|
||||||
|
|
||||||
public BloomFilteredTerms(Terms terms, BloomFilter filter) {
|
|
||||||
super(terms);
|
|
||||||
this.filter = filter;
|
|
||||||
}
|
|
||||||
|
|
||||||
public BloomFilter getFilter() {
|
|
||||||
return filter;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public TermsEnum iterator() throws IOException {
|
|
||||||
return new BloomFilteredTermsEnum(this.in, filter);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
static final class BloomFilteredTermsEnum extends TermsEnum {
|
|
||||||
|
|
||||||
private Terms delegateTerms;
|
|
||||||
private TermsEnum delegateTermsEnum;
|
|
||||||
private BloomFilter filter;
|
|
||||||
|
|
||||||
public BloomFilteredTermsEnum(Terms other, BloomFilter filter) {
|
|
||||||
this.delegateTerms = other;
|
|
||||||
this.filter = filter;
|
|
||||||
}
|
|
||||||
|
|
||||||
void reset(Terms others) {
|
|
||||||
this.delegateTermsEnum = null;
|
|
||||||
this.delegateTerms = others;
|
|
||||||
}
|
|
||||||
|
|
||||||
private TermsEnum getDelegate() throws IOException {
|
|
||||||
if (delegateTermsEnum == null) {
|
|
||||||
/* pull the iterator only if we really need it -
|
|
||||||
* this can be a relatively heavy operation depending on the
|
|
||||||
* delegate postings format and they underlying directory
|
|
||||||
* (clone IndexInput) */
|
|
||||||
delegateTermsEnum = delegateTerms.iterator();
|
|
||||||
}
|
|
||||||
return delegateTermsEnum;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public final BytesRef next() throws IOException {
|
|
||||||
return getDelegate().next();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public final boolean seekExact(BytesRef text)
|
|
||||||
throws IOException {
|
|
||||||
// The magical fail-fast speed up that is the entire point of all of
|
|
||||||
// this code - save a disk seek if there is a match on an in-memory
|
|
||||||
// structure
|
|
||||||
// that may occasionally give a false positive but guaranteed no false
|
|
||||||
// negatives
|
|
||||||
if (!filter.mightContain(text)) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
return getDelegate().seekExact(text);
|
|
||||||
}
|
|
||||||
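// --- Illustrative sketch, not part of the original file: the contract seekExact relies on.
// mightContain(term) == false guarantees the term is absent from this segment, so the
// delegate disk seek can be skipped; true only means "possibly present" (false positives allowed).
static boolean mightNeedDiskSeek(BloomFilter filter, BytesRef term) {
    if (filter.mightContain(term) == false) {
        return false;   // definitely absent: no disk seek needed
    }
    return true;        // possibly present: fall through to the real terms lookup
}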
|
|
||||||
@Override
|
|
||||||
public final SeekStatus seekCeil(BytesRef text)
|
|
||||||
throws IOException {
|
|
||||||
return getDelegate().seekCeil(text);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public final void seekExact(long ord) throws IOException {
|
|
||||||
getDelegate().seekExact(ord);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public final BytesRef term() throws IOException {
|
|
||||||
return getDelegate().term();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public final long ord() throws IOException {
|
|
||||||
return getDelegate().ord();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public final int docFreq() throws IOException {
|
|
||||||
return getDelegate().docFreq();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public final long totalTermFreq() throws IOException {
|
|
||||||
return getDelegate().totalTermFreq();
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public PostingsEnum postings(PostingsEnum reuse, int flags) throws IOException {
|
|
||||||
return getDelegate().postings(reuse, flags);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: would be great to move this out to test code, but the interaction between es090 and bloom is complex
|
|
||||||
// at least it is not accessible via SPI
|
|
||||||
public final class BloomFilteredFieldsConsumer extends FieldsConsumer {
|
|
||||||
private final FieldsConsumer delegateFieldsConsumer;
|
|
||||||
private final Map<FieldInfo, BloomFilter> bloomFilters = new HashMap<>();
|
|
||||||
private final SegmentWriteState state;
|
|
||||||
private boolean closed = false;
|
|
||||||
|
|
||||||
// private PostingsFormat delegatePostingsFormat;
|
|
||||||
|
|
||||||
public BloomFilteredFieldsConsumer(FieldsConsumer fieldsConsumer,
|
|
||||||
SegmentWriteState state, PostingsFormat delegatePostingsFormat) {
|
|
||||||
this.delegateFieldsConsumer = fieldsConsumer;
|
|
||||||
// this.delegatePostingsFormat=delegatePostingsFormat;
|
|
||||||
this.state = state;
|
|
||||||
}
|
|
||||||
|
|
||||||
// for internal use only
|
|
||||||
public FieldsConsumer getDelegate() {
|
|
||||||
return delegateFieldsConsumer;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void write(Fields fields) throws IOException {
|
|
||||||
|
|
||||||
// Delegate must write first: it may have opened files
|
|
||||||
// on creating the class
|
|
||||||
// (e.g. Lucene41PostingsConsumer), and write() will
|
|
||||||
// close them; alternatively, if we delayed pulling
|
|
||||||
// the fields consumer until here, we could do it
|
|
||||||
// afterwards:
|
|
||||||
delegateFieldsConsumer.write(fields);
|
|
||||||
|
|
||||||
for(String field : fields) {
|
|
||||||
Terms terms = fields.terms(field);
|
|
||||||
if (terms == null) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
FieldInfo fieldInfo = state.fieldInfos.fieldInfo(field);
|
|
||||||
TermsEnum termsEnum = terms.iterator();
|
|
||||||
|
|
||||||
BloomFilter bloomFilter = null;
|
|
||||||
|
|
||||||
PostingsEnum postings = null;
|
|
||||||
while (true) {
|
|
||||||
BytesRef term = termsEnum.next();
|
|
||||||
if (term == null) {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
if (bloomFilter == null) {
|
|
||||||
bloomFilter = bloomFilterFactory.createFilter(state.segmentInfo.maxDoc());
|
|
||||||
assert bloomFilters.containsKey(field) == false;
|
|
||||||
bloomFilters.put(fieldInfo, bloomFilter);
|
|
||||||
}
|
|
||||||
// Make sure there's at least one doc for this term:
|
|
||||||
postings = termsEnum.postings(postings, 0);
|
|
||||||
if (postings.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
|
|
||||||
bloomFilter.put(term);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void close() throws IOException {
|
|
||||||
if (closed) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
closed = true;
|
|
||||||
delegateFieldsConsumer.close();
|
|
||||||
// Now we are done accumulating values for these fields
|
|
||||||
List<Entry<FieldInfo, BloomFilter>> nonSaturatedBlooms = new ArrayList<>();
|
|
||||||
|
|
||||||
for (Entry<FieldInfo, BloomFilter> entry : bloomFilters.entrySet()) {
|
|
||||||
nonSaturatedBlooms.add(entry);
|
|
||||||
}
|
|
||||||
String bloomFileName = IndexFileNames.segmentFileName(
|
|
||||||
state.segmentInfo.name, state.segmentSuffix, BLOOM_EXTENSION);
|
|
||||||
IndexOutput bloomOutput = null;
|
|
||||||
try {
|
|
||||||
bloomOutput = state.directory
|
|
||||||
.createOutput(bloomFileName, state.context);
|
|
||||||
CodecUtil.writeHeader(bloomOutput, BLOOM_CODEC_NAME,
|
|
||||||
BLOOM_CODEC_VERSION_CURRENT);
|
|
||||||
// remember the name of the postings format we will delegate to
|
|
||||||
bloomOutput.writeString(delegatePostingsFormat.getName());
|
|
||||||
|
|
||||||
// First field in the output file is the number of fields+blooms saved
|
|
||||||
bloomOutput.writeInt(nonSaturatedBlooms.size());
|
|
||||||
for (Entry<FieldInfo, BloomFilter> entry : nonSaturatedBlooms) {
|
|
||||||
FieldInfo fieldInfo = entry.getKey();
|
|
||||||
BloomFilter bloomFilter = entry.getValue();
|
|
||||||
bloomOutput.writeInt(fieldInfo.number);
|
|
||||||
saveAppropriatelySizedBloomFilter(bloomOutput, bloomFilter, fieldInfo);
|
|
||||||
}
|
|
||||||
CodecUtil.writeFooter(bloomOutput);
|
|
||||||
} finally {
|
|
||||||
IOUtils.close(bloomOutput);
|
|
||||||
}
|
|
||||||
//We are done with large bitsets so no need to keep them hanging around
|
|
||||||
bloomFilters.clear();
|
|
||||||
}
|
|
||||||
|
|
||||||
private void saveAppropriatelySizedBloomFilter(IndexOutput bloomOutput,
|
|
||||||
BloomFilter bloomFilter, FieldInfo fieldInfo) throws IOException {
|
|
||||||
BloomFilter.serilaize(bloomFilter, bloomOutput);
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,77 +0,0 @@
|
|||||||
/*
|
|
||||||
* Licensed to Elasticsearch under one or more contributor
|
|
||||||
* license agreements. See the NOTICE file distributed with
|
|
||||||
* this work for additional information regarding copyright
|
|
||||||
* ownership. Elasticsearch licenses this file to you under
|
|
||||||
* the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
* not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing,
|
|
||||||
* software distributed under the License is distributed on an
|
|
||||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
|
||||||
* KIND, either express or implied. See the License for the
|
|
||||||
* specific language governing permissions and limitations
|
|
||||||
* under the License.
|
|
||||||
*/
|
|
||||||
package org.elasticsearch.index.codec.postingsformat;
|
|
||||||
|
|
||||||
import com.google.common.collect.Iterators;
|
|
||||||
import org.apache.lucene.codecs.FieldsConsumer;
|
|
||||||
import org.apache.lucene.codecs.FieldsProducer;
|
|
||||||
import org.apache.lucene.codecs.PostingsFormat;
|
|
||||||
import org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat;
|
|
||||||
import org.apache.lucene.index.Fields;
|
|
||||||
import org.apache.lucene.index.FilterLeafReader;
|
|
||||||
import org.apache.lucene.index.SegmentReadState;
|
|
||||||
import org.apache.lucene.index.SegmentWriteState;
|
|
||||||
import org.elasticsearch.common.lucene.Lucene;
|
|
||||||
import org.elasticsearch.common.util.BloomFilter;
|
|
||||||
import org.elasticsearch.index.codec.postingsformat.BloomFilterPostingsFormat.BloomFilteredFieldsConsumer;
|
|
||||||
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
|
|
||||||
|
|
||||||
import java.io.IOException;
|
|
||||||
import java.util.Iterator;
|
|
||||||
import java.util.function.Predicate;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This is the old default postings format for Elasticsearch that special cases
|
|
||||||
* the <tt>_uid</tt> field to use a bloom filter while all other fields
|
|
||||||
* will use a {@link Lucene50PostingsFormat}. This format will reuse the underlying
|
|
||||||
* {@link Lucene50PostingsFormat} and its files also for the <tt>_uid</tt> saving up to
|
|
||||||
* 5 files per segment in the default case.
|
|
||||||
* <p>
|
|
||||||
* @deprecated only for reading old segments
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public class Elasticsearch090PostingsFormat extends PostingsFormat {
|
|
||||||
protected final BloomFilterPostingsFormat bloomPostings;
|
|
||||||
|
|
||||||
public Elasticsearch090PostingsFormat() {
|
|
||||||
super("es090");
|
|
||||||
Lucene50PostingsFormat delegate = new Lucene50PostingsFormat();
|
|
||||||
assert delegate.getName().equals(Lucene.LATEST_POSTINGS_FORMAT);
|
|
||||||
bloomPostings = new BloomFilterPostingsFormat(delegate, BloomFilter.Factory.DEFAULT);
|
|
||||||
}
|
|
||||||
|
|
||||||
public PostingsFormat getDefaultWrapped() {
|
|
||||||
return bloomPostings.getDelegate();
|
|
||||||
}
|
|
||||||
|
|
||||||
protected static final Predicate<String> UID_FIELD_FILTER = field -> UidFieldMapper.NAME.equals(field);
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
|
|
||||||
throw new UnsupportedOperationException("this codec can only be used for reading");
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException {
|
|
||||||
// we can just return the delegate here since we didn't record bloom filters for
|
|
||||||
// the other fields.
|
|
||||||
return bloomPostings.fieldsProducer(state);
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
@ -1,66 +0,0 @@
|
|||||||
/*
|
|
||||||
* Licensed to Elasticsearch under one or more contributor
|
|
||||||
* license agreements. See the NOTICE file distributed with
|
|
||||||
* this work for additional information regarding copyright
|
|
||||||
* ownership. Elasticsearch licenses this file to you under
|
|
||||||
* the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
* not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing,
|
|
||||||
* software distributed under the License is distributed on an
|
|
||||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
|
||||||
* KIND, either express or implied. See the License for the
|
|
||||||
* specific language governing permissions and limitations
|
|
||||||
* under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package org.elasticsearch.index.engine;
|
|
||||||
|
|
||||||
import org.elasticsearch.common.io.stream.StreamInput;
|
|
||||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
|
||||||
import org.elasticsearch.index.shard.ShardId;
|
|
||||||
|
|
||||||
import java.io.IOException;
|
|
||||||
import java.util.Objects;
|
|
||||||
|
|
||||||
/**
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
public class CreateFailedEngineException extends EngineException {
|
|
||||||
|
|
||||||
private final String type;
|
|
||||||
|
|
||||||
private final String id;
|
|
||||||
|
|
||||||
public CreateFailedEngineException(ShardId shardId, String type, String id, Throwable cause) {
|
|
||||||
super(shardId, "Create failed for [" + type + "#" + id + "]", cause);
|
|
||||||
Objects.requireNonNull(type, "type must not be null");
|
|
||||||
Objects.requireNonNull(id, "id must not be null");
|
|
||||||
this.type = type;
|
|
||||||
this.id = id;
|
|
||||||
}
|
|
||||||
|
|
||||||
public CreateFailedEngineException(StreamInput in) throws IOException{
|
|
||||||
super(in);
|
|
||||||
type = in.readString();
|
|
||||||
id = in.readString();
|
|
||||||
}
|
|
||||||
|
|
||||||
public String type() {
|
|
||||||
return this.type;
|
|
||||||
}
|
|
||||||
|
|
||||||
public String id() {
|
|
||||||
return this.id;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void writeTo(StreamOutput out) throws IOException {
|
|
||||||
super.writeTo(out);
|
|
||||||
out.writeString(type);
|
|
||||||
out.writeString(id);
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,38 +0,0 @@
|
|||||||
/*
|
|
||||||
* Licensed to Elasticsearch under one or more contributor
|
|
||||||
* license agreements. See the NOTICE file distributed with
|
|
||||||
* this work for additional information regarding copyright
|
|
||||||
* ownership. Elasticsearch licenses this file to you under
|
|
||||||
* the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
* not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing,
|
|
||||||
* software distributed under the License is distributed on an
|
|
||||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
|
||||||
* KIND, either express or implied. See the License for the
|
|
||||||
* specific language governing permissions and limitations
|
|
||||||
* under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package org.elasticsearch.index.engine;
|
|
||||||
|
|
||||||
import org.elasticsearch.common.io.stream.StreamInput;
|
|
||||||
import org.elasticsearch.index.shard.ShardId;
|
|
||||||
|
|
||||||
import java.io.IOException;
|
|
||||||
|
|
||||||
/** @deprecated Delete-by-query is removed in 2.0, but we keep this so translog can replay on upgrade. */
|
|
||||||
@Deprecated
|
|
||||||
public class DeleteByQueryFailedEngineException extends EngineException {
|
|
||||||
|
|
||||||
public DeleteByQueryFailedEngineException(ShardId shardId, Engine.DeleteByQuery deleteByQuery, Throwable cause) {
|
|
||||||
super(shardId, "Delete by query failed for [" + deleteByQuery.query() + "]", cause);
|
|
||||||
}
|
|
||||||
|
|
||||||
public DeleteByQueryFailedEngineException(StreamInput in) throws IOException{
|
|
||||||
super(in);
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,44 +0,0 @@
|
|||||||
/*
|
|
||||||
* Licensed to Elasticsearch under one or more contributor
|
|
||||||
* license agreements. See the NOTICE file distributed with
|
|
||||||
* this work for additional information regarding copyright
|
|
||||||
* ownership. Elasticsearch licenses this file to you under
|
|
||||||
* the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
* not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing,
|
|
||||||
* software distributed under the License is distributed on an
|
|
||||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
|
||||||
* KIND, either express or implied. See the License for the
|
|
||||||
* specific language governing permissions and limitations
|
|
||||||
* under the License.
|
|
||||||
*/
|
|
||||||
package org.elasticsearch.index.engine;
|
|
||||||
|
|
||||||
import org.elasticsearch.common.io.stream.StreamInput;
|
|
||||||
import org.elasticsearch.index.shard.ShardId;
|
|
||||||
import org.elasticsearch.rest.RestStatus;
|
|
||||||
|
|
||||||
import java.io.IOException;
|
|
||||||
|
|
||||||
/**
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
public class DocumentAlreadyExistsException extends EngineException {
|
|
||||||
|
|
||||||
public DocumentAlreadyExistsException(ShardId shardId, String type, String id) {
|
|
||||||
super(shardId, "[" + type + "][" + id + "]: document already exists");
|
|
||||||
}
|
|
||||||
|
|
||||||
public DocumentAlreadyExistsException(StreamInput in) throws IOException{
|
|
||||||
super(in);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public RestStatus status() {
|
|
||||||
return RestStatus.CONFLICT;
|
|
||||||
}
|
|
||||||
}
|
|
@ -45,7 +45,6 @@ import org.elasticsearch.index.mapper.ParseContext.Document;
|
|||||||
import org.elasticsearch.index.mapper.ParsedDocument;
|
import org.elasticsearch.index.mapper.ParsedDocument;
|
||||||
import org.elasticsearch.index.mapper.Uid;
|
import org.elasticsearch.index.mapper.Uid;
|
||||||
import org.elasticsearch.index.merge.MergeStats;
|
import org.elasticsearch.index.merge.MergeStats;
|
||||||
import org.elasticsearch.index.shard.IndexShard;
|
|
||||||
import org.elasticsearch.index.shard.ShardId;
|
import org.elasticsearch.index.shard.ShardId;
|
||||||
import org.elasticsearch.index.store.Store;
|
import org.elasticsearch.index.store.Store;
|
||||||
import org.elasticsearch.index.translog.Translog;
|
import org.elasticsearch.index.translog.Translog;
|
||||||
@ -60,7 +59,6 @@ import java.util.concurrent.locks.Lock;
|
|||||||
import java.util.concurrent.locks.ReentrantLock;
|
import java.util.concurrent.locks.ReentrantLock;
|
||||||
import java.util.concurrent.locks.ReentrantReadWriteLock;
|
import java.util.concurrent.locks.ReentrantReadWriteLock;
|
||||||
import java.util.function.Function;
|
import java.util.function.Function;
|
||||||
import java.util.function.Supplier;
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
*
|
*
|
||||||
@ -144,7 +142,8 @@ public abstract class Engine implements Closeable {
|
|||||||
return new MergeStats();
|
return new MergeStats();
|
||||||
}
|
}
|
||||||
|
|
||||||
/** A throttling class that can be activated, causing the
|
/**
|
||||||
|
* A throttling class that can be activated, causing the
|
||||||
* {@code acquireThrottle} method to block on a lock when throttling
|
* {@code acquireThrottle} method to block on a lock when throttling
|
||||||
* is enabled
|
* is enabled
|
||||||
*/
|
*/
|
||||||
@ -203,20 +202,15 @@ public abstract class Engine implements Closeable {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
public abstract void create(Create create) throws EngineException;
|
public abstract boolean index(Index operation) throws EngineException;
|
||||||
|
|
||||||
public abstract boolean index(Index index) throws EngineException;
|
|
||||||
|
|
||||||
public abstract void delete(Delete delete) throws EngineException;
|
public abstract void delete(Delete delete) throws EngineException;
|
||||||
|
|
||||||
/** @deprecated This was removed, but we keep this API so translog can replay any DBQs on upgrade. */
|
|
||||||
@Deprecated
|
|
||||||
public abstract void delete(DeleteByQuery delete) throws EngineException;
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Attempts to do a special commit where the given syncID is put into the commit data. The attempt
|
* Attempts to do a special commit where the given syncID is put into the commit data. The attempt
|
||||||
* succeeds if there are not pending writes in lucene and the current point is equal to the expected one.
|
* succeeds if there are not pending writes in lucene and the current point is equal to the expected one.
|
||||||
* @param syncId id of this sync
|
*
|
||||||
|
* @param syncId id of this sync
|
||||||
* @param expectedCommitId the expected value of
|
* @param expectedCommitId the expected value of
|
||||||
* @return true if the sync commit was made, false o.w.
|
* @return true if the sync commit was made, false o.w.
|
||||||
*/
|
*/
|
||||||
@ -243,7 +237,8 @@ public abstract class Engine implements Closeable {
|
|||||||
if (get.versionType().isVersionConflictForReads(docIdAndVersion.version, get.version())) {
|
if (get.versionType().isVersionConflictForReads(docIdAndVersion.version, get.version())) {
|
||||||
Releasables.close(searcher);
|
Releasables.close(searcher);
|
||||||
Uid uid = Uid.createUid(get.uid().text());
|
Uid uid = Uid.createUid(get.uid().text());
|
||||||
throw new VersionConflictEngineException(shardId, uid.type(), uid.id(), docIdAndVersion.version, get.version());
|
throw new VersionConflictEngineException(shardId, uid.type(), uid.id(),
|
||||||
|
get.versionType().explainConflictForReads(docIdAndVersion.version, get.version()));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -328,7 +323,7 @@ public abstract class Engine implements Closeable {
|
|||||||
} catch (IOException e) {
|
} catch (IOException e) {
|
||||||
// Fall back to reading from the store if reading from the commit fails
|
// Fall back to reading from the store if reading from the commit fails
|
||||||
try {
|
try {
|
||||||
return store. readLastCommittedSegmentsInfo();
|
return store.readLastCommittedSegmentsInfo();
|
||||||
} catch (IOException e2) {
|
} catch (IOException e2) {
|
||||||
e2.addSuppressed(e);
|
e2.addSuppressed(e);
|
||||||
throw e2;
|
throw e2;
|
||||||
@ -366,6 +361,9 @@ public abstract class Engine implements Closeable {
|
|||||||
stats.addIndexWriterMaxMemoryInBytes(0);
|
stats.addIndexWriterMaxMemoryInBytes(0);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/** How much heap Lucene's IndexWriter is using */
|
||||||
|
abstract public long indexWriterRAMBytesUsed();
|
||||||
|
|
||||||
protected Segment[] getSegmentInfo(SegmentInfos lastCommittedSegmentInfos, boolean verbose) {
|
protected Segment[] getSegmentInfo(SegmentInfos lastCommittedSegmentInfos, boolean verbose) {
|
||||||
ensureOpen();
|
ensureOpen();
|
||||||
Map<String, Segment> segments = new HashMap<>();
|
Map<String, Segment> segments = new HashMap<>();
|
||||||
@ -469,7 +467,8 @@ public abstract class Engine implements Closeable {
|
|||||||
|
|
||||||
/**
|
/**
|
||||||
* Flushes the state of the engine including the transaction log, clearing memory.
|
* Flushes the state of the engine including the transaction log, clearing memory.
|
||||||
* @param force if <code>true</code> a lucene commit is executed even if no changes need to be committed.
|
*
|
||||||
|
* @param force if <code>true</code> a lucene commit is executed even if no changes need to be committed.
|
||||||
* @param waitIfOngoing if <code>true</code> this call will block until all currently running flushes have finished.
|
* @param waitIfOngoing if <code>true</code> this call will block until all currently running flushes have finished.
|
||||||
* Otherwise this call will return without blocking.
|
* Otherwise this call will return without blocking.
|
||||||
* @return the commit Id for the resulting commit
|
* @return the commit Id for the resulting commit
|
||||||
@ -607,62 +606,97 @@ public abstract class Engine implements Closeable {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
public static interface Operation {
|
public static abstract class Operation {
|
||||||
static enum Type {
|
|
||||||
CREATE,
|
|
||||||
INDEX,
|
|
||||||
DELETE
|
|
||||||
}
|
|
||||||
|
|
||||||
static enum Origin {
|
|
||||||
PRIMARY,
|
|
||||||
REPLICA,
|
|
||||||
RECOVERY
|
|
||||||
}
|
|
||||||
|
|
||||||
Type opType();
|
|
||||||
|
|
||||||
Origin origin();
|
|
||||||
}
|
|
||||||
|
|
||||||
public static abstract class IndexingOperation implements Operation {
|
|
||||||
|
|
||||||
private final Term uid;
|
private final Term uid;
|
||||||
private final ParsedDocument doc;
|
|
||||||
private long version;
|
private long version;
|
||||||
private final VersionType versionType;
|
private final VersionType versionType;
|
||||||
private final Origin origin;
|
private final Origin origin;
|
||||||
private Translog.Location location;
|
private Translog.Location location;
|
||||||
|
|
||||||
private final long startTime;
|
private final long startTime;
|
||||||
private long endTime;
|
private long endTime;
|
||||||
|
|
||||||
public IndexingOperation(Term uid, ParsedDocument doc, long version, VersionType versionType, Origin origin, long startTime) {
|
public Operation(Term uid, long version, VersionType versionType, Origin origin, long startTime) {
|
||||||
this.uid = uid;
|
this.uid = uid;
|
||||||
this.doc = doc;
|
|
||||||
this.version = version;
|
this.version = version;
|
||||||
this.versionType = versionType;
|
this.versionType = versionType;
|
||||||
this.origin = origin;
|
this.origin = origin;
|
||||||
this.startTime = startTime;
|
this.startTime = startTime;
|
||||||
}
|
}
|
||||||
|
|
||||||
public IndexingOperation(Term uid, ParsedDocument doc) {
|
public static enum Origin {
|
||||||
this(uid, doc, Versions.MATCH_ANY, VersionType.INTERNAL, Origin.PRIMARY, System.nanoTime());
|
PRIMARY,
|
||||||
|
REPLICA,
|
||||||
|
RECOVERY
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
|
||||||
public Origin origin() {
|
public Origin origin() {
|
||||||
return this.origin;
|
return this.origin;
|
||||||
}
|
}
|
||||||
|
|
||||||
public ParsedDocument parsedDoc() {
|
|
||||||
return this.doc;
|
|
||||||
}
|
|
||||||
|
|
||||||
public Term uid() {
|
public Term uid() {
|
||||||
return this.uid;
|
return this.uid;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public long version() {
|
||||||
|
return this.version;
|
||||||
|
}
|
||||||
|
|
||||||
|
public void updateVersion(long version) {
|
||||||
|
this.version = version;
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setTranslogLocation(Translog.Location location) {
|
||||||
|
this.location = location;
|
||||||
|
}
|
||||||
|
|
||||||
|
public Translog.Location getTranslogLocation() {
|
||||||
|
return this.location;
|
||||||
|
}
|
||||||
|
|
||||||
|
public VersionType versionType() {
|
||||||
|
return this.versionType;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns operation start time in nanoseconds.
|
||||||
|
*/
|
||||||
|
public long startTime() {
|
||||||
|
return this.startTime;
|
||||||
|
}
|
||||||
|
|
||||||
|
public void endTime(long endTime) {
|
||||||
|
this.endTime = endTime;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns operation end time in nanoseconds.
|
||||||
|
*/
|
||||||
|
public long endTime() {
|
||||||
|
return this.endTime;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public static class Index extends Operation {
|
||||||
|
|
||||||
|
private final ParsedDocument doc;
|
||||||
|
|
||||||
|
public Index(Term uid, ParsedDocument doc, long version, VersionType versionType, Origin origin, long startTime) {
|
||||||
|
super(uid, version, versionType, origin, startTime);
|
||||||
|
this.doc = doc;
|
||||||
|
}
|
||||||
|
|
||||||
|
public Index(Term uid, ParsedDocument doc) {
|
||||||
|
this(uid, doc, Versions.MATCH_ANY);
|
||||||
|
}
|
||||||
|
|
||||||
|
public Index(Term uid, ParsedDocument doc, long version) {
|
||||||
|
this(uid, doc, version, VersionType.INTERNAL, Origin.PRIMARY, System.nanoTime());
|
||||||
|
}
|
||||||
|
|
||||||
|
public ParsedDocument parsedDoc() {
|
||||||
|
return this.doc;
|
||||||
|
}
|
||||||
|
|
||||||
public String type() {
|
public String type() {
|
||||||
return this.doc.type();
|
return this.doc.type();
|
||||||
}
|
}
|
||||||
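// --- Illustrative sketch, not part of this change: driving the refactored Engine.Index
// operation. 'uid', 'parsedDoc' and 'location' are placeholders; the constructor defaults,
// index() return value, updateVersion/setTranslogLocation and the nanosecond timing
// accessors are taken from the hunks above.
static long indexExample(Engine engine, Term uid, ParsedDocument parsedDoc, Translog.Location location) {
    Engine.Index op = new Engine.Index(uid, parsedDoc);   // MATCH_ANY / INTERNAL / PRIMARY defaults
    boolean created = engine.index(op);                   // index() now reports whether the doc was created
    op.setTranslogLocation(location);                     // normally done inside the engine itself
    op.endTime(System.nanoTime());
    return op.endTime() - op.startTime();                 // operation duration in nanoseconds
}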
@ -683,27 +717,12 @@ public abstract class Engine implements Closeable {
|
|||||||
return this.doc.ttl();
|
return this.doc.ttl();
|
||||||
}
|
}
|
||||||
|
|
||||||
public long version() {
|
@Override
|
||||||
return this.version;
|
|
||||||
}
|
|
||||||
|
|
||||||
public void updateVersion(long version) {
|
public void updateVersion(long version) {
|
||||||
this.version = version;
|
super.updateVersion(version);
|
||||||
this.doc.version().setLongValue(version);
|
this.doc.version().setLongValue(version);
|
||||||
}
|
}
|
||||||
|
|
||||||
public void setTranslogLocation(Translog.Location location) {
|
|
||||||
this.location = location;
|
|
||||||
}
|
|
||||||
|
|
||||||
public Translog.Location getTranslogLocation() {
|
|
||||||
return this.location;
|
|
||||||
}
|
|
||||||
|
|
||||||
public VersionType versionType() {
|
|
||||||
return this.versionType;
|
|
||||||
}
|
|
||||||
|
|
||||||
public String parent() {
|
public String parent() {
|
||||||
return this.doc.parent();
|
return this.doc.parent();
|
||||||
}
|
}
|
||||||
@ -715,96 +734,17 @@ public abstract class Engine implements Closeable {
|
|||||||
public BytesReference source() {
|
public BytesReference source() {
|
||||||
return this.doc.source();
|
return this.doc.source();
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns operation start time in nanoseconds.
|
|
||||||
*/
|
|
||||||
public long startTime() {
|
|
||||||
return this.startTime;
|
|
||||||
}
|
|
||||||
|
|
||||||
public void endTime(long endTime) {
|
|
||||||
this.endTime = endTime;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns operation end time in nanoseconds.
|
|
||||||
*/
|
|
||||||
public long endTime() {
|
|
||||||
return this.endTime;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Execute this operation against the provided {@link IndexShard} and
|
|
||||||
* return whether the document was created.
|
|
||||||
*/
|
|
||||||
public abstract boolean execute(IndexShard shard);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
public static final class Create extends IndexingOperation {
|
public static class Delete extends Operation {
|
||||||
|
|
||||||
public Create(Term uid, ParsedDocument doc, long version, VersionType versionType, Origin origin, long startTime) {
|
|
||||||
super(uid, doc, version, versionType, origin, startTime);
|
|
||||||
}
|
|
||||||
|
|
||||||
public Create(Term uid, ParsedDocument doc) {
|
|
||||||
super(uid, doc);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public Type opType() {
|
|
||||||
return Type.CREATE;
|
|
||||||
}
|
|
||||||
|
|
||||||
-@Override
-public boolean execute(IndexShard shard) {
-shard.create(this);
-return true;
-}
-}

-public static final class Index extends IndexingOperation {

-public Index(Term uid, ParsedDocument doc, long version, VersionType versionType, Origin origin, long startTime) {
-super(uid, doc, version, versionType, origin, startTime);
-}

-public Index(Term uid, ParsedDocument doc) {
-super(uid, doc);
-}

-@Override
-public Type opType() {
-return Type.INDEX;
-}

-@Override
-public boolean execute(IndexShard shard) {
-return shard.index(this);
-}
-}

-public static class Delete implements Operation {
private final String type;
private final String id;
-private final Term uid;
-private long version;
-private final VersionType versionType;
-private final Origin origin;
private boolean found;

-private final long startTime;
-private long endTime;
-private Translog.Location location;

public Delete(String type, String id, Term uid, long version, VersionType versionType, Origin origin, long startTime, boolean found) {
+super(uid, version, versionType, origin, startTime);
this.type = type;
this.id = id;
-this.uid = uid;
-this.version = version;
-this.versionType = versionType;
-this.origin = origin;
-this.startTime = startTime;
this.found = found;
}

@@ -816,16 +756,6 @@ public abstract class Engine implements Closeable {
this(template.type(), template.id(), template.uid(), template.version(), versionType, template.origin(), template.startTime(), template.found());
}

-@Override
-public Type opType() {
-return Type.DELETE;
-}

-@Override
-public Origin origin() {
-return this.origin;
-}

public String type() {
return this.type;
}
@@ -834,55 +764,14 @@ public abstract class Engine implements Closeable {
return this.id;
}

-public Term uid() {
-return this.uid;
-}

public void updateVersion(long version, boolean found) {
-this.version = version;
+updateVersion(version);
this.found = found;
}

-/**
-* before delete execution this is the version to be deleted. After this is the version of the "delete" transaction record.
-*/
-public long version() {
-return this.version;
-}

-public VersionType versionType() {
-return this.versionType;
-}

public boolean found() {
return this.found;
}

-/**
-* Returns operation start time in nanoseconds.
-*/
-public long startTime() {
-return this.startTime;
-}

-public void endTime(long endTime) {
-this.endTime = endTime;
-}

-/**
-* Returns operation end time in nanoseconds.
-*/
-public long endTime() {
-return this.endTime;
-}

-public void setTranslogLocation(Translog.Location location) {
-this.location = location;
-}

-public Translog.Location getTranslogLocation() {
-return this.location;
-}
}

public static class DeleteByQuery {
@@ -1135,12 +1024,18 @@ public abstract class Engine implements Closeable {

@Override
public boolean equals(Object o) {
-if (this == o) return true;
-if (o == null || getClass() != o.getClass()) return false;
+if (this == o) {
+return true;
+}
+if (o == null || getClass() != o.getClass()) {
+return false;
+}

CommitId commitId = (CommitId) o;

-if (!Arrays.equals(id, commitId.id)) return false;
+if (!Arrays.equals(id, commitId.id)) {
+return false;
+}

return true;
}
@@ -1151,5 +1046,6 @@ public abstract class Engine implements Closeable {
}
}

-public void onSettingsChanged() {}
+public void onSettingsChanged() {
+}
}
@@ -40,6 +40,7 @@ import org.elasticsearch.index.shard.TranslogRecoveryPerformer;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.translog.TranslogConfig;
import org.elasticsearch.indices.IndicesWarmer;
+import org.elasticsearch.indices.memory.IndexingMemoryController;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.concurrent.TimeUnit;
@@ -107,8 +108,6 @@ public final class EngineConfig {

public static final TimeValue DEFAULT_REFRESH_INTERVAL = new TimeValue(1, TimeUnit.SECONDS);
public static final TimeValue DEFAULT_GC_DELETES = TimeValue.timeValueSeconds(60);
-public static final ByteSizeValue DEFAULT_INDEX_BUFFER_SIZE = new ByteSizeValue(64, ByteSizeUnit.MB);
-public static final ByteSizeValue INACTIVE_SHARD_INDEXING_BUFFER = ByteSizeValue.parseBytesSizeValue("500kb", "INACTIVE_SHARD_INDEXING_BUFFER");

public static final String DEFAULT_VERSION_MAP_SIZE = "25%";

@@ -139,7 +138,8 @@ public final class EngineConfig {
this.failedEngineListener = failedEngineListener;
this.compoundOnFlush = indexSettings.getAsBoolean(EngineConfig.INDEX_COMPOUND_ON_FLUSH, compoundOnFlush);
codecName = indexSettings.get(EngineConfig.INDEX_CODEC_SETTING, EngineConfig.DEFAULT_CODEC_NAME);
-indexingBufferSize = DEFAULT_INDEX_BUFFER_SIZE;
+// We start up inactive and rely on IndexingMemoryController to give us our fair share once we start indexing:
+indexingBufferSize = IndexingMemoryController.INACTIVE_SHARD_INDEXING_BUFFER;
gcDeletesInMillis = indexSettings.getAsTime(INDEX_GC_DELETES_SETTING, EngineConfig.DEFAULT_GC_DELETES).millis();
versionMapSizeSetting = indexSettings.get(INDEX_VERSION_MAP_SIZE, DEFAULT_VERSION_MAP_SIZE);
updateVersionMapSize();
@@ -258,10 +258,10 @@ public final class EngineConfig {

/**
* Returns a {@link org.elasticsearch.index.indexing.ShardIndexingService} used inside the engine to inform about
-* pre and post index and create operations. The operations are used for statistic purposes etc.
+* pre and post index. The operations are used for statistic purposes etc.
*
-* @see org.elasticsearch.index.indexing.ShardIndexingService#postCreate(org.elasticsearch.index.engine.Engine.Create)
-* @see org.elasticsearch.index.indexing.ShardIndexingService#preCreate(org.elasticsearch.index.engine.Engine.Create)
+* @see org.elasticsearch.index.indexing.ShardIndexingService#postIndex(Engine.Index)
+* @see org.elasticsearch.index.indexing.ShardIndexingService#preIndex(Engine.Index)
*
*/
public ShardIndexingService getIndexingService() {
@@ -30,16 +30,16 @@ import java.io.IOException;
*/
public class EngineException extends ElasticsearchException {

-public EngineException(ShardId shardId, String msg) {
-this(shardId, msg, null);
+public EngineException(ShardId shardId, String msg, Object... params) {
+this(shardId, msg, null, params);
}

-public EngineException(ShardId shardId, String msg, Throwable cause) {
-super(msg, cause);
+public EngineException(ShardId shardId, String msg, Throwable cause, Object... params) {
+super(msg, cause, params);
setShard(shardId);
}

-public EngineException(StreamInput in) throws IOException{
+public EngineException(StreamInput in) throws IOException {
super(in);
}
}
@@ -21,8 +21,9 @@ package org.elasticsearch.index.engine;

import org.apache.lucene.index.*;
import org.apache.lucene.index.IndexWriter.IndexReaderWarmer;
-import org.apache.lucene.search.BooleanClause.Occur;
-import org.apache.lucene.search.*;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.SearcherFactory;
+import org.apache.lucene.search.SearcherManager;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.LockObtainFailedException;
@@ -31,7 +32,7 @@ import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.InfoStream;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
-import org.elasticsearch.cluster.routing.DjbHashFunction;
+import org.elasticsearch.cluster.routing.Murmur3HashFunction;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.logging.ESLogger;
@@ -48,7 +49,6 @@ import org.elasticsearch.index.indexing.ShardIndexingService;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.merge.OnGoingMerge;
-import org.elasticsearch.index.search.nested.IncludeNestedDocsQuery;
import org.elasticsearch.index.shard.ElasticsearchMergePolicy;
import org.elasticsearch.index.shard.MergeSchedulerConfig;
import org.elasticsearch.index.shard.ShardId;
@@ -67,7 +67,6 @@ import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Function;
-import java.util.function.Supplier;

/**
*
@@ -182,8 +181,7 @@ public class InternalEngine extends Engine {
}
translogConfig.setTranslogGeneration(generation);
if (generation != null && generation.translogUUID == null) {
-// only upgrade on pre-2.0 indices...
-Translog.upgradeLegacyTranslog(logger, translogConfig);
+throw new IndexFormatTooOldException("trasnlog", "translog has no generation nor a UUID - this might be an index from a previous version consider upgrading to N-1 first");
}
}
final Translog translog = new Translog(translogConfig);
@@ -316,7 +314,8 @@ public class InternalEngine extends Engine {
}
if (get.versionType().isVersionConflictForReads(versionValue.version(), get.version())) {
Uid uid = Uid.createUid(get.uid().text());
-throw new VersionConflictEngineException(shardId, uid.type(), uid.id(), versionValue.version(), get.version());
+throw new VersionConflictEngineException(shardId, uid.type(), uid.id(),
+get.versionType().explainConflictForReads(versionValue.version(), get.version()));
}
Translog.Operation op = translog.read(versionValue.translogLocation());
if (op != null) {
@@ -331,96 +330,7 @@ public class InternalEngine extends Engine {
}

@Override
-public void create(Create create) throws EngineException {
-try (ReleasableLock lock = readLock.acquire()) {
-ensureOpen();
-if (create.origin() == Operation.Origin.RECOVERY) {
-// Don't throttle recovery operations
-innerCreate(create);
-} else {
-try (Releasable r = throttle.acquireThrottle()) {
-innerCreate(create);
-}
-}
-} catch (OutOfMemoryError | IllegalStateException | IOException t) {
-maybeFailEngine("create", t);
-throw new CreateFailedEngineException(shardId, create.type(), create.id(), t);
-}
-checkVersionMapRefresh();
-}

-private void innerCreate(Create create) throws IOException {
-synchronized (dirtyLock(create.uid())) {
-final long currentVersion;
-final VersionValue versionValue;
-versionValue = versionMap.getUnderLock(create.uid().bytes());
-if (versionValue == null) {
-currentVersion = loadCurrentVersionFromIndex(create.uid());
-} else {
-if (engineConfig.isEnableGcDeletes() && versionValue.delete() && (engineConfig.getThreadPool().estimatedTimeInMillis() - versionValue.time()) > engineConfig.getGcDeletesInMillis()) {
-currentVersion = Versions.NOT_FOUND; // deleted, and GC
-} else {
-currentVersion = versionValue.version();
-}
-}
-innerCreateUnderLock(create, currentVersion, versionValue);
-}
-}

-private void innerCreateUnderLock(Create create, long currentVersion, VersionValue versionValue) throws IOException {

-// same logic as index
-long updatedVersion;
-long expectedVersion = create.version();
-if (create.versionType().isVersionConflictForWrites(currentVersion, expectedVersion)) {
-if (create.origin() == Operation.Origin.RECOVERY) {
-return;
-} else {
-throw new VersionConflictEngineException(shardId, create.type(), create.id(), currentVersion, expectedVersion);
-}
-}
-updatedVersion = create.versionType().updateVersion(currentVersion, expectedVersion);

-// if the doc exists
-boolean doUpdate = false;
-if ((versionValue != null && versionValue.delete() == false) || (versionValue == null && currentVersion != Versions.NOT_FOUND)) {
-if (create.origin() == Operation.Origin.RECOVERY) {
-return;
-} else if (create.origin() == Operation.Origin.REPLICA) {
-// #7142: the primary already determined it's OK to index this document, and we confirmed above that the version doesn't
-// conflict, so we must also update here on the replica to remain consistent:
-doUpdate = true;
-} else {
-// On primary, we throw DAEE if the _uid is already in the index with an older version:
-assert create.origin() == Operation.Origin.PRIMARY;
-throw new DocumentAlreadyExistsException(shardId, create.type(), create.id());
-}
-}

-create.updateVersion(updatedVersion);

-if (doUpdate) {
-if (create.docs().size() > 1) {
-indexWriter.updateDocuments(create.uid(), create.docs());
-} else {
-indexWriter.updateDocument(create.uid(), create.docs().get(0));
-}
-} else {
-if (create.docs().size() > 1) {
-indexWriter.addDocuments(create.docs());
-} else {
-indexWriter.addDocument(create.docs().get(0));
-}
-}
-Translog.Location translogLocation = translog.add(new Translog.Create(create));

-versionMap.putUnderLock(create.uid().bytes(), new VersionValue(updatedVersion, translogLocation));
-create.setTranslogLocation(translogLocation);
-indexingService.postCreateUnderLock(create);
-}

-@Override
-public boolean index(Index index) throws EngineException {
+public boolean index(Index index) {
final boolean created;
try (ReleasableLock lock = readLock.acquire()) {
ensureOpen();
@@ -440,6 +350,67 @@ public class InternalEngine extends Engine {
return created;
}

+private boolean innerIndex(Index index) throws IOException {
+synchronized (dirtyLock(index.uid())) {
+final long currentVersion;
+final boolean deleted;
+VersionValue versionValue = versionMap.getUnderLock(index.uid().bytes());
+if (versionValue == null) {
+currentVersion = loadCurrentVersionFromIndex(index.uid());
+deleted = currentVersion == Versions.NOT_FOUND;
+} else {
+deleted = versionValue.delete();
+if (engineConfig.isEnableGcDeletes() && versionValue.delete() && (engineConfig.getThreadPool().estimatedTimeInMillis() - versionValue.time()) > engineConfig.getGcDeletesInMillis()) {
+currentVersion = Versions.NOT_FOUND; // deleted, and GC
+} else {
+currentVersion = versionValue.version();
+}
+}

+long expectedVersion = index.version();
+if (index.versionType().isVersionConflictForWrites(currentVersion, expectedVersion, deleted)) {
+if (index.origin() == Operation.Origin.RECOVERY) {
+return false;
+} else {
+throw new VersionConflictEngineException(shardId, index.type(), index.id(),
+index.versionType().explainConflictForWrites(currentVersion, expectedVersion, deleted));
+}
+}
+long updatedVersion = index.versionType().updateVersion(currentVersion, expectedVersion);

+final boolean created;
+index.updateVersion(updatedVersion);

+if (currentVersion == Versions.NOT_FOUND) {
+// document does not exists, we can optimize for create
+created = true;
+if (index.docs().size() > 1) {
+indexWriter.addDocuments(index.docs());
+} else {
+indexWriter.addDocument(index.docs().get(0));
+}
+} else {
+if (versionValue != null) {
+created = versionValue.delete(); // we have a delete which is not GC'ed...
+} else {
+created = false;
+}
+if (index.docs().size() > 1) {
+indexWriter.updateDocuments(index.uid(), index.docs());
+} else {
+indexWriter.updateDocument(index.uid(), index.docs().get(0));
+}
+}
+Translog.Location translogLocation = translog.add(new Translog.Index(index));

+versionMap.putUnderLock(index.uid().bytes(), new VersionValue(updatedVersion, translogLocation));
+index.setTranslogLocation(translogLocation);

+indexingService.postIndexUnderLock(index);
+return created;
+}
+}

/**
* Forces a refresh if the versionMap is using too much RAM
*/
@@ -467,62 +438,6 @@ public class InternalEngine extends Engine {
}
}

-private boolean innerIndex(Index index) throws IOException {
-synchronized (dirtyLock(index.uid())) {
-final long currentVersion;
-VersionValue versionValue = versionMap.getUnderLock(index.uid().bytes());
-if (versionValue == null) {
-currentVersion = loadCurrentVersionFromIndex(index.uid());
-} else {
-if (engineConfig.isEnableGcDeletes() && versionValue.delete() && (engineConfig.getThreadPool().estimatedTimeInMillis() - versionValue.time()) > engineConfig.getGcDeletesInMillis()) {
-currentVersion = Versions.NOT_FOUND; // deleted, and GC
-} else {
-currentVersion = versionValue.version();
-}
-}

-long updatedVersion;
-long expectedVersion = index.version();
-if (index.versionType().isVersionConflictForWrites(currentVersion, expectedVersion)) {
-if (index.origin() == Operation.Origin.RECOVERY) {
-return false;
-} else {
-throw new VersionConflictEngineException(shardId, index.type(), index.id(), currentVersion, expectedVersion);
-}
-}
-updatedVersion = index.versionType().updateVersion(currentVersion, expectedVersion);

-final boolean created;
-index.updateVersion(updatedVersion);
-if (currentVersion == Versions.NOT_FOUND) {
-// document does not exists, we can optimize for create
-created = true;
-if (index.docs().size() > 1) {
-indexWriter.addDocuments(index.docs());
-} else {
-indexWriter.addDocument(index.docs().get(0));
-}
-} else {
-if (versionValue != null) {
-created = versionValue.delete(); // we have a delete which is not GC'ed...
-} else {
-created = false;
-}
-if (index.docs().size() > 1) {
-indexWriter.updateDocuments(index.uid(), index.docs());
-} else {
-indexWriter.updateDocument(index.uid(), index.docs().get(0));
-}
-}
-Translog.Location translogLocation = translog.add(new Translog.Index(index));

-versionMap.putUnderLock(index.uid().bytes(), new VersionValue(updatedVersion, translogLocation));
-index.setTranslogLocation(translogLocation);
-indexingService.postIndexUnderLock(index);
-return created;
-}
-}

@Override
public void delete(Delete delete) throws EngineException {
try (ReleasableLock lock = readLock.acquire()) {
@@ -549,10 +464,13 @@ public class InternalEngine extends Engine {
private void innerDelete(Delete delete) throws IOException {
synchronized (dirtyLock(delete.uid())) {
final long currentVersion;
+final boolean deleted;
VersionValue versionValue = versionMap.getUnderLock(delete.uid().bytes());
if (versionValue == null) {
currentVersion = loadCurrentVersionFromIndex(delete.uid());
+deleted = currentVersion == Versions.NOT_FOUND;
} else {
+deleted = versionValue.delete();
if (engineConfig.isEnableGcDeletes() && versionValue.delete() && (engineConfig.getThreadPool().estimatedTimeInMillis() - versionValue.time()) > engineConfig.getGcDeletesInMillis()) {
currentVersion = Versions.NOT_FOUND; // deleted, and GC
} else {
@@ -562,11 +480,12 @@ public class InternalEngine extends Engine {

long updatedVersion;
long expectedVersion = delete.version();
-if (delete.versionType().isVersionConflictForWrites(currentVersion, expectedVersion)) {
+if (delete.versionType().isVersionConflictForWrites(currentVersion, expectedVersion, deleted)) {
if (delete.origin() == Operation.Origin.RECOVERY) {
return;
} else {
-throw new VersionConflictEngineException(shardId, delete.type(), delete.id(), currentVersion, expectedVersion);
+throw new VersionConflictEngineException(shardId, delete.type(), delete.id(),
+delete.versionType().explainConflictForWrites(currentVersion, expectedVersion, deleted));
}
}
updatedVersion = delete.versionType().updateVersion(currentVersion, expectedVersion);
@@ -591,48 +510,6 @@ public class InternalEngine extends Engine {
}
}

-/** @deprecated This was removed, but we keep this API so translog can replay any DBQs on upgrade. */
-@Deprecated
-@Override
-public void delete(DeleteByQuery delete) throws EngineException {
-try (ReleasableLock lock = readLock.acquire()) {
-ensureOpen();
-if (delete.origin() == Operation.Origin.RECOVERY) {
-// Don't throttle recovery operations
-innerDelete(delete);
-} else {
-try (Releasable r = throttle.acquireThrottle()) {
-innerDelete(delete);
-}
-}
-}
-}

-private void innerDelete(DeleteByQuery delete) throws EngineException {
-try {
-Query query = delete.query();
-if (delete.aliasFilter() != null) {
-query = new BooleanQuery.Builder()
-.add(query, Occur.MUST)
-.add(delete.aliasFilter(), Occur.FILTER)
-.build();
-}
-if (delete.nested()) {
-query = new IncludeNestedDocsQuery(query, delete.parentFilter());
-}

-indexWriter.deleteDocuments(query);
-translog.add(new Translog.DeleteByQuery(delete));
-} catch (Throwable t) {
-maybeFailEngine("delete_by_query", t);
-throw new DeleteByQueryFailedEngineException(shardId, delete, t);
-}

-// TODO: This is heavy, since we refresh, but we must do this because we don't know which documents were in fact deleted (i.e., our
-// versionMap isn't updated), so we must force a cutover to a new reader to "see" the deletions:
-refresh("delete_by_query");
-}

@Override
public void refresh(String source) throws EngineException {
// we obtain a read lock here, since we don't want a flush to happen while we are refreshing
@@ -904,6 +781,11 @@ public class InternalEngine extends Engine {
stats.addIndexWriterMaxMemoryInBytes((long) (indexWriter.getConfig().getRAMBufferSizeMB() * 1024 * 1024));
}

+@Override
+public long indexWriterRAMBytesUsed() {
+return indexWriter.ramBytesUsed();
+}

@Override
public List<Segment> segments(boolean verbose) {
try (ReleasableLock lock = readLock.acquire()) {
@@ -974,7 +856,7 @@ public class InternalEngine extends Engine {
}

private Object dirtyLock(BytesRef uid) {
-int hash = DjbHashFunction.DJB_HASH(uid.bytes, uid.offset, uid.length);
+int hash = Murmur3HashFunction.hash(uid.bytes, uid.offset, uid.length);
return dirtyLocks[MathUtils.mod(hash, dirtyLocks.length)];
}
@@ -19,7 +19,6 @@

package org.elasticsearch.index.engine;

-import com.google.common.collect.Iterators;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.Accountables;
import org.elasticsearch.common.Nullable;
@@ -32,7 +31,6 @@ import org.elasticsearch.common.unit.ByteSizeValue;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
-import java.util.Iterator;
import java.util.List;

public class Segment implements Streamable {
@@ -102,11 +102,6 @@ public class ShadowEngine extends Engine {
}

-@Override
-public void create(Create create) throws EngineException {
-throw new UnsupportedOperationException(shardId + " create operation not allowed on shadow engine");
-}

@Override
public boolean index(Index index) throws EngineException {
throw new UnsupportedOperationException(shardId + " index operation not allowed on shadow engine");
@@ -117,13 +112,6 @@ public class ShadowEngine extends Engine {
throw new UnsupportedOperationException(shardId + " delete operation not allowed on shadow engine");
}

-/** @deprecated This was removed, but we keep this API so translog can replay any DBQs on upgrade. */
-@Deprecated
-@Override
-public void delete(DeleteByQuery delete) throws EngineException {
-throw new UnsupportedOperationException(shardId + " delete-by-query operation not allowed on shadow engine");
-}

@Override
public SyncedFlushResult syncFlush(String syncId, CommitId expectedCommitId) {
throw new UnsupportedOperationException(shardId + " sync commit operation not allowed on shadow engine");
@@ -245,4 +233,9 @@ public class ShadowEngine extends Engine {
return lastCommittedSegmentInfos;
}

+@Override
+public long indexWriterRAMBytesUsed() {
+// No IndexWriter
+throw new UnsupportedOperationException("ShadowEngine has no IndexWriter");
+}
}
@@ -29,8 +29,16 @@ import java.io.IOException;
*/
public class VersionConflictEngineException extends EngineException {

-public VersionConflictEngineException(ShardId shardId, String type, String id, long current, long provided) {
-super(shardId, "[" + type + "][" + id + "]: version conflict, current [" + current + "], provided [" + provided + "]");
+public VersionConflictEngineException(ShardId shardId, String type, String id, String explanation) {
+this(shardId, null, type, id, explanation);
+}

+public VersionConflictEngineException(ShardId shardId, Throwable cause, String type, String id, String explanation) {
+this(shardId, "[{}][{}]: version conflict, {}", cause, type, id, explanation);
+}

+public VersionConflictEngineException(ShardId shardId, String msg, Throwable cause, Object... params) {
+super(shardId, msg, cause, params);
}

@Override
@@ -28,39 +28,8 @@ public abstract class IndexingOperationListener {
/**
* Called before the indexing occurs.
*/
-public Engine.Create preCreate(Engine.Create create) {
-return create;
-}

-/**
-* Called after the indexing occurs, under a locking scheme to maintain
-* concurrent updates to the same doc.
-* <p>
-* Note, long operations should not occur under this callback.
-*/
-public void postCreateUnderLock(Engine.Create create) {

-}

-/**
-* Called after create index operation occurred.
-*/
-public void postCreate(Engine.Create create) {

-}

-/**
-* Called after create index operation occurred with exception.
-*/
-public void postCreate(Engine.Create create, Throwable ex) {

-}

-/**
-* Called before the indexing occurs.
-*/
-public Engine.Index preIndex(Engine.Index index) {
-return index;
+public Engine.Index preIndex(Engine.Index operation) {
+return operation;
}

/**
@@ -128,10 +128,6 @@ public final class IndexingSlowLog {
postIndexing(index.parsedDoc(), tookInNanos);
}

-void postCreate(Engine.Create create, long tookInNanos) {
-postIndexing(create.parsedDoc(), tookInNanos);
-}

/**
* Reads how much of the source to log. The user can specify any value they
* like and numbers are interpreted the maximum number of characters to log
@@ -86,25 +86,6 @@ public class ShardIndexingService extends AbstractIndexShardComponent {
listeners.remove(listener);
}

-public Engine.Create preCreate(Engine.Create create) {
-totalStats.indexCurrent.inc();
-typeStats(create.type()).indexCurrent.inc();
-for (IndexingOperationListener listener : listeners) {
-create = listener.preCreate(create);
-}
-return create;
-}

-public void postCreateUnderLock(Engine.Create create) {
-for (IndexingOperationListener listener : listeners) {
-try {
-listener.postCreateUnderLock(create);
-} catch (Exception e) {
-logger.warn("postCreateUnderLock listener [{}] failed", e, listener);
-}
-}
-}

public void throttlingActivated() {
totalStats.setThrottled(true);
}
@@ -113,40 +94,13 @@ public class ShardIndexingService extends AbstractIndexShardComponent {
totalStats.setThrottled(false);
}

-public void postCreate(Engine.Create create) {
-long took = create.endTime() - create.startTime();
-totalStats.indexMetric.inc(took);
-totalStats.indexCurrent.dec();
-StatsHolder typeStats = typeStats(create.type());
-typeStats.indexMetric.inc(took);
-typeStats.indexCurrent.dec();
-slowLog.postCreate(create, took);
-for (IndexingOperationListener listener : listeners) {
-try {
-listener.postCreate(create);
-} catch (Exception e) {
-logger.warn("postCreate listener [{}] failed", e, listener);
-}
-}
-}

-public void postCreate(Engine.Create create, Throwable ex) {
-for (IndexingOperationListener listener : listeners) {
-try {
-listener.postCreate(create, ex);
-} catch (Throwable t) {
-logger.warn("postCreate listener [{}] failed", t, listener);
-}
-}
-}

-public Engine.Index preIndex(Engine.Index index) {
+public Engine.Index preIndex(Engine.Index operation) {
totalStats.indexCurrent.inc();
-typeStats(index.type()).indexCurrent.inc();
+typeStats(operation.type()).indexCurrent.inc();
for (IndexingOperationListener listener : listeners) {
-index = listener.preIndex(index);
+operation = listener.preIndex(operation);
}
-return index;
+return operation;
}

public void postIndexUnderLock(Engine.Index index) {
@@ -64,7 +64,7 @@ import org.elasticsearch.index.mapper.ip.IpFieldMapper;
import org.elasticsearch.index.mapper.object.ObjectMapper;
import org.elasticsearch.index.mapper.object.RootObjectMapper;
import org.elasticsearch.index.settings.IndexSettings;
-import org.elasticsearch.index.similarity.SimilarityLookupService;
+import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService;

@@ -86,7 +86,7 @@ public class DocumentMapperParser {
final MapperService mapperService;
final AnalysisService analysisService;
private static final ESLogger logger = Loggers.getLogger(DocumentMapperParser.class);
-private final SimilarityLookupService similarityLookupService;
+private final SimilarityService similarityService;
private final ScriptService scriptService;

private final RootObjectMapper.TypeParser rootObjectTypeParser = new RootObjectMapper.TypeParser();
@@ -100,12 +100,12 @@ public class DocumentMapperParser {
private volatile SortedMap<String, Mapper.TypeParser> additionalRootMappers;

public DocumentMapperParser(@IndexSettings Settings indexSettings, MapperService mapperService, AnalysisService analysisService,
-SimilarityLookupService similarityLookupService, ScriptService scriptService) {
+SimilarityService similarityService, ScriptService scriptService) {
this.indexSettings = indexSettings;
this.parseFieldMatcher = new ParseFieldMatcher(indexSettings);
this.mapperService = mapperService;
this.analysisService = analysisService;
-this.similarityLookupService = similarityLookupService;
+this.similarityService = similarityService;
this.scriptService = scriptService;
Map<String, Mapper.TypeParser> typeParsers = new HashMap<>();
typeParsers.put(ByteFieldMapper.CONTENT_TYPE, new ByteFieldMapper.TypeParser());
@@ -170,7 +170,7 @@ public class DocumentMapperParser {
}

public Mapper.TypeParser.ParserContext parserContext(String type) {
-return new Mapper.TypeParser.ParserContext(type, analysisService, similarityLookupService, mapperService, typeParsers, indexVersionCreated, parseFieldMatcher);
+return new Mapper.TypeParser.ParserContext(type, analysisService, similarityService::getSimilarity, mapperService, typeParsers::get, indexVersionCreated, parseFieldMatcher);
}

public DocumentMapper parse(String source) throws MapperParsingException {
@@ -122,7 +122,7 @@ class DocumentParser implements Closeable {
// entire type is disabled
parser.skipChildren();
} else if (emptyDoc == false) {
-Mapper update = parseObject(context, mapping.root);
+Mapper update = parseObject(context, mapping.root, true);
if (update != null) {
context.addDynamicMappingsUpdate(update);
}
@@ -194,7 +194,7 @@ class DocumentParser implements Closeable {
return doc;
}

-static ObjectMapper parseObject(ParseContext context, ObjectMapper mapper) throws IOException {
+static ObjectMapper parseObject(ParseContext context, ObjectMapper mapper, boolean atRoot) throws IOException {
if (mapper.isEnabled() == false) {
context.parser().skipChildren();
return null;
@@ -202,6 +202,10 @@ class DocumentParser implements Closeable {
XContentParser parser = context.parser();

String currentFieldName = parser.currentName();
+if (atRoot && MapperService.isMetadataField(currentFieldName) &&
+Version.indexCreated(context.indexSettings()).onOrAfter(Version.V_2_0_0_beta1)) {
+throw new MapperParsingException("Field [" + currentFieldName + "] is a metadata field and cannot be added inside a document. Use the index API request parameters.");
+}
XContentParser.Token token = parser.currentToken();
if (token == XContentParser.Token.VALUE_NULL) {
// the object is null ("obj1" : null), simply bail
@@ -302,7 +306,7 @@ class DocumentParser implements Closeable {

private static Mapper parseObjectOrField(ParseContext context, Mapper mapper) throws IOException {
if (mapper instanceof ObjectMapper) {
-return parseObject(context, (ObjectMapper) mapper);
+return parseObject(context, (ObjectMapper) mapper, false);
} else {
FieldMapper fieldMapper = (FieldMapper)mapper;
Mapper update = fieldMapper.parse(context);
@@ -34,8 +34,8 @@ import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.core.TypeParsers;
import org.elasticsearch.index.mapper.internal.AllFieldMapper;
-import org.elasticsearch.index.similarity.SimilarityLookupService;
import org.elasticsearch.index.similarity.SimilarityProvider;
+import org.elasticsearch.index.similarity.SimilarityService;

import java.io.IOException;
import java.util.ArrayList;
@@ -447,7 +447,7 @@ public abstract class FieldMapper extends Mapper {
if (fieldType().similarity() != null) {
builder.field("similarity", fieldType().similarity().name());
} else if (includeDefaults) {
-builder.field("similarity", SimilarityLookupService.DEFAULT_SIMILARITY);
+builder.field("similarity", SimilarityService.DEFAULT_SIMILARITY);
}

if (includeDefaults || hasCustomFieldDataSettings()) {
@@ -26,9 +26,10 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.index.analysis.AnalysisService;
-import org.elasticsearch.index.similarity.SimilarityLookupService;
+import org.elasticsearch.index.similarity.SimilarityProvider;

import java.util.Map;
+import java.util.function.Function;

public abstract class Mapper implements ToXContent, Iterable<Mapper> {

@@ -84,18 +85,18 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {

private final AnalysisService analysisService;

-private final SimilarityLookupService similarityLookupService;
+private final Function<String, SimilarityProvider> similarityLookupService;

private final MapperService mapperService;

-private final Map<String, TypeParser> typeParsers;
+private final Function<String, TypeParser> typeParsers;

private final Version indexVersionCreated;

private final ParseFieldMatcher parseFieldMatcher;

-public ParserContext(String type, AnalysisService analysisService, SimilarityLookupService similarityLookupService,
-MapperService mapperService, Map<String, TypeParser> typeParsers,
+public ParserContext(String type, AnalysisService analysisService, Function<String, SimilarityProvider> similarityLookupService,
+MapperService mapperService, Function<String, TypeParser> typeParsers,
Version indexVersionCreated, ParseFieldMatcher parseFieldMatcher) {
this.type = type;
this.analysisService = analysisService;
@@ -114,8 +115,8 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {
return analysisService;
}

-public SimilarityLookupService similarityLookupService() {
-return similarityLookupService;
+public SimilarityProvider getSimilarity(String name) {
+return similarityLookupService.apply(name);
}

public MapperService mapperService() {
@@ -123,7 +124,7 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {
}

public TypeParser typeParser(String type) {
-return typeParsers.get(Strings.toUnderscoreCase(type));
+return typeParsers.apply(Strings.toUnderscoreCase(type));
}

public Version indexVersionCreated() {
@@ -20,7 +20,6 @@
package org.elasticsearch.index.mapper;

import com.carrotsearch.hppc.ObjectHashSet;
-import com.google.common.collect.Iterators;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.DelegatingAnalyzerWrapper;
@@ -51,7 +50,7 @@ import org.elasticsearch.index.mapper.Mapper.BuilderContext;
import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
import org.elasticsearch.index.mapper.object.ObjectMapper;
import org.elasticsearch.index.settings.IndexSettings;
-import org.elasticsearch.index.similarity.SimilarityLookupService;
+import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.indices.InvalidTypeNameException;
import org.elasticsearch.indices.TypeMissingException;
import org.elasticsearch.percolator.PercolatorService;
@@ -65,13 +64,13 @@ import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
-import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Function;
+import java.util.stream.Collectors;

import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;
@@ -126,12 +125,12 @@ public class MapperService extends AbstractIndexComponent implements Closeable {

@Inject
public MapperService(Index index, @IndexSettings Settings indexSettings, AnalysisService analysisService,
-SimilarityLookupService similarityLookupService,
+SimilarityService similarityService,
ScriptService scriptService) {
super(index, indexSettings);
this.analysisService = analysisService;
this.fieldTypes = new FieldTypeLookup();
-this.documentParser = new DocumentMapperParser(indexSettings, this, analysisService, similarityLookupService, scriptService);
+this.documentParser = new DocumentMapperParser(indexSettings, this, analysisService, similarityService, scriptService);
this.indexAnalyzer = new MapperAnalyzerWrapper(analysisService.defaultIndexAnalyzer(), p -> p.indexAnalyzer());
this.searchAnalyzer = new MapperAnalyzerWrapper(analysisService.defaultSearchAnalyzer(), p -> p.searchAnalyzer());
this.searchQuoteAnalyzer = new MapperAnalyzerWrapper(analysisService.defaultSearchQuoteAnalyzer(), p -> p.searchQuoteAnalyzer());
@@ -186,13 +185,13 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
*/
public Iterable<DocumentMapper> docMappers(final boolean includingDefaultMapping) {
return () -> {
-final Iterator<DocumentMapper> iterator;
+final Collection<DocumentMapper> documentMappers;
if (includingDefaultMapping) {
-iterator = mappers.values().iterator();
+documentMappers = mappers.values();
} else {
-iterator = mappers.values().stream().filter(mapper -> !DEFAULT_MAPPING.equals(mapper.type())).iterator();
+documentMappers = mappers.values().stream().filter(mapper -> !DEFAULT_MAPPING.equals(mapper.type())).collect(Collectors.toList());
}
-return Iterators.unmodifiableIterator(iterator);
+return Collections.unmodifiableCollection(documentMappers).iterator();
};
}
@@ -79,7 +79,6 @@ public class BinaryFieldMapper extends FieldMapper {
@Override
public BinaryFieldMapper build(BuilderContext context) {
setupFieldType(context);
-((BinaryFieldType)fieldType).setTryUncompressing(context.indexCreatedVersion().before(Version.V_2_0_0_beta1));
return new BinaryFieldMapper(name, fieldType, defaultFieldType,
context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
}
@@ -103,13 +102,11 @@ public class BinaryFieldMapper extends FieldMapper {
}

static final class BinaryFieldType extends MappedFieldType {
-private boolean tryUncompressing = false;

public BinaryFieldType() {}

protected BinaryFieldType(BinaryFieldType ref) {
super(ref);
-this.tryUncompressing = ref.tryUncompressing;
}

@Override
@@ -117,40 +114,12 @@ public class BinaryFieldMapper extends FieldMapper {
return new BinaryFieldType(this);
}

-@Override
-public boolean equals(Object o) {
-if (!super.equals(o)) return false;
-BinaryFieldType that = (BinaryFieldType) o;
-return Objects.equals(tryUncompressing, that.tryUncompressing);
-}

-@Override
-public int hashCode() {
-return Objects.hash(super.hashCode(), tryUncompressing);
-}

@Override
public String typeName() {
return CONTENT_TYPE;
}

-@Override
-public void checkCompatibility(MappedFieldType fieldType, List<String> conflicts, boolean strict) {
-super.checkCompatibility(fieldType, conflicts, strict);
-BinaryFieldType other = (BinaryFieldType)fieldType;
-if (tryUncompressing() != other.tryUncompressing()) {
-conflicts.add("mapper [" + names().fullName() + "] has different [try_uncompressing] (IMPOSSIBLE)");
-}
-}

-public boolean tryUncompressing() {
-return tryUncompressing;
-}

-public void setTryUncompressing(boolean tryUncompressing) {
-checkIfFrozen();
-this.tryUncompressing = tryUncompressing;
-}

@Override
public BytesReference value(Object value) {
@@ -172,15 +141,7 @@ public class BinaryFieldMapper extends FieldMapper {
throw new ElasticsearchParseException("failed to convert bytes", e);
}
}
-try {
-if (tryUncompressing) { // backcompat behavior
-return CompressorFactory.uncompressIfNeeded(bytes);
-} else {
-return bytes;
-}
-} catch (IOException e) {
-throw new ElasticsearchParseException("failed to decompress source", e);
-}
+return bytes;
}

@Override
@@ -19,8 +19,6 @@
 
 package org.elasticsearch.index.mapper.core;
 
-import com.carrotsearch.hppc.DoubleArrayList;
-
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.document.Field;
@@ -36,8 +34,6 @@ import org.elasticsearch.common.Explicit;
 import org.elasticsearch.common.Numbers;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.Fuzziness;
-import org.elasticsearch.common.util.ByteUtils;
-import org.elasticsearch.common.util.CollectionUtils;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.analysis.NamedAnalyzer;
@@ -286,17 +282,7 @@ public class DoubleFieldMapper extends NumberFieldMapper {
             fields.add(field);
         }
         if (fieldType().hasDocValues()) {
-            if (useSortedNumericDocValues) {
-                addDocValue(context, fields, doubleToSortableLong(value));
-            } else {
-                CustomDoubleNumericDocValuesField field = (CustomDoubleNumericDocValuesField) context.doc().getByKey(fieldType().names().indexName());
-                if (field != null) {
-                    field.add(value);
-                } else {
-                    field = new CustomDoubleNumericDocValuesField(fieldType().names().indexName(), value);
-                    context.doc().addWithKey(fieldType().names().indexName(), field);
-                }
-            }
+            addDocValue(context, fields, doubleToSortableLong(value));
         }
     }
 
@@ -346,30 +332,4 @@ public class DoubleFieldMapper extends NumberFieldMapper {
         }
     }
 
-    public static class CustomDoubleNumericDocValuesField extends CustomNumericDocValuesField {
-
-        private final DoubleArrayList values;
-
-        public CustomDoubleNumericDocValuesField(String name, double value) {
-            super(name);
-            values = new DoubleArrayList();
-            add(value);
-        }
-
-        public void add(double value) {
-            values.add(value);
-        }
-
-        @Override
-        public BytesRef binaryValue() {
-            CollectionUtils.sortAndDedup(values);
-
-            final byte[] bytes = new byte[values.size() * 8];
-            for (int i = 0; i < values.size(); ++i) {
-                ByteUtils.writeDoubleLE(values.get(i), bytes, i * 8);
-            }
-            return new BytesRef(bytes);
-        }
-
-    }
 }
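The DoubleFieldMapper hunk above now always routes doc values through addDocValue(context, fields, doubleToSortableLong(value)), i.e. each double is stored as a long whose signed order matches the double order. Below is a small sketch of that sortable-long transform, assuming the same bit-flip trick Lucene's NumericUtils.doubleToSortableLong uses; the class and method names are illustrative, not the Elasticsearch code.

import java.util.Arrays;

// Sketch of the "sortable long" encoding applied before a double is handed to addDocValue(...)
// and stored in a SORTED_NUMERIC doc-values field.
public class SortableDoubleSketch {

    // Map a double to a long whose signed order matches the double's natural order.
    static long doubleToSortableLong(double value) {
        long bits = Double.doubleToLongBits(value);
        // For negative doubles the raw bit pattern sorts in reverse, so flip the 63 value bits
        // (but not the sign bit) whenever the sign bit is set.
        return bits ^ ((bits >> 63) & 0x7fffffffffffffffL);
    }

    static double sortableLongToDouble(long encoded) {
        // The transform only touches the value bits and is its own inverse, so applying it again decodes.
        return Double.longBitsToDouble(encoded ^ ((encoded >> 63) & 0x7fffffffffffffffL));
    }

    public static void main(String[] args) {
        double[] values = {-2.5, -1.0, -0.0, 0.0, 1.5, 3.0};
        long[] encoded = Arrays.stream(values).mapToLong(SortableDoubleSketch::doubleToSortableLong).toArray();
        long[] sorted = encoded.clone();
        Arrays.sort(sorted);
        System.out.println(Arrays.equals(encoded, sorted)); // true: long order matches double order
        System.out.println(sortableLongToDouble(encoded[0])); // -2.5
    }
}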
@@ -19,8 +19,6 @@
 
 package org.elasticsearch.index.mapper.core;
 
-import com.carrotsearch.hppc.FloatArrayList;
-
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.document.Field;
@@ -37,8 +35,6 @@ import org.elasticsearch.common.Numbers;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.Fuzziness;
-import org.elasticsearch.common.util.ByteUtils;
-import org.elasticsearch.common.util.CollectionUtils;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.analysis.NamedAnalyzer;
@@ -298,17 +294,7 @@ public class FloatFieldMapper extends NumberFieldMapper {
             fields.add(field);
         }
         if (fieldType().hasDocValues()) {
-            if (useSortedNumericDocValues) {
-                addDocValue(context, fields, floatToSortableInt(value));
-            } else {
-                CustomFloatNumericDocValuesField field = (CustomFloatNumericDocValuesField) context.doc().getByKey(fieldType().names().indexName());
-                if (field != null) {
-                    field.add(value);
-                } else {
-                    field = new CustomFloatNumericDocValuesField(fieldType().names().indexName(), value);
-                    context.doc().addWithKey(fieldType().names().indexName(), field);
-                }
-            }
+            addDocValue(context, fields, floatToSortableInt(value));
         }
     }
 
@@ -357,31 +343,4 @@ public class FloatFieldMapper extends NumberFieldMapper {
             return Float.toString(number);
         }
     }
-
-    public static class CustomFloatNumericDocValuesField extends CustomNumericDocValuesField {
-
-        private final FloatArrayList values;
-
-        public CustomFloatNumericDocValuesField(String name, float value) {
-            super(name);
-            values = new FloatArrayList();
-            add(value);
-        }
-
-        public void add(float value) {
-            values.add(value);
-        }
-
-        @Override
-        public BytesRef binaryValue() {
-            CollectionUtils.sortAndDedup(values);
-
-            final byte[] bytes = new byte[values.size() * 4];
-            for (int i = 0; i < values.size(); ++i) {
-                ByteUtils.writeFloatLE(values.get(i), bytes, i * 4);
-            }
-            return new BytesRef(bytes);
-        }
-
-    }
 }
@@ -19,7 +19,6 @@
 
 package org.elasticsearch.index.mapper.core;
 
-import com.carrotsearch.hppc.LongArrayList;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.NumericTokenStream;
 import org.apache.lucene.analysis.TokenStream;
@@ -31,14 +30,10 @@ import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.IndexableFieldType;
 import org.apache.lucene.search.Query;
-import org.apache.lucene.store.ByteArrayDataOutput;
 import org.apache.lucene.util.BytesRef;
-import org.elasticsearch.Version;
 import org.elasticsearch.common.Explicit;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.Fuzziness;
-import org.elasticsearch.common.util.ByteUtils;
-import org.elasticsearch.common.util.CollectionUtils;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.index.analysis.NamedAnalyzer;
 import org.elasticsearch.index.mapper.*;
@@ -170,21 +165,12 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM
 
     protected Explicit<Boolean> coerce;
 
-    /**
-     * True if index version is 1.4+
-     * <p>
-     * In this case numerics are encoded with SORTED_NUMERIC docvalues,
-     * otherwise for older indexes we must continue to write BINARY (for now)
-     */
-    protected final boolean useSortedNumericDocValues;
-
     protected NumberFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType,
                                 Explicit<Boolean> ignoreMalformed, Explicit<Boolean> coerce, Settings indexSettings,
                                 MultiFields multiFields, CopyTo copyTo) {
         super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo);
         this.ignoreMalformed = ignoreMalformed;
         this.coerce = coerce;
-        this.useSortedNumericDocValues = Version.indexCreated(indexSettings).onOrAfter(Version.V_1_4_0_Beta1);
     }
 
     @Override
@@ -225,17 +211,7 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM
     protected abstract void innerParseCreateField(ParseContext context, List<Field> fields) throws IOException;
 
     protected final void addDocValue(ParseContext context, List<Field> fields, long value) {
-        if (useSortedNumericDocValues) {
-            fields.add(new SortedNumericDocValuesField(fieldType().names().indexName(), value));
-        } else {
-            CustomLongNumericDocValuesField field = (CustomLongNumericDocValuesField) context.doc().getByKey(fieldType().names().indexName());
-            if (field != null) {
-                field.add(value);
-            } else {
-                field = new CustomLongNumericDocValuesField(fieldType().names().indexName(), value);
-                context.doc().addWithKey(fieldType().names().indexName(), field);
-            }
-        }
+        fields.add(new SortedNumericDocValuesField(fieldType().names().indexName(), value));
     }
 
     /**
@@ -414,40 +390,6 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM
 
     }
 
-
-    public static class CustomLongNumericDocValuesField extends CustomNumericDocValuesField {
-
-        private final LongArrayList values;
-
-        public CustomLongNumericDocValuesField(String name, long value) {
-            super(name);
-            values = new LongArrayList();
-            add(value);
-        }
-
-        public void add(long value) {
-            values.add(value);
-        }
-
-        @Override
-        public BytesRef binaryValue() {
-            CollectionUtils.sortAndDedup(values);
-
-            // here is the trick:
-            // - the first value is zig-zag encoded so that eg. -5 would become positive and would be better compressed by vLong
-            // - for other values, we only encode deltas using vLong
-            final byte[] bytes = new byte[values.size() * ByteUtils.MAX_BYTES_VLONG];
-            final ByteArrayDataOutput out = new ByteArrayDataOutput(bytes);
-            ByteUtils.writeVLong(out, ByteUtils.zigZagEncode(values.get(0)));
-            for (int i = 1; i < values.size(); ++i) {
-                final long delta = values.get(i) - values.get(i - 1);
-                ByteUtils.writeVLong(out, delta);
-            }
-            return new BytesRef(bytes, 0, out.getPosition());
-        }
-
-    }
-
     @Override
     protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
         super.doXContentBody(builder, includeDefaults, params);
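The comment inside the removed CustomLongNumericDocValuesField.binaryValue() above describes the legacy BINARY doc-values layout: the first (smallest) value is zig-zag encoded so a negative number still compresses well as a vLong, and every later value is stored as a non-negative delta from its predecessor. As a reading aid, here is a self-contained sketch of that encoding in plain Java; LegacyLongDocValuesSketch, zigZagEncode and writeVLong are stand-ins for the ByteUtils / ByteArrayDataOutput helpers, not the actual Elasticsearch implementation.

import java.io.ByteArrayOutputStream;

// Sketch of the legacy "zig-zag first value, vLong deltas after that" encoding.
public class LegacyLongDocValuesSketch {

    // Zig-zag: interleave negative and positive values (…, -2 -> 3, -1 -> 1, 0 -> 0, 1 -> 2, …).
    static long zigZagEncode(long value) {
        return (value << 1) ^ (value >> 63);
    }

    // Variable-length encoding of a non-negative long: 7 bits per byte, high bit = "more bytes follow".
    static void writeVLong(ByteArrayOutputStream out, long value) {
        while ((value & ~0x7FL) != 0) {
            out.write((int) ((value & 0x7F) | 0x80));
            value >>>= 7;
        }
        out.write((int) value);
    }

    // values must be non-empty, sorted ascending and deduplicated (sortAndDedup in the original code).
    static byte[] encode(long[] values) {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        writeVLong(out, zigZagEncode(values[0]));
        for (int i = 1; i < values.length; ++i) {
            writeVLong(out, values[i] - values[i - 1]); // deltas are >= 0, so a plain vLong suffices
        }
        return out.toByteArray();
    }

    public static void main(String[] args) {
        byte[] bytes = encode(new long[]{-5, -1, 0, 7, 1000});
        System.out.println(bytes.length); // a handful of bytes instead of 5 * 8
    }
}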
@@ -173,7 +173,7 @@ public class TypeParsers {
                 builder.omitNorms(nodeBooleanValue(propNode));
                 iterator.remove();
             } else if (propName.equals("similarity")) {
-                builder.similarity(parserContext.similarityLookupService().similarity(propNode.toString()));
+                builder.similarity(parserContext.getSimilarity(propNode.toString()));
                 iterator.remove();
             } else if (parseMultiField(builder, name, parserContext, propName, propNode)) {
                 iterator.remove();
@@ -277,7 +277,7 @@ public class TypeParsers {
                 // ignore for old indexes
                 iterator.remove();
             } else if (propName.equals("similarity")) {
-                builder.similarity(parserContext.similarityLookupService().similarity(propNode.toString()));
+                builder.similarity(parserContext.getSimilarity(propNode.toString()));
                 iterator.remove();
             } else if (propName.equals("fielddata")) {
                 final Settings settings = Settings.builder().put(SettingsLoader.Helper.loadNestedFromMap(nodeMapValue(propNode, "fielddata"))).build();
@@ -21,7 +21,6 @@ package org.elasticsearch.index.mapper.geo;
 
 import com.carrotsearch.hppc.ObjectHashSet;
 import com.carrotsearch.hppc.cursors.ObjectCursor;
-import com.google.common.collect.Iterators;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.util.BytesRef;
@@ -30,6 +29,7 @@ import org.apache.lucene.util.XGeoHashUtils;
 import org.elasticsearch.Version;
 import org.elasticsearch.common.Explicit;
 import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.collect.Iterators;
 import org.elasticsearch.common.geo.GeoDistance;
 import org.elasticsearch.common.geo.GeoPoint;
 import org.elasticsearch.common.geo.GeoUtils;
@@ -39,14 +39,7 @@ import org.elasticsearch.common.util.ByteUtils;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.support.XContentMapValues;
-import org.elasticsearch.index.mapper.ContentPath;
-import org.elasticsearch.index.mapper.FieldMapper;
-import org.elasticsearch.index.mapper.MappedFieldType;
-import org.elasticsearch.index.mapper.Mapper;
-import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeMappingException;
-import org.elasticsearch.index.mapper.MergeResult;
-import org.elasticsearch.index.mapper.ParseContext;
+import org.elasticsearch.index.mapper.*;
 import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
 import org.elasticsearch.index.mapper.core.NumberFieldMapper;
 import org.elasticsearch.index.mapper.core.NumberFieldMapper.CustomNumericDocValuesField;
@@ -54,18 +47,10 @@ import org.elasticsearch.index.mapper.core.StringFieldMapper;
 import org.elasticsearch.index.mapper.object.ArrayValueMapperParser;
 
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
+import java.util.*;
 
-import static org.elasticsearch.index.mapper.MapperBuilders.doubleField;
-import static org.elasticsearch.index.mapper.MapperBuilders.geoPointField;
-import static org.elasticsearch.index.mapper.MapperBuilders.stringField;
-import static org.elasticsearch.index.mapper.core.TypeParsers.parseField;
-import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField;
-import static org.elasticsearch.index.mapper.core.TypeParsers.parsePathType;
+import static org.elasticsearch.index.mapper.MapperBuilders.*;
+import static org.elasticsearch.index.mapper.core.TypeParsers.*;
 
 /**
  * Parsing: We handle:
@@ -41,7 +41,7 @@ import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.MetadataFieldMapper;
 import org.elasticsearch.index.mapper.ParseContext;
 import org.elasticsearch.index.query.QueryShardContext;
-import org.elasticsearch.index.similarity.SimilarityLookupService;
+import org.elasticsearch.index.similarity.SimilarityService;
 
 import java.io.IOException;
 import java.util.Iterator;
@@ -300,7 +300,7 @@ public class AllFieldMapper extends MetadataFieldMapper {
         if (fieldType().similarity() != null) {
             builder.field("similarity", fieldType().similarity().name());
         } else if (includeDefaults) {
-            builder.field("similarity", SimilarityLookupService.DEFAULT_SIMILARITY);
+            builder.field("similarity", SimilarityService.DEFAULT_SIMILARITY);
         }
     }
 
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user