merge from master

Boaz Leskes 2016-01-11 10:00:10 +01:00
commit 4d0feff2ef
239 changed files with 4604 additions and 7689 deletions

@@ -45,6 +45,7 @@ org.apache.lucene.search.NumericRangeFilter
 org.apache.lucene.search.PrefixFilter
 org.apache.lucene.search.QueryWrapperFilter
 org.apache.lucene.search.join.BitDocIdSetCachingWrapperFilter
+org.apache.lucene.index.IndexWriter#isLocked(org.apache.lucene.store.Directory)
 java.nio.file.Paths @ Use org.elasticsearch.common.io.PathUtils.get() instead.
 java.nio.file.FileSystems#getDefault() @ use org.elasticsearch.common.io.PathUtils.getDefaultFileSystem() instead.
@@ -125,4 +126,6 @@ java.util.Collections#EMPTY_MAP
 java.util.Collections#EMPTY_SET
 java.util.Collections#shuffle(java.util.List) @ Use java.util.Collections#shuffle(java.util.List, java.util.Random) with a reproducible source of randomness
-java.util.Random#<init>() @ Use org.elasticsearch.common.random.Randomness#create for reproducible sources of randomness
+@defaultMessage Use org.elasticsearch.common.Randomness#get for reproducible sources of randomness
+java.util.Random#<init>()
+java.util.concurrent.ThreadLocalRandom
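
The two entries added above ban direct construction of java.util.Random and any use of ThreadLocalRandom in favor of the Randomness helper. As a minimal sketch of the pattern the rules push callers toward (assuming only the Randomness#get signature the hunk itself references), a caller obtains its Random from the helper and passes it explicitly to Collections#shuffle:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Random;

import org.elasticsearch.common.Randomness;

public class ReproducibleShuffle {
    public static void main(String[] args) {
        // Allowed: a Random obtained via Randomness#get, reproducible when the
        // test framework seeds it; new Random() and ThreadLocalRandom are forbidden.
        Random random = Randomness.get();

        // Allowed: the shuffle overload taking an explicit source of randomness;
        // the single-argument Collections#shuffle(List) is on the forbidden list.
        List<String> indices = new ArrayList<>(Arrays.asList("idx-1", "idx-2", "idx-3"));
        Collections.shuffle(indices, random);
        System.out.println(indices);
    }
}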

@@ -102,8 +102,8 @@ if (isEclipse) {
   }
 }
-compileJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-fallthrough,-overrides,-rawtypes,-serial,-try,-unchecked"
-compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-fallthrough,-overrides,-rawtypes,-serial,-try,-unchecked"
+compileJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-serial,-try,-unchecked"
+compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-serial,-try,-unchecked"
 forbiddenPatterns {
   exclude '**/*.json'

@@ -566,7 +566,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
 REFRESH_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.RefreshFailedEngineException.class, org.elasticsearch.index.engine.RefreshFailedEngineException::new, 90),
 AGGREGATION_INITIALIZATION_EXCEPTION(org.elasticsearch.search.aggregations.AggregationInitializationException.class, org.elasticsearch.search.aggregations.AggregationInitializationException::new, 91),
 DELAY_RECOVERY_EXCEPTION(org.elasticsearch.indices.recovery.DelayRecoveryException.class, org.elasticsearch.indices.recovery.DelayRecoveryException::new, 92),
-INDEX_WARMER_MISSING_EXCEPTION(org.elasticsearch.search.warmer.IndexWarmerMissingException.class, org.elasticsearch.search.warmer.IndexWarmerMissingException::new, 93),
+// 93 used to be for IndexWarmerMissingException
 NO_NODE_AVAILABLE_EXCEPTION(org.elasticsearch.client.transport.NoNodeAvailableException.class, org.elasticsearch.client.transport.NoNodeAvailableException::new, 94),
 INVALID_SNAPSHOT_NAME_EXCEPTION(org.elasticsearch.snapshots.InvalidSnapshotNameException.class, org.elasticsearch.snapshots.InvalidSnapshotNameException::new, 96),
 ILLEGAL_INDEX_SHARD_STATE_EXCEPTION(org.elasticsearch.index.shard.IllegalIndexShardStateException.class, org.elasticsearch.index.shard.IllegalIndexShardStateException::new, 97),
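The hunk above retires wire id 93 with a comment instead of renumbering the entries that follow: these numeric ids are serialized between nodes, so they must stay stable across releases. A hypothetical registration check (names invented here for illustration, not the real ElasticsearchException mechanism) makes that invariant concrete:

import java.util.HashMap;
import java.util.Map;

public class ExceptionIdRegistry {
    private static final Map<Integer, String> IDS = new HashMap<>();

    // Fails fast if a wire id is ever reused for a different exception class.
    static void register(int id, String className) {
        String previous = IDS.put(id, className);
        if (previous != null) {
            throw new IllegalStateException("id " + id + " already used by " + previous);
        }
    }

    public static void main(String[] args) {
        register(92, "DelayRecoveryException");
        // 93 intentionally skipped: used to be IndexWarmerMissingException
        register(94, "NoNodeAvailableException");
    }
}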

@@ -25,7 +25,6 @@ import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.inject.AbstractModule;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.monitor.jvm.JvmInfo;
@@ -286,7 +285,8 @@ public class Version {
 public static final Version CURRENT = V_3_0_0;
 static {
-assert CURRENT.luceneVersion.equals(Lucene.VERSION) : "Version must be upgraded to [" + Lucene.VERSION + "] is still set to [" + CURRENT.luceneVersion + "]";
+assert CURRENT.luceneVersion.equals(org.apache.lucene.util.Version.LATEST) : "Version must be upgraded to ["
+    + org.apache.lucene.util.Version.LATEST + "] is still set to [" + CURRENT.luceneVersion + "]";
 }
 public static Version readVersion(StreamInput in) throws IOException {
@@ -457,7 +457,6 @@
 return V_0_90_0_RC1;
 case V_0_90_0_Beta1_ID:
 return V_0_90_0_Beta1;
-
 case V_0_20_7_ID:
 return V_0_20_7;
 case V_0_20_6_ID:
@@ -476,7 +475,6 @@
 return V_0_20_0;
 case V_0_20_0_RC1_ID:
 return V_0_20_0_RC1;
-
 case V_0_19_0_RC1_ID:
 return V_0_19_0_RC1;
 case V_0_19_0_RC2_ID:
@@ -511,7 +509,6 @@
 return V_0_19_12;
 case V_0_19_13_ID:
 return V_0_19_13;
-
 case V_0_18_0_ID:
 return V_0_18_0;
 case V_0_18_1_ID:
@@ -530,9 +527,8 @@
 return V_0_18_7;
 case V_0_18_8_ID:
 return V_0_18_8;
-
 default:
-return new Version(id, false, Lucene.VERSION);
+return new Version(id, false, org.apache.lucene.util.Version.LATEST);
 }
 }
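
Both Version.java hunks swap Lucene.VERSION for org.apache.lucene.util.Version.LATEST, letting the class drop its import of the Elasticsearch Lucene helper. A minimal sketch of what the rewritten assert verifies, assuming only Lucene's own Version.LATEST constant (the field below is a stand-in for Version.CURRENT.luceneVersion, which in Elasticsearch comes from the version constant table):

import org.apache.lucene.util.Version;

public class LuceneVersionCheck {
    // Hypothetical stand-in for the Lucene version baked into Version.CURRENT.
    static final Version EXPECTED_LUCENE_VERSION = Version.LATEST;

    public static void main(String[] args) {
        // In Version.java this runs as an assert at class load time and fails
        // if the constant table lags behind the Lucene jar on the classpath.
        assert EXPECTED_LUCENE_VERSION.equals(Version.LATEST)
            : "Version must be upgraded to [" + Version.LATEST
                + "] is still set to [" + EXPECTED_LUCENE_VERSION + "]";
        System.out.println("Lucene version in sync: " + Version.LATEST);
    }
}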

@@ -127,12 +127,6 @@ import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeAction;
 import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeSettingsAction;
 import org.elasticsearch.action.admin.indices.validate.query.TransportValidateQueryAction;
 import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction;
-import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerAction;
-import org.elasticsearch.action.admin.indices.warmer.delete.TransportDeleteWarmerAction;
-import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersAction;
-import org.elasticsearch.action.admin.indices.warmer.get.TransportGetWarmersAction;
-import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerAction;
-import org.elasticsearch.action.admin.indices.warmer.put.TransportPutWarmerAction;
 import org.elasticsearch.action.bulk.BulkAction;
 import org.elasticsearch.action.bulk.TransportBulkAction;
 import org.elasticsearch.action.bulk.TransportShardBulkAction;
@@ -304,9 +298,6 @@ public class ActionModule extends AbstractModule {
 registerAction(UpgradeStatusAction.INSTANCE, TransportUpgradeStatusAction.class);
 registerAction(UpgradeSettingsAction.INSTANCE, TransportUpgradeSettingsAction.class);
 registerAction(ClearIndicesCacheAction.INSTANCE, TransportClearIndicesCacheAction.class);
-registerAction(PutWarmerAction.INSTANCE, TransportPutWarmerAction.class);
-registerAction(DeleteWarmerAction.INSTANCE, TransportDeleteWarmerAction.class);
-registerAction(GetWarmersAction.INSTANCE, TransportGetWarmersAction.class);
 registerAction(GetAliasesAction.INSTANCE, TransportGetAliasesAction.class);
 registerAction(AliasesExistAction.INSTANCE, TransportAliasesExistAction.class);
 registerAction(GetSettingsAction.INSTANCE, TransportGetSettingsAction.class);

@@ -35,7 +35,7 @@ public interface AliasesRequest extends IndicesRequest.Replaceable {
 /**
  * Sets the array of aliases that the action relates to
  */
-AliasesRequest aliases(String[] aliases);
+AliasesRequest aliases(String... aliases);
 /**
  * Returns true if wildcards expressions among aliases should be resolved, false otherwise

@@ -41,9 +41,9 @@ public interface IndicesRequest {
 IndicesOptions indicesOptions();
 static interface Replaceable extends IndicesRequest {
-/*
- * Sets the array of indices that the action relates to
+/**
+ * Sets the indices that the action relates to.
  */
-IndicesRequest indices(String[] indices);
+IndicesRequest indices(String... indices);
 }
 }
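
This interface and the request classes below (ClusterHealthRequest, IndicesExistsRequest, TypesExistsRequest, GetIndexRequest's siblings, PutMappingRequest) all move their indices/aliases setters from String[] to varargs. A self-contained illustration of why the change is source-compatible in both directions; the Req type here is a hypothetical stand-in, not one of the real request classes:

public class VarargsDemo {
    interface Replaceable {
        Replaceable indices(String... indices); // was: indices(String[] indices)
    }

    static class Req implements Replaceable {
        private String[] indices;

        @Override
        public Req indices(String... indices) {
            this.indices = indices;
            return this;
        }
    }

    public static void main(String[] args) {
        new Req().indices("logs-1", "logs-2");        // new, terser call style
        new Req().indices(new String[] { "logs-1" }); // existing array callers still compile
    }
}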

@@ -61,7 +61,7 @@ public class ClusterHealthRequest extends MasterNodeReadRequest<ClusterHealthReq
 }
 @Override
-public ClusterHealthRequest indices(String[] indices) {
+public ClusterHealthRequest indices(String... indices) {
 this.indices = indices;
 return this;
 }

@@ -51,7 +51,7 @@ public class IndicesExistsRequest extends MasterNodeReadRequest<IndicesExistsReq
 }
 @Override
-public IndicesExistsRequest indices(String[] indices) {
+public IndicesExistsRequest indices(String... indices) {
 this.indices = indices;
 return this;
 }

@@ -52,7 +52,7 @@ public class TypesExistsRequest extends MasterNodeReadRequest<TypesExistsRequest
 }
 @Override
-public TypesExistsRequest indices(String[] indices) {
+public TypesExistsRequest indices(String... indices) {
 this.indices = indices;
 return this;
 }

@@ -37,8 +37,7 @@ public class GetIndexRequest extends ClusterInfoRequest<GetIndexRequest> {
 public static enum Feature {
 ALIASES((byte) 0, "_aliases", "_alias"),
 MAPPINGS((byte) 1, "_mappings", "_mapping"),
-SETTINGS((byte) 2, "_settings"),
-WARMERS((byte) 3, "_warmers", "_warmer");
+SETTINGS((byte) 2, "_settings");
 private static final Feature[] FEATURES = new Feature[Feature.values().length];
@@ -97,7 +96,7 @@ public class GetIndexRequest extends ClusterInfoRequest<GetIndexRequest> {
 }
 }
-private static final Feature[] DEFAULT_FEATURES = new Feature[] { Feature.ALIASES, Feature.MAPPINGS, Feature.SETTINGS, Feature.WARMERS };
+private static final Feature[] DEFAULT_FEATURES = new Feature[] { Feature.ALIASES, Feature.MAPPINGS, Feature.SETTINGS };
 private Feature[] features = DEFAULT_FEATURES;
 private boolean humanReadable = false;

@@ -27,7 +27,6 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.search.warmer.IndexWarmersMetaData;
 import java.io.IOException;
 import java.util.ArrayList;
@@ -39,19 +38,15 @@ import java.util.List;
  */
 public class GetIndexResponse extends ActionResponse {
-private ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> warmers = ImmutableOpenMap.of();
 private ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings = ImmutableOpenMap.of();
 private ImmutableOpenMap<String, List<AliasMetaData>> aliases = ImmutableOpenMap.of();
 private ImmutableOpenMap<String, Settings> settings = ImmutableOpenMap.of();
 private String[] indices;
-GetIndexResponse(String[] indices, ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> warmers,
+GetIndexResponse(String[] indices,
 ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings,
 ImmutableOpenMap<String, List<AliasMetaData>> aliases, ImmutableOpenMap<String, Settings> settings) {
 this.indices = indices;
-if (warmers != null) {
-this.warmers = warmers;
-}
 if (mappings != null) {
 this.mappings = mappings;
 }
@@ -74,14 +69,6 @@ public class GetIndexResponse extends ActionResponse {
 return indices();
 }
-public ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> warmers() {
-return warmers;
-}
-public ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> getWarmers() {
-return warmers();
-}
 public ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings() {
 return mappings;
 }
@@ -110,23 +97,6 @@ public class GetIndexResponse extends ActionResponse {
 public void readFrom(StreamInput in) throws IOException {
 super.readFrom(in);
 this.indices = in.readStringArray();
-int warmersSize = in.readVInt();
-ImmutableOpenMap.Builder<String, List<IndexWarmersMetaData.Entry>> warmersMapBuilder = ImmutableOpenMap.builder();
-for (int i = 0; i < warmersSize; i++) {
-String key = in.readString();
-int valueSize = in.readVInt();
-List<IndexWarmersMetaData.Entry> warmerEntryBuilder = new ArrayList<>();
-for (int j = 0; j < valueSize; j++) {
-warmerEntryBuilder.add(new IndexWarmersMetaData.Entry(
-in.readString(),
-in.readStringArray(),
-in.readOptionalBoolean(),
-in.readBoolean() ? new IndexWarmersMetaData.SearchSource(in) : null)
-);
-}
-warmersMapBuilder.put(key, Collections.unmodifiableList(warmerEntryBuilder));
-}
-warmers = warmersMapBuilder.build();
 int mappingsSize = in.readVInt();
 ImmutableOpenMap.Builder<String, ImmutableOpenMap<String, MappingMetaData>> mappingsMapBuilder = ImmutableOpenMap.builder();
 for (int i = 0; i < mappingsSize; i++) {
@@ -164,21 +134,6 @@ public class GetIndexResponse extends ActionResponse {
 public void writeTo(StreamOutput out) throws IOException {
 super.writeTo(out);
 out.writeStringArray(indices);
-out.writeVInt(warmers.size());
-for (ObjectObjectCursor<String, List<IndexWarmersMetaData.Entry>> indexEntry : warmers) {
-out.writeString(indexEntry.key);
-out.writeVInt(indexEntry.value.size());
-for (IndexWarmersMetaData.Entry warmerEntry : indexEntry.value) {
-out.writeString(warmerEntry.name());
-out.writeStringArray(warmerEntry.types());
-out.writeOptionalBoolean(warmerEntry.requestCache());
-boolean hasSource = warmerEntry.source() != null;
-out.writeBoolean(hasSource);
-if (hasSource) {
-warmerEntry.source().writeTo(out);
-}
-}
-}
 out.writeVInt(mappings.size());
 for (ObjectObjectCursor<String, ImmutableOpenMap<String, MappingMetaData>> indexEntry : mappings) {
 out.writeString(indexEntry.key);
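
Note that the warmer block is deleted from readFrom and writeTo at the same position in each. The stream format carries no field tags, so reads must consume exactly what writes produce, in the same order. A toy round-trip with plain java.io streams (hypothetical stand-ins for StreamInput/StreamOutput) shows the invariant being preserved when a field is dropped from both sides:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class SymmetricSerialization {
    // writeTo and readFrom must stay mirror images: dropping a field (here, the
    // "warmers" block) from only one side would desynchronize the stream and
    // corrupt every value read after it.
    static void writeTo(DataOutputStream out, String[] indices) throws IOException {
        out.writeInt(indices.length);
        for (String index : indices) {
            out.writeUTF(index);
        }
        // removed: warmers block used to be written here, before mappings
        out.writeInt(0); // mappings size (empty in this toy example)
    }

    static String[] readFrom(DataInputStream in) throws IOException {
        String[] indices = new String[in.readInt()];
        for (int i = 0; i < indices.length; i++) {
            indices[i] = in.readUTF();
        }
        // removed: warmers block used to be read here, before mappings
        in.readInt(); // mappings size
        return indices;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        writeTo(new DataOutputStream(bytes), new String[] { "idx-1", "idx-2" });
        String[] roundTripped = readFrom(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(roundTripped.length + " indices read back");
    }
}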

@@ -36,7 +36,6 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.search.warmer.IndexWarmersMetaData.Entry;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
@@ -72,7 +71,6 @@ public class TransportGetIndexAction extends TransportClusterInfoAction<GetIndex
 @Override
 protected void doMasterOperation(final GetIndexRequest request, String[] concreteIndices, final ClusterState state,
 final ActionListener<GetIndexResponse> listener) {
-ImmutableOpenMap<String, List<Entry>> warmersResult = ImmutableOpenMap.of();
 ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappingsResult = ImmutableOpenMap.of();
 ImmutableOpenMap<String, List<AliasMetaData>> aliasesResult = ImmutableOpenMap.of();
 ImmutableOpenMap<String, Settings> settings = ImmutableOpenMap.of();
@@ -80,15 +78,8 @@ public class TransportGetIndexAction extends TransportClusterInfoAction<GetIndex
 boolean doneAliases = false;
 boolean doneMappings = false;
 boolean doneSettings = false;
-boolean doneWarmers = false;
 for (Feature feature : features) {
 switch (feature) {
-case WARMERS:
-if (!doneWarmers) {
-warmersResult = state.metaData().findWarmers(concreteIndices, request.types(), Strings.EMPTY_ARRAY);
-doneWarmers = true;
-}
-break;
 case MAPPINGS:
 if (!doneMappings) {
 mappingsResult = state.metaData().findMappings(concreteIndices, request.types());
@@ -120,6 +111,6 @@ public class TransportGetIndexAction extends TransportClusterInfoAction<GetIndex
 throw new IllegalStateException("feature [" + feature + "] is not valid");
 }
 }
-listener.onResponse(new GetIndexResponse(concreteIndices, warmersResult, mappingsResult, aliasesResult, settings));
+listener.onResponse(new GetIndexResponse(concreteIndices, mappingsResult, aliasesResult, settings));
 }
 }

@@ -20,6 +20,7 @@
 package org.elasticsearch.action.admin.indices.mapping.put;
+import com.carrotsearch.hppc.ObjectHashSet;
 import org.elasticsearch.ElasticsearchGenerationException;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.IndicesRequest;
@@ -96,7 +97,7 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
  * Sets the indices this put mapping operation will execute on.
  */
 @Override
-public PutMappingRequest indices(String[] indices) {
+public PutMappingRequest indices(String... indices) {
 this.indices = indices;
 return this;
 }

@@ -32,7 +32,7 @@ import org.elasticsearch.index.engine.SegmentsStats;
 import org.elasticsearch.index.fielddata.FieldDataStats;
 import org.elasticsearch.index.flush.FlushStats;
 import org.elasticsearch.index.get.GetStats;
-import org.elasticsearch.index.indexing.IndexingStats;
+import org.elasticsearch.index.shard.IndexingStats;
 import org.elasticsearch.index.merge.MergeStats;
 import org.elasticsearch.index.percolator.PercolateStats;
 import org.elasticsearch.index.recovery.RecoveryStats;

@@ -37,6 +37,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.routing.GroupShardsIterator;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.common.ParsingException;
+import org.elasticsearch.common.Randomness;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.BigArrays;
@@ -59,7 +60,6 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.atomic.AtomicReferenceArray;
 /**
@@ -108,7 +108,7 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
 @Override
 protected GroupShardsIterator shards(ClusterState clusterState, ValidateQueryRequest request, String[] concreteIndices) {
 // Hard-code routing to limit request to a single shard, but still, randomize it...
-Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, Integer.toString(ThreadLocalRandom.current().nextInt(1000)), request.indices());
+Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, Integer.toString(Randomness.get().nextInt(1000)), request.indices());
 return clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, "_local");
 }
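
The shards() change is the same substitution the forbidden-apis hunk enforces: the random routing value now comes from Randomness.get() rather than ThreadLocalRandom.current(). A stripped-down sketch of just that substitution, assuming only the Randomness#get helper named in the diff:

import java.util.Random;

import org.elasticsearch.common.Randomness;

public class RandomRoutingValue {
    public static void main(String[] args) {
        // Reproducible replacement for ThreadLocalRandom.current().nextInt(1000)
        Random random = Randomness.get();
        String routing = Integer.toString(random.nextInt(1000));
        System.out.println("routing=" + routing);
    }
}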

@@ -1,46 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.warmer.delete;
import org.elasticsearch.action.Action;
import org.elasticsearch.client.ElasticsearchClient;
/**
* Action for the admin/warmers/delete API.
*/
public class DeleteWarmerAction extends Action<DeleteWarmerRequest, DeleteWarmerResponse, DeleteWarmerRequestBuilder> {
public static final DeleteWarmerAction INSTANCE = new DeleteWarmerAction();
public static final String NAME = "indices:admin/warmers/delete";
private DeleteWarmerAction() {
super(NAME);
}
@Override
public DeleteWarmerResponse newResponse() {
return new DeleteWarmerResponse();
}
@Override
public DeleteWarmerRequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new DeleteWarmerRequestBuilder(client, this);
}
}

@@ -1,147 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.warmer.delete;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.util.CollectionUtils;
import java.io.IOException;
import static org.elasticsearch.action.ValidateActions.addValidationError;
/**
* A request that deletes a index warmer (name, {@link org.elasticsearch.action.search.SearchRequest})
* tuple from the clusters metadata.
*/
public class DeleteWarmerRequest extends AcknowledgedRequest<DeleteWarmerRequest> implements IndicesRequest.Replaceable {
private String[] names = Strings.EMPTY_ARRAY;
private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, false);
private String[] indices = Strings.EMPTY_ARRAY;
public DeleteWarmerRequest() {
}
/**
* Constructs a new delete warmer request for the specified name.
*
* @param names the name (or wildcard expression) of the warmer to match, null to delete all.
*/
public DeleteWarmerRequest(String... names) {
names(names);
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (CollectionUtils.isEmpty(names)) {
validationException = addValidationError("warmer names are missing", validationException);
} else {
validationException = checkForEmptyString(validationException, names);
}
if (CollectionUtils.isEmpty(indices)) {
validationException = addValidationError("indices are missing", validationException);
} else {
validationException = checkForEmptyString(validationException, indices);
}
return validationException;
}
private ActionRequestValidationException checkForEmptyString(ActionRequestValidationException validationException, String[] strings) {
boolean containsEmptyString = false;
for (String string : strings) {
if (!Strings.hasText(string)) {
containsEmptyString = true;
}
}
if (containsEmptyString) {
validationException = addValidationError("types must not contain empty strings", validationException);
}
return validationException;
}
/**
* The name to delete.
*/
@Nullable
public String[] names() {
return names;
}
/**
* The name (or wildcard expression) of the index warmer to delete, or null
* to delete all warmers.
*/
public DeleteWarmerRequest names(@Nullable String... names) {
this.names = names;
return this;
}
/**
* Sets the indices this put mapping operation will execute on.
*/
@Override
public DeleteWarmerRequest indices(String... indices) {
this.indices = indices;
return this;
}
/**
* The indices the mappings will be put.
*/
@Override
public String[] indices() {
return indices;
}
@Override
public IndicesOptions indicesOptions() {
return indicesOptions;
}
public DeleteWarmerRequest indicesOptions(IndicesOptions indicesOptions) {
this.indicesOptions = indicesOptions;
return this;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
names = in.readStringArray();
indices = in.readStringArray();
indicesOptions = IndicesOptions.readIndicesOptions(in);
readTimeout(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeStringArrayNullable(names);
out.writeStringArrayNullable(indices);
indicesOptions.writeIndicesOptions(out);
writeTimeout(out);
}
}

@@ -1,60 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.warmer.delete;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
/**
* A builder for the {@link DeleteWarmerRequest}
*
* @see DeleteWarmerRequest for details
*/
public class DeleteWarmerRequestBuilder extends AcknowledgedRequestBuilder<DeleteWarmerRequest, DeleteWarmerResponse, DeleteWarmerRequestBuilder> {
public DeleteWarmerRequestBuilder(ElasticsearchClient client, DeleteWarmerAction action) {
super(client, action, new DeleteWarmerRequest());
}
public DeleteWarmerRequestBuilder setIndices(String... indices) {
request.indices(indices);
return this;
}
/**
* The name (or wildcard expression) of the index warmer to delete, or null
* to delete all warmers.
*/
public DeleteWarmerRequestBuilder setNames(String... names) {
request.names(names);
return this;
}
/**
* Specifies what type of requested indices to ignore and wildcard indices expressions.
* <p>
* For example indices that don't exist.
*/
public DeleteWarmerRequestBuilder setIndicesOptions(IndicesOptions options) {
request.indicesOptions(options);
return this;
}
}

@@ -1,51 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.warmer.delete;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import java.io.IOException;
/**
* An acknowledged response of delete warmer operation.
*/
public class DeleteWarmerResponse extends AcknowledgedResponse {
DeleteWarmerResponse() {
super();
}
DeleteWarmerResponse(boolean acknowledged) {
super(acknowledged);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
readAcknowledged(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
writeAcknowledged(out);
}
}

@@ -1,163 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.warmer.delete;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.search.warmer.IndexWarmerMissingException;
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
* Internal Actions executed on the master deleting the warmer from the cluster state metadata.
*
* Note: this is an internal API and should not be used / called by any client code.
*/
public class TransportDeleteWarmerAction extends TransportMasterNodeAction<DeleteWarmerRequest, DeleteWarmerResponse> {
@Inject
public TransportDeleteWarmerAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, DeleteWarmerAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, DeleteWarmerRequest::new);
}
@Override
protected String executor() {
// we go async right away
return ThreadPool.Names.SAME;
}
@Override
protected DeleteWarmerResponse newResponse() {
return new DeleteWarmerResponse();
}
@Override
protected ClusterBlockException checkBlock(DeleteWarmerRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndices(state, request));
}
@Override
protected void masterOperation(final DeleteWarmerRequest request, final ClusterState state, final ActionListener<DeleteWarmerResponse> listener) {
final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
clusterService.submitStateUpdateTask("delete_warmer [" + Arrays.toString(request.names()) + "]", new AckedClusterStateUpdateTask<DeleteWarmerResponse>(request, listener) {
@Override
protected DeleteWarmerResponse newResponse(boolean acknowledged) {
return new DeleteWarmerResponse(acknowledged);
}
@Override
public void onFailure(String source, Throwable t) {
logger.debug("failed to delete warmer [{}] on indices [{}]", t, Arrays.toString(request.names()), concreteIndices);
super.onFailure(source, t);
}
@Override
public ClusterState execute(ClusterState currentState) {
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
boolean globalFoundAtLeastOne = false;
boolean deleteAll = false;
for (int i=0; i<request.names().length; i++){
if (request.names()[i].equals(MetaData.ALL)) {
deleteAll = true;
break;
}
}
for (String index : concreteIndices) {
IndexMetaData indexMetaData = currentState.metaData().index(index);
if (indexMetaData == null) {
throw new IndexNotFoundException(index);
}
IndexWarmersMetaData warmers = indexMetaData.custom(IndexWarmersMetaData.TYPE);
if (warmers != null) {
List<IndexWarmersMetaData.Entry> entries = new ArrayList<>();
for (IndexWarmersMetaData.Entry entry : warmers.entries()) {
boolean keepWarmer = true;
for (String warmer : request.names()) {
if (Regex.simpleMatch(warmer, entry.name()) || warmer.equals(MetaData.ALL)) {
globalFoundAtLeastOne = true;
keepWarmer = false;
// don't add it...
break;
}
}
if (keepWarmer) {
entries.add(entry);
}
}
// a change, update it...
if (entries.size() != warmers.entries().size()) {
warmers = new IndexWarmersMetaData(entries.toArray(new IndexWarmersMetaData.Entry[entries.size()]));
IndexMetaData.Builder indexBuilder = IndexMetaData.builder(indexMetaData).putCustom(IndexWarmersMetaData.TYPE, warmers);
mdBuilder.put(indexBuilder);
}
}
}
if (globalFoundAtLeastOne == false && deleteAll == false) {
throw new IndexWarmerMissingException(request.names());
}
if (logger.isInfoEnabled()) {
for (String index : concreteIndices) {
IndexMetaData indexMetaData = currentState.metaData().index(index);
if (indexMetaData == null) {
throw new IndexNotFoundException(index);
}
IndexWarmersMetaData warmers = indexMetaData.custom(IndexWarmersMetaData.TYPE);
if (warmers != null) {
for (IndexWarmersMetaData.Entry entry : warmers.entries()) {
for (String warmer : request.names()) {
if (Regex.simpleMatch(warmer, entry.name()) || warmer.equals(MetaData.ALL)) {
logger.info("[{}] delete warmer [{}]", index, entry.name());
}
}
}
} else if(deleteAll){
logger.debug("no warmers to delete on index [{}]", index);
}
}
}
return ClusterState.builder(currentState).metaData(mdBuilder).build();
}
});
}
}

@@ -1,46 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.warmer.get;
import org.elasticsearch.action.Action;
import org.elasticsearch.client.ElasticsearchClient;
/**
* Action for the admin/warmers/get API.
*/
public class GetWarmersAction extends Action<GetWarmersRequest, GetWarmersResponse, GetWarmersRequestBuilder> {
public static final GetWarmersAction INSTANCE = new GetWarmersAction();
public static final String NAME = "indices:admin/warmers/get";
private GetWarmersAction() {
super(NAME);
}
@Override
public GetWarmersRequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new GetWarmersRequestBuilder(client, this);
}
@Override
public GetWarmersResponse newResponse() {
return new GetWarmersResponse();
}
}

@@ -1,64 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.warmer.get;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.master.info.ClusterInfoRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import java.io.IOException;
/**
* A {@link ClusterInfoRequest} that fetches {@link org.elasticsearch.search.warmer.IndexWarmersMetaData} for
* a list or all existing index warmers in the cluster-state
*/
public class GetWarmersRequest extends ClusterInfoRequest<GetWarmersRequest> {
private String[] warmers = Strings.EMPTY_ARRAY;
public GetWarmersRequest warmers(String[] warmers) {
this.warmers = warmers;
return this;
}
public String[] warmers() {
return warmers;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
warmers = in.readStringArray();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeStringArray(warmers);
}
}

@@ -1,46 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.warmer.get;
import org.elasticsearch.action.support.master.info.ClusterInfoRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.util.ArrayUtils;
/**
* Builder for {@link GetWarmersRequest}
*
* @see GetWarmersRequest for details
*/
public class GetWarmersRequestBuilder extends ClusterInfoRequestBuilder<GetWarmersRequest, GetWarmersResponse, GetWarmersRequestBuilder> {
public GetWarmersRequestBuilder(ElasticsearchClient client, GetWarmersAction action, String... indices) {
super(client, action, new GetWarmersRequest().indices(indices));
}
public GetWarmersRequestBuilder setWarmers(String... warmers) {
request.warmers(warmers);
return this;
}
public GetWarmersRequestBuilder addWarmers(String... warmers) {
request.warmers(ArrayUtils.concat(request.warmers(), warmers));
return this;
}
}

@@ -1,107 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.warmer.get;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
* Holds a warmer-name to a list of {@link IndexWarmersMetaData} mapping for each warmer specified
* in the {@link GetWarmersRequest}. This information is fetched from the current master since the metadata
* is contained inside the cluster-state
*/
public class GetWarmersResponse extends ActionResponse {
private ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> warmers = ImmutableOpenMap.of();
GetWarmersResponse(ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> warmers) {
this.warmers = warmers;
}
GetWarmersResponse() {
}
public ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> warmers() {
return warmers;
}
public ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> getWarmers() {
return warmers();
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
int size = in.readVInt();
ImmutableOpenMap.Builder<String, List<IndexWarmersMetaData.Entry>> indexMapBuilder = ImmutableOpenMap.builder();
for (int i = 0; i < size; i++) {
String key = in.readString();
int valueSize = in.readVInt();
List<IndexWarmersMetaData.Entry> warmerEntryBuilder = new ArrayList<>();
for (int j = 0; j < valueSize; j++) {
String name = in.readString();
String[] types = in.readStringArray();
IndexWarmersMetaData.SearchSource source = null;
if (in.readBoolean()) {
source = new IndexWarmersMetaData.SearchSource(in);
}
Boolean queryCache = null;
queryCache = in.readOptionalBoolean();
warmerEntryBuilder.add(new IndexWarmersMetaData.Entry(
name,
types,
queryCache,
source)
);
}
indexMapBuilder.put(key, Collections.unmodifiableList(warmerEntryBuilder));
}
warmers = indexMapBuilder.build();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(warmers.size());
for (ObjectObjectCursor<String, List<IndexWarmersMetaData.Entry>> indexEntry : warmers) {
out.writeString(indexEntry.key);
out.writeVInt(indexEntry.value.size());
for (IndexWarmersMetaData.Entry warmerEntry : indexEntry.value) {
out.writeString(warmerEntry.name());
out.writeStringArray(warmerEntry.types());
boolean hasWarmerSource = warmerEntry != null;
out.writeBoolean(hasWarmerSource);
if (hasWarmerSource) {
warmerEntry.source().writeTo(out);
}
out.writeOptionalBoolean(warmerEntry.requestCache());
}
}
}
}

@@ -1,75 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.warmer.get;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.info.TransportClusterInfoAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.List;
/**
* Internal Actions executed on the master fetching the warmer from the cluster state metadata.
*
* Note: this is an internal API and should not be used / called by any client code.
*/
public class TransportGetWarmersAction extends TransportClusterInfoAction<GetWarmersRequest, GetWarmersResponse> {
@Inject
public TransportGetWarmersAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, GetWarmersAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, GetWarmersRequest::new);
}
@Override
protected String executor() {
// very lightweight operation, no need to fork
return ThreadPool.Names.SAME;
}
@Override
protected ClusterBlockException checkBlock(GetWarmersRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, request));
}
@Override
protected GetWarmersResponse newResponse() {
return new GetWarmersResponse();
}
@Override
protected void doMasterOperation(final GetWarmersRequest request, String[] concreteIndices, final ClusterState state, final ActionListener<GetWarmersResponse> listener) {
ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> result = state.metaData().findWarmers(
concreteIndices, request.types(), request.warmers()
);
listener.onResponse(new GetWarmersResponse(result));
}
}

@@ -1,30 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Index / Search Warmer Administrative Actions
* <p>
* Index warming allows to run registered search requests to warm up the index before it is available for search.
* With the near real time aspect of search, cold data (segments) will be warmed up before they become available for
* search. This includes things such as the query cache, filesystem cache, and loading field data for fields.
* </p>
*
* See the reference guide for more detailed information about the Indices / Search Warmer
*/
package org.elasticsearch.action.admin.indices.warmer;

@@ -1,153 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.warmer.put;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import java.io.IOException;
import static org.elasticsearch.action.ValidateActions.addValidationError;
/**
* A request that associates a {@link SearchRequest} with a name in the cluster that is
* in-turn used to warm up indices before they are available for search.
*
* Note: neither the search request nor the name must be <code>null</code>
*/
public class PutWarmerRequest extends AcknowledgedRequest<PutWarmerRequest> implements IndicesRequest.Replaceable {
private String name;
private SearchRequest searchRequest;
public PutWarmerRequest() {
}
/**
* Constructs a new warmer.
*
* @param name The name of the warmer.
*/
public PutWarmerRequest(String name) {
this.name = name;
}
/**
* Sets the name of the warmer.
*/
public PutWarmerRequest name(String name) {
this.name = name;
return this;
}
public String name() {
return this.name;
}
/**
* Sets the search request to warm.
*/
public PutWarmerRequest searchRequest(SearchRequest searchRequest) {
this.searchRequest = searchRequest;
return this;
}
/**
* Sets the search request to warm.
*/
public PutWarmerRequest searchRequest(SearchRequestBuilder searchRequest) {
this.searchRequest = searchRequest.request();
return this;
}
public SearchRequest searchRequest() {
return this.searchRequest;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (searchRequest == null) {
validationException = addValidationError("search request is missing", validationException);
} else {
validationException = searchRequest.validate();
}
if (name == null) {
validationException = addValidationError("name is missing", validationException);
}
return validationException;
}
@Override
public String[] indices() {
if (searchRequest == null) {
throw new IllegalStateException("unable to retrieve indices, search request is null");
}
return searchRequest.indices();
}
@Override
public IndicesRequest indices(String[] indices) {
if (searchRequest == null) {
throw new IllegalStateException("unable to set indices, search request is null");
}
searchRequest.indices(indices);
return this;
}
@Override
public IndicesOptions indicesOptions() {
if (searchRequest == null) {
throw new IllegalStateException("unable to retrieve indices options, search request is null");
}
return searchRequest.indicesOptions();
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
name = in.readString();
if (in.readBoolean()) {
searchRequest = new SearchRequest();
searchRequest.readFrom(in);
}
readTimeout(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(name);
if (searchRequest == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
searchRequest.writeTo(out);
}
writeTimeout(out);
}
}
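For orientation, here is a minimal sketch of how a warmer was typically registered through this API; the index name, type, and query below are illustrative placeholders, not taken from this commit:

    // Hypothetical usage: register a warmer named "warmer_1" on index "test".
    // Assumes a connected Client instance; all names and the query are examples only.
    PutWarmerResponse response = client.admin().indices()
            .preparePutWarmer("warmer_1")
            .setSearchRequest(client.prepareSearch("test")
                    .setTypes("type1")
                    .setQuery(QueryBuilders.matchAllQuery()))
            .get();
    assert response.isAcknowledged();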


@ -1,72 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.warmer.put;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
/**
* Builder for {@link PutWarmerRequest}
*
* @see PutWarmerRequest for details
*/
public class PutWarmerRequestBuilder extends AcknowledgedRequestBuilder<PutWarmerRequest, PutWarmerResponse, PutWarmerRequestBuilder> {
/**
* Creates a new {@link PutWarmerRequestBuilder} with a given name.
*/
public PutWarmerRequestBuilder(ElasticsearchClient client, PutWarmerAction action, String name) {
super(client, action, new PutWarmerRequest().name(name));
}
/**
 * Creates a new {@link PutWarmerRequestBuilder}.
 * Note: {@link #setName(String)} must be called with a non-null value before this request is executed.
 */
public PutWarmerRequestBuilder(ElasticsearchClient client, PutWarmerAction action) {
super(client, action, new PutWarmerRequest());
}
/**
* Sets the name of the warmer.
*/
public PutWarmerRequestBuilder setName(String name) {
request.name(name);
return this;
}
/**
* Sets the search request to use to warm the index when applicable.
*/
public PutWarmerRequestBuilder setSearchRequest(SearchRequest searchRequest) {
request.searchRequest(searchRequest);
return this;
}
/**
* Sets the search request to use to warm the index when applicable.
*/
public PutWarmerRequestBuilder setSearchRequest(SearchRequestBuilder searchRequest) {
request.searchRequest(searchRequest);
return this;
}
}


@ -1,52 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.warmer.put;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import java.io.IOException;
/**
* An acknowledged response of put warmer operation.
*/
public class PutWarmerResponse extends AcknowledgedResponse {
PutWarmerResponse() {
super();
}
PutWarmerResponse(boolean acknowledged) {
super(acknowledged);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
readAcknowledged(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
writeAcknowledged(out);
}
}


@ -1,167 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.warmer.put;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.TransportSearchAction;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
 * Internal action, executed on the master node, that associates a warmer with a name in the cluster state metadata.
 *
 * Note: this is an internal API and should not be used or called by any client code.
 */
public class TransportPutWarmerAction extends TransportMasterNodeAction<PutWarmerRequest, PutWarmerResponse> {
private final TransportSearchAction searchAction;
@Inject
public TransportPutWarmerAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
TransportSearchAction searchAction, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, PutWarmerAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, PutWarmerRequest::new);
this.searchAction = searchAction;
}
@Override
protected String executor() {
return ThreadPool.Names.SAME;
}
@Override
protected PutWarmerResponse newResponse() {
return new PutWarmerResponse();
}
@Override
protected ClusterBlockException checkBlock(PutWarmerRequest request, ClusterState state) {
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
ClusterBlockException status = state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, concreteIndices);
if (status != null) {
return status;
}
// PutWarmer executes a SearchQuery before adding the new warmer to the cluster state,
// so we need to check the same block as TransportSearchTypeAction here
return state.blocks().indicesBlockedException(ClusterBlockLevel.READ, concreteIndices);
}
@Override
protected void masterOperation(final PutWarmerRequest request, final ClusterState state, final ActionListener<PutWarmerResponse> listener) {
// first execute the search request and check that it's ok...
SearchRequest searchRequest = new SearchRequest(request.searchRequest(), request);
searchAction.execute(searchRequest, new ActionListener<SearchResponse>() {
@Override
public void onResponse(SearchResponse searchResponse) {
if (searchResponse.getFailedShards() > 0) {
listener.onFailure(new ElasticsearchException("search failed with failed shards: " + Arrays.toString(searchResponse.getShardFailures())));
return;
}
clusterService.submitStateUpdateTask("put_warmer [" + request.name() + "]", new AckedClusterStateUpdateTask<PutWarmerResponse>(request, listener) {
@Override
protected PutWarmerResponse newResponse(boolean acknowledged) {
return new PutWarmerResponse(acknowledged);
}
@Override
public void onFailure(String source, Throwable t) {
logger.debug("failed to put warmer [{}] on indices [{}]", t, request.name(), request.searchRequest().indices());
super.onFailure(source, t);
}
@Override
public ClusterState execute(ClusterState currentState) {
MetaData metaData = currentState.metaData();
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(currentState, request.searchRequest().indicesOptions(), request.searchRequest().indices());
IndexWarmersMetaData.SearchSource source = null;
if (request.searchRequest().source() != null) {
source = new IndexWarmersMetaData.SearchSource(request.searchRequest().source());
}
// now replace it on the metadata
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
for (String index : concreteIndices) {
IndexMetaData indexMetaData = metaData.index(index);
if (indexMetaData == null) {
throw new IndexNotFoundException(index);
}
IndexWarmersMetaData warmers = indexMetaData.custom(IndexWarmersMetaData.TYPE);
if (warmers == null) {
logger.info("[{}] putting warmer [{}]", index, request.name());
warmers = new IndexWarmersMetaData(new IndexWarmersMetaData.Entry(request.name(), request.searchRequest().types(), request.searchRequest().requestCache(), source));
} else {
boolean found = false;
List<IndexWarmersMetaData.Entry> entries = new ArrayList<>(warmers.entries().size() + 1);
for (IndexWarmersMetaData.Entry entry : warmers.entries()) {
if (entry.name().equals(request.name())) {
found = true;
entries.add(new IndexWarmersMetaData.Entry(request.name(), request.searchRequest().types(), request.searchRequest().requestCache(), source));
} else {
entries.add(entry);
}
}
if (!found) {
logger.info("[{}] put warmer [{}]", index, request.name());
entries.add(new IndexWarmersMetaData.Entry(request.name(), request.searchRequest().types(), request.searchRequest().requestCache(), source));
} else {
logger.info("[{}] update warmer [{}]", index, request.name());
}
warmers = new IndexWarmersMetaData(entries.toArray(new IndexWarmersMetaData.Entry[entries.size()]));
}
IndexMetaData.Builder indexBuilder = IndexMetaData.builder(indexMetaData).putCustom(IndexWarmersMetaData.TYPE, warmers);
mdBuilder.put(indexBuilder);
}
return ClusterState.builder(currentState).metaData(mdBuilder).build();
}
});
}
@Override
public void onFailure(Throwable e) {
listener.onFailure(e);
}
});
}
}
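The master operation above follows a common two-phase pattern: first validate the request by actually executing the search, then publish the change through an acked cluster-state update. A condensed sketch of that second phase, using the types from the code above (the elided body is where the metadata is rebuilt):

    // Derive a new immutable ClusterState from the current one; acknowledgement
    // from other nodes is reported back through newResponse(boolean).
    clusterService.submitStateUpdateTask("put_warmer [" + request.name() + "]",
            new AckedClusterStateUpdateTask<PutWarmerResponse>(request, listener) {
                @Override
                protected PutWarmerResponse newResponse(boolean acknowledged) {
                    return new PutWarmerResponse(acknowledged);
                }
                @Override
                public ClusterState execute(ClusterState currentState) {
                    MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
                    // ... rebuild the per-index IndexWarmersMetaData here ...
                    return ClusterState.builder(currentState).metaData(mdBuilder).build();
                }
            });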


@ -419,7 +419,7 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
             }
         case NONE:
             UpdateResponse updateResponse = translate.action();
-            indexShard.indexingService().noopUpdate(updateRequest.type());
+            indexShard.noopUpdate(updateRequest.type());
             return new UpdateResult(translate, updateResponse);
         default:
             throw new IllegalStateException("Illegal update operation " + translate.operation());


@ -52,7 +52,7 @@ public class PercolateShardRequest extends BroadcastShardRequest {
         this.startTime = request.startTime;
     }
-    PercolateShardRequest(ShardId shardId, OriginalIndices originalIndices) {
+    public PercolateShardRequest(ShardId shardId, OriginalIndices originalIndices) {
         super(shardId, originalIndices);
     }
@ -81,15 +81,15 @@
         return onlyCount;
     }
-    void documentType(String documentType) {
+    public void documentType(String documentType) {
         this.documentType = documentType;
     }
-    void source(BytesReference source) {
+    public void source(BytesReference source) {
         this.source = source;
     }
-    void docSource(BytesReference docSource) {
+    public void docSource(BytesReference docSource) {
         this.docSource = docSource;
     }


@ -18,11 +18,12 @@
 */
 package org.elasticsearch.action.percolate;
-import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.search.TopDocs;
 import org.elasticsearch.action.support.broadcast.BroadcastShardResponse;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.percolator.PercolateContext;
 import org.elasticsearch.search.aggregations.InternalAggregations;
@ -43,31 +44,24 @@ import java.util.Map;
 */
 public class PercolateShardResponse extends BroadcastShardResponse {
-    private static final BytesRef[] EMPTY_MATCHES = new BytesRef[0];
-    private static final float[] EMPTY_SCORES = new float[0];
-    private static final List<Map<String, HighlightField>> EMPTY_HL = Collections.emptyList();
-    private long count;
-    private float[] scores;
-    private BytesRef[] matches;
-    private List<Map<String, HighlightField>> hls;
-    private byte percolatorTypeId;
+    private TopDocs topDocs;
+    private Map<Integer, String> ids;
+    private Map<Integer, Map<String, HighlightField>> hls;
+    private boolean onlyCount;
     private int requestedSize;
     private InternalAggregations aggregations;
     private List<SiblingPipelineAggregator> pipelineAggregators;
     PercolateShardResponse() {
-        hls = new ArrayList<>();
     }
-    public PercolateShardResponse(BytesRef[] matches, List<Map<String, HighlightField>> hls, long count, float[] scores, PercolateContext context, ShardId shardId) {
-        super(shardId);
-        this.matches = matches;
+    public PercolateShardResponse(TopDocs topDocs, Map<Integer, String> ids, Map<Integer, Map<String, HighlightField>> hls, PercolateContext context) {
+        super(new ShardId(context.shardTarget().getIndex(), context.shardTarget().getShardId()));
+        this.topDocs = topDocs;
+        this.ids = ids;
         this.hls = hls;
-        this.count = count;
-        this.scores = scores;
-        this.percolatorTypeId = context.percolatorTypeId;
+        this.onlyCount = context.isOnlyCount();
         this.requestedSize = context.size();
         QuerySearchResult result = context.queryResult();
         if (result != null) {
@ -78,39 +72,25 @@ public class PercolateShardResponse extends BroadcastShardResponse {
         }
     }
-    public PercolateShardResponse(BytesRef[] matches, long count, float[] scores, PercolateContext context, ShardId shardId) {
-        this(matches, EMPTY_HL, count, scores, context, shardId);
-    }
-    public PercolateShardResponse(BytesRef[] matches, List<Map<String, HighlightField>> hls, long count, PercolateContext context, ShardId shardId) {
-        this(matches, hls, count, EMPTY_SCORES, context, shardId);
-    }
-    public PercolateShardResponse(long count, PercolateContext context, ShardId shardId) {
-        this(EMPTY_MATCHES, EMPTY_HL, count, EMPTY_SCORES, context, shardId);
-    }
-    public PercolateShardResponse(PercolateContext context, ShardId shardId) {
-        this(EMPTY_MATCHES, EMPTY_HL, 0, EMPTY_SCORES, context, shardId);
-    }
-    public BytesRef[] matches() {
-        return matches;
-    }
-    public float[] scores() {
-        return scores;
-    }
-    public long count() {
-        return count;
-    }
+    public TopDocs topDocs() {
+        return topDocs;
+    }
+    /**
+     * Returns per match the percolator query id. The key is the Lucene docId of the matching percolator query.
+     */
+    public Map<Integer, String> ids() {
+        return ids;
+    }
     public int requestedSize() {
         return requestedSize;
     }
-    public List<Map<String, HighlightField>> hls() {
+    /**
+     * Returns per match the highlight snippets. The key is the Lucene docId of the matching percolator query.
+     */
+    public Map<Integer, Map<String, HighlightField>> hls() {
         return hls;
     }
@ -122,36 +102,35 @@ public class PercolateShardResponse extends BroadcastShardResponse {
         return pipelineAggregators;
     }
-    public byte percolatorTypeId() {
-        return percolatorTypeId;
+    public boolean onlyCount() {
+        return onlyCount;
     }
     public boolean isEmpty() {
-        return percolatorTypeId == 0x00;
+        return topDocs.totalHits == 0;
     }
     @Override
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
-        percolatorTypeId = in.readByte();
+        onlyCount = in.readBoolean();
         requestedSize = in.readVInt();
-        count = in.readVLong();
-        matches = new BytesRef[in.readVInt()];
-        for (int i = 0; i < matches.length; i++) {
-            matches[i] = in.readBytesRef();
-        }
-        scores = new float[in.readVInt()];
-        for (int i = 0; i < scores.length; i++) {
-            scores[i] = in.readFloat();
-        }
+        topDocs = Lucene.readTopDocs(in);
         int size = in.readVInt();
+        ids = new HashMap<>(size);
         for (int i = 0; i < size; i++) {
+            ids.put(in.readVInt(), in.readString());
+        }
+        size = in.readVInt();
+        hls = new HashMap<>(size);
+        for (int i = 0; i < size; i++) {
+            int docId = in.readVInt();
             int mSize = in.readVInt();
             Map<String, HighlightField> fields = new HashMap<>();
             for (int j = 0; j < mSize; j++) {
                 fields.put(in.readString(), HighlightField.readHighlightField(in));
             }
-            hls.add(fields);
+            hls.put(docId, fields);
         }
         aggregations = InternalAggregations.readOptionalAggregations(in);
         if (in.readBoolean()) {
@ -169,23 +148,21 @@ public class PercolateShardResponse extends BroadcastShardResponse {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
-        out.writeByte(percolatorTypeId);
+        out.writeBoolean(onlyCount);
         out.writeVLong(requestedSize);
-        out.writeVLong(count);
-        out.writeVInt(matches.length);
-        for (BytesRef match : matches) {
-            out.writeBytesRef(match);
-        }
-        out.writeVLong(scores.length);
-        for (float score : scores) {
-            out.writeFloat(score);
-        }
+        Lucene.writeTopDocs(out, topDocs);
+        out.writeVInt(ids.size());
+        for (Map.Entry<Integer, String> entry : ids.entrySet()) {
+            out.writeVInt(entry.getKey());
+            out.writeString(entry.getValue());
+        }
         out.writeVInt(hls.size());
-        for (Map<String, HighlightField> hl : hls) {
-            out.writeVInt(hl.size());
-            for (Map.Entry<String, HighlightField> entry : hl.entrySet()) {
-                out.writeString(entry.getKey());
-                entry.getValue().writeTo(out);
+        for (Map.Entry<Integer, Map<String, HighlightField>> entry1 : hls.entrySet()) {
+            out.writeVInt(entry1.getKey());
+            out.writeVInt(entry1.getValue().size());
+            for (Map.Entry<String, HighlightField> entry2 : entry1.getValue().entrySet()) {
+                out.writeString(entry2.getKey());
+                entry2.getValue().writeTo(out);
             }
         }
         out.writeOptionalStreamable(aggregations);
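The new wire format replaces the parallel matches/scores arrays with maps keyed by Lucene docId. A minimal sketch of the symmetric read/write idiom used above, extracted for clarity (the helper method names are illustrative):

    // Writing and reading a Map<Integer, String> over Elasticsearch streams;
    // the vInt size prefix must match the number of entries that follow.
    static void writeIds(StreamOutput out, Map<Integer, String> ids) throws IOException {
        out.writeVInt(ids.size());
        for (Map.Entry<Integer, String> entry : ids.entrySet()) {
            out.writeVInt(entry.getKey());      // Lucene docId of the matching query
            out.writeString(entry.getValue());  // percolator query id
        }
    }

    static Map<Integer, String> readIds(StreamInput in) throws IOException {
        int size = in.readVInt();
        Map<Integer, String> ids = new HashMap<>(size);
        for (int i = 0; i < size; i++) {
            ids.put(in.readVInt(), in.readString());
        }
        return ids;
    }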


@ -18,6 +18,7 @@
 */
 package org.elasticsearch.action.percolate;
+import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.get.GetRequest;
@ -43,6 +44,7 @@ import org.elasticsearch.percolator.PercolatorService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
@ -117,7 +119,7 @@ public class TransportPercolateAction extends TransportBroadcastAction<Percolate
         List<PercolateShardResponse> shardResults = null;
         List<ShardOperationFailedException> shardFailures = null;
-        byte percolatorTypeId = 0x00;
+        boolean onlyCount = false;
         for (int i = 0; i < shardsResponses.length(); i++) {
             Object shardResponse = shardsResponses.get(i);
             if (shardResponse == null) {
@ -133,7 +135,7 @@ public class TransportPercolateAction extends TransportBroadcastAction<Percolate
                 successfulShards++;
                 if (!percolateShardResponse.isEmpty()) {
                     if (shardResults == null) {
-                        percolatorTypeId = percolateShardResponse.percolatorTypeId();
+                        onlyCount = percolateShardResponse.onlyCount();
                         shardResults = new ArrayList<>();
                     }
                     shardResults.add(percolateShardResponse);
@ -146,7 +148,12 @@ public class TransportPercolateAction extends TransportBroadcastAction<Percolate
             PercolateResponse.Match[] matches = request.onlyCount() ? null : PercolateResponse.EMPTY;
             return new PercolateResponse(shardsResponses.length(), successfulShards, failedShards, shardFailures, tookInMillis, matches);
         } else {
-            PercolatorService.ReduceResult result = percolatorService.reduce(percolatorTypeId, shardResults, request);
+            PercolatorService.ReduceResult result = null;
+            try {
+                result = percolatorService.reduce(onlyCount, shardResults, request);
+            } catch (IOException e) {
+                throw new ElasticsearchException("error during reduce phase", e);
+            }
             long tookInMillis = Math.max(1, System.currentTimeMillis() - request.startTime);
             return new PercolateResponse(
                     shardsResponses.length(), successfulShards, failedShards, shardFailures,


@ -26,10 +26,10 @@ import org.elasticsearch.action.ActionRunnable;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.HandledTransportAction;
 import org.elasticsearch.action.support.ThreadedActionListener;
-import org.elasticsearch.cluster.ClusterChangedEvent;
 import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ClusterStateObserver;
+import org.elasticsearch.cluster.MasterNodeChangePredicate;
 import org.elasticsearch.cluster.NotMasterException;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
@ -51,20 +51,6 @@ import java.util.function.Supplier;
 * A base class for operations that need to be performed on the master node.
 */
 public abstract class TransportMasterNodeAction<Request extends MasterNodeRequest, Response extends ActionResponse> extends HandledTransportAction<Request, Response> {
-    private static final ClusterStateObserver.ChangePredicate masterNodeChangedPredicate = new ClusterStateObserver.ChangePredicate() {
-        @Override
-        public boolean apply(ClusterState previousState, ClusterState.ClusterStateStatus previousStatus,
-                             ClusterState newState, ClusterState.ClusterStateStatus newStatus) {
-            // The condition !newState.nodes().masterNodeId().equals(previousState.nodes().masterNodeId()) is not sufficient as the same master node might get reelected after a disruption.
-            return newState.nodes().masterNodeId() != null && newState != previousState;
-        }
-        @Override
-        public boolean apply(ClusterChangedEvent event) {
-            return event.nodesDelta().masterNodeChanged();
-        }
-    };
     protected final TransportService transportService;
     protected final ClusterService clusterService;
@ -164,7 +150,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
                 if (t instanceof Discovery.FailedToCommitClusterStateException
                         || (t instanceof NotMasterException)) {
                     logger.debug("master could not publish cluster state or stepped down before publishing action [{}], scheduling a retry", t, actionName);
-                    retry(t, masterNodeChangedPredicate);
+                    retry(t, MasterNodeChangePredicate.INSTANCE);
                 } else {
                     listener.onFailure(t);
                 }
@ -180,7 +166,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
             } else {
                 if (nodes.masterNode() == null) {
                     logger.debug("no known master node, scheduling a retry");
-                    retry(null, masterNodeChangedPredicate);
+                    retry(null, MasterNodeChangePredicate.INSTANCE);
                 } else {
                     transportService.sendRequest(nodes.masterNode(), actionName, request, new ActionListenerResponseHandler<Response>(listener) {
                         @Override
@ -195,7 +181,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
                         // we want to retry here a bit to see if a new master is elected
                         logger.debug("connection exception while trying to forward request with action name [{}] to master node [{}], scheduling a retry. Error: [{}]",
                                 actionName, nodes.masterNode(), exp.getDetailedMessage());
-                        retry(cause, masterNodeChangedPredicate);
+                        retry(cause, MasterNodeChangePredicate.INSTANCE);
                     } else {
                         listener.onFailure(exp);
                     }


@ -851,11 +851,11 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
             // we never execute replication operation locally as primary operation has already completed locally
             // hence, we ignore any local shard for replication
             if (nodes.localNodeId().equals(shard.currentNodeId()) == false) {
-                performOnReplica(shard, shard.currentNodeId());
+                performOnReplica(shard);
             }
             // send operation to relocating shard
             if (shard.relocating()) {
-                performOnReplica(shard, shard.relocatingNodeId());
+                performOnReplica(shard.buildTargetRelocatingShard());
             }
         }
     }
@ -863,9 +863,10 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
     /**
      * send replica operation to target node
      */
-    void performOnReplica(final ShardRouting shard, final String nodeId) {
+    void performOnReplica(final ShardRouting shard) {
         // if we don't have that node, it means that it might have failed and will be created again, in
         // this case, we don't have to do the operation, and just let it failover
+        String nodeId = shard.currentNodeId();
         if (!nodes.nodeExists(nodeId)) {
             logger.trace("failed to send action [{}] on replica [{}] for request [{}] due to unknown node [{}]", transportReplicaAction, shard.shardId(), replicaRequest, nodeId);
             onReplicaFailure(nodeId, null);


@ -272,7 +272,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
                 if (indexServiceOrNull != null) {
                     IndexShard shard = indexService.getShardOrNull(request.shardId());
                     if (shard != null) {
-                        shard.indexingService().noopUpdate(request.type());
+                        shard.noopUpdate(request.type());
                     }
                 }
                 listener.onResponse(update);


@ -113,15 +113,6 @@ import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeResponse;
 import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest;
 import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder;
 import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse;
-import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerRequest;
-import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerRequestBuilder;
-import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse;
-import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequest;
-import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequestBuilder;
-import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse;
-import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerRequest;
-import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerRequestBuilder;
-import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerResponse;
 import org.elasticsearch.common.Nullable;
 /**
@ -771,51 +762,6 @@ public interface IndicesAdminClient extends ElasticsearchClient {
     */
    ValidateQueryRequestBuilder prepareValidateQuery(String... indices);
-    /**
-     * Puts an index search warmer to be applied when applicable.
-     */
-    ActionFuture<PutWarmerResponse> putWarmer(PutWarmerRequest request);
-    /**
-     * Puts an index search warmer to be applied when applicable.
-     */
-    void putWarmer(PutWarmerRequest request, ActionListener<PutWarmerResponse> listener);
-    /**
-     * Puts an index search warmer to be applied when applicable.
-     */
-    PutWarmerRequestBuilder preparePutWarmer(String name);
-    /**
-     * Deletes an index warmer.
-     */
-    ActionFuture<DeleteWarmerResponse> deleteWarmer(DeleteWarmerRequest request);
-    /**
-     * Deletes an index warmer.
-     */
-    void deleteWarmer(DeleteWarmerRequest request, ActionListener<DeleteWarmerResponse> listener);
-    /**
-     * Deletes an index warmer.
-     */
-    DeleteWarmerRequestBuilder prepareDeleteWarmer();
-    /**
-     * Returns a map of index warmers for the given get request.
-     */
-    void getWarmers(GetWarmersRequest request, ActionListener<GetWarmersResponse> listener);
-    /**
-     * Returns a map of index warmers for the given get request.
-     */
-    ActionFuture<GetWarmersResponse> getWarmers(GetWarmersRequest request);
-    /**
-     * Returns a new builder to fetch index warmer metadata for the given indices.
-     */
-    GetWarmersRequestBuilder prepareGetWarmers(String... indices);
     /**
      * Executes a per index settings get request and returns the settings for the indices specified.
      * Note: this is a per index request and will not include settings that are set on the cluster


@ -232,18 +232,6 @@ import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction
 import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest;
 import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder;
 import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse;
-import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerAction;
-import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerRequest;
-import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerRequestBuilder;
-import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse;
-import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersAction;
-import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequest;
-import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequestBuilder;
-import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse;
-import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerAction;
-import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerRequest;
-import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerRequestBuilder;
-import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerResponse;
 import org.elasticsearch.action.bulk.BulkAction;
 import org.elasticsearch.action.bulk.BulkRequest;
 import org.elasticsearch.action.bulk.BulkRequestBuilder;
@ -1669,51 +1657,6 @@ public abstract class AbstractClient extends AbstractComponent implements Client
         return new ValidateQueryRequestBuilder(this, ValidateQueryAction.INSTANCE).setIndices(indices);
     }
-    @Override
-    public ActionFuture<PutWarmerResponse> putWarmer(PutWarmerRequest request) {
-        return execute(PutWarmerAction.INSTANCE, request);
-    }
-    @Override
-    public void putWarmer(PutWarmerRequest request, ActionListener<PutWarmerResponse> listener) {
-        execute(PutWarmerAction.INSTANCE, request, listener);
-    }
-    @Override
-    public PutWarmerRequestBuilder preparePutWarmer(String name) {
-        return new PutWarmerRequestBuilder(this, PutWarmerAction.INSTANCE, name);
-    }
-    @Override
-    public ActionFuture<DeleteWarmerResponse> deleteWarmer(DeleteWarmerRequest request) {
-        return execute(DeleteWarmerAction.INSTANCE, request);
-    }
-    @Override
-    public void deleteWarmer(DeleteWarmerRequest request, ActionListener<DeleteWarmerResponse> listener) {
-        execute(DeleteWarmerAction.INSTANCE, request, listener);
-    }
-    @Override
-    public DeleteWarmerRequestBuilder prepareDeleteWarmer() {
-        return new DeleteWarmerRequestBuilder(this, DeleteWarmerAction.INSTANCE);
-    }
-    @Override
-    public GetWarmersRequestBuilder prepareGetWarmers(String... indices) {
-        return new GetWarmersRequestBuilder(this, GetWarmersAction.INSTANCE, indices);
-    }
-    @Override
-    public ActionFuture<GetWarmersResponse> getWarmers(GetWarmersRequest request) {
-        return execute(GetWarmersAction.INSTANCE, request);
-    }
-    @Override
-    public void getWarmers(GetWarmersRequest request, ActionListener<GetWarmersResponse> listener) {
-        execute(GetWarmersAction.INSTANCE, request, listener);
-    }
     @Override
     public GetSettingsRequestBuilder prepareGetSettings(String... indices) {
         return new GetSettingsRequestBuilder(this, GetSettingsAction.INSTANCE, indices);

@ -67,7 +67,7 @@ import org.elasticsearch.gateway.GatewayAllocator;
 import org.elasticsearch.gateway.PrimaryShardAllocator;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.engine.EngineConfig;
-import org.elasticsearch.index.indexing.IndexingSlowLog;
+import org.elasticsearch.index.IndexingSlowLog;
 import org.elasticsearch.index.search.stats.SearchSlowLog;
 import org.elasticsearch.index.settings.IndexDynamicSettings;
 import org.elasticsearch.index.shard.IndexShard;


@ -17,30 +17,24 @@
 * under the License.
 */
-package org.elasticsearch.action.admin.indices.warmer.put;
-import org.elasticsearch.action.Action;
-import org.elasticsearch.client.ElasticsearchClient;
-/**
- * Action for the admin/warmers/put API.
- */
-public class PutWarmerAction extends Action<PutWarmerRequest, PutWarmerResponse, PutWarmerRequestBuilder> {
-    public static final PutWarmerAction INSTANCE = new PutWarmerAction();
-    public static final String NAME = "indices:admin/warmers/put";
-    private PutWarmerAction() {
-        super(NAME);
-    }
-    @Override
-    public PutWarmerResponse newResponse() {
-        return new PutWarmerResponse();
-    }
-    @Override
-    public PutWarmerRequestBuilder newRequestBuilder(ElasticsearchClient client) {
-        return new PutWarmerRequestBuilder(client, this);
-    }
-}
+package org.elasticsearch.cluster;
+public enum MasterNodeChangePredicate implements ClusterStateObserver.ChangePredicate {
+    INSTANCE;
+    @Override
+    public boolean apply(
+            ClusterState previousState,
+            ClusterState.ClusterStateStatus previousStatus,
+            ClusterState newState,
+            ClusterState.ClusterStateStatus newStatus) {
+        // checking if the masterNodeId changed is insufficient as the
+        // same master node might get re-elected after a disruption
+        return newState.nodes().masterNodeId() != null && newState != previousState;
+    }
+    @Override
+    public boolean apply(ClusterChangedEvent changedEvent) {
+        return changedEvent.nodesDelta().masterNodeChanged();
+    }
+}
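The enum-singleton idiom used here guarantees a single, serialization-safe instance without any locking, which is why call sites can simply share it:

    // Illustrative call site (mirrors the retry(...) usages shown earlier in
    // TransportMasterNodeAction):
    retry(cause, MasterNodeChangePredicate.INSTANCE);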


@ -302,6 +302,10 @@ public class ShardStateAction extends AbstractComponent {
         this.failure = failure;
     }
+    public ShardRouting getShardRouting() {
+        return shardRouting;
+    }
     @Override
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);


@ -48,7 +48,6 @@ import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.rest.RestStatus;
-import org.elasticsearch.search.warmer.IndexWarmersMetaData;
 import org.joda.time.DateTime;
 import org.joda.time.DateTimeZone;
@ -88,11 +87,6 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
     public static Map<String, Custom> customPrototypes = new HashMap<>();
-    static {
-        // register non plugin custom metadata
-        registerPrototype(IndexWarmersMetaData.TYPE, IndexWarmersMetaData.PROTO);
-    }
     /**
      * Register a custom index meta data factory. Make sure to call it from a static block.
      */
@ -950,10 +944,16 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
             if (parser.currentToken() == XContentParser.Token.START_OBJECT) { // on a start object move to next token
                 parser.nextToken();
             }
+            if (parser.currentToken() != XContentParser.Token.FIELD_NAME) {
+                throw new IllegalArgumentException("expected field name but got a " + parser.currentToken());
+            }
             Builder builder = new Builder(parser.currentName());
             String currentFieldName = null;
             XContentParser.Token token = parser.nextToken();
+            if (token != XContentParser.Token.START_OBJECT) {
+                throw new IllegalArgumentException("expected object but got a " + token);
+            }
             while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                 if (token == XContentParser.Token.FIELD_NAME) {
                     currentFieldName = parser.currentName();
@ -968,6 +968,8 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
                             String mappingType = currentFieldName;
                             Map<String, Object> mappingSource = MapBuilder.<String, Object>newMapBuilder().put(mappingType, parser.mapOrdered()).map();
                             builder.putMapping(new MappingMetaData(mappingType, mappingSource));
+                        } else {
+                            throw new IllegalArgumentException("Unexpected token: " + token);
                         }
                     }
                 } else if (KEY_ALIASES.equals(currentFieldName)) {
@ -987,8 +989,17 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
                             }
                         }
                         builder.putActiveAllocationIds(Integer.valueOf(shardId), allocationIds);
+                    } else {
+                        throw new IllegalArgumentException("Unexpected token: " + token);
                     }
                 }
+            } else if ("warmers".equals(currentFieldName)) {
+                // TODO: do this in 4.0:
+                // throw new IllegalArgumentException("Warmers are not supported anymore - are you upgrading from 1.x?");
+                // ignore: warmers have been removed in 3.0 and are
+                // simply ignored when upgrading from 2.x
+                assert Version.CURRENT.major <= 3;
+                parser.skipChildren();
             } else {
                 // check if it's a custom index metadata
                 Custom proto = lookupPrototype(currentFieldName);
@ -1023,13 +1034,19 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
                         }
                     }
                     builder.primaryTerms(list.toArray());
+                } else {
+                    throw new IllegalArgumentException("Unexpected field for an array " + currentFieldName);
                 }
             } else if (token.isValue()) {
                 if (KEY_STATE.equals(currentFieldName)) {
                     builder.state(State.fromString(parser.text()));
                 } else if (KEY_VERSION.equals(currentFieldName)) {
                     builder.version(parser.longValue());
+                } else {
+                    throw new IllegalArgumentException("Unexpected field [" + currentFieldName + "]");
                 }
+            } else {
+                throw new IllegalArgumentException("Unexpected token " + token);
             }
         }
         return builder.build();
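The parser changes combine one deliberate leniency (legacy "warmers" sections are skipped) with new strictness everywhere else. A condensed sketch of the idiom, assuming the parser is positioned on the section's START_OBJECT:

    // skipChildren() consumes everything through the matching END_OBJECT, so a
    // legacy 2.x "warmers" section parses cleanly while unknown fields fail fast.
    if ("warmers".equals(currentFieldName)) {
        parser.skipChildren();
    } else {
        throw new IllegalArgumentException("Unexpected field [" + currentFieldName + "]");
    }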


@ -55,12 +55,10 @@ import org.elasticsearch.index.store.IndexStoreConfig;
 import org.elasticsearch.indices.recovery.RecoverySettings;
 import org.elasticsearch.indices.ttl.IndicesTTLService;
 import org.elasticsearch.rest.RestStatus;
-import org.elasticsearch.search.warmer.IndexWarmersMetaData;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.EnumSet;
@ -71,7 +69,6 @@ import java.util.Map;
 import java.util.Set;
 import java.util.SortedMap;
 import java.util.TreeMap;
-import java.util.stream.Collectors;
 import static java.util.Collections.unmodifiableSet;
 import static org.elasticsearch.common.settings.Settings.readSettingsFromStream;
@ -365,49 +362,6 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
         return indexMapBuilder.build();
     }
-    public ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> findWarmers(String[] concreteIndices, final String[] types, final String[] uncheckedWarmers) {
-        assert uncheckedWarmers != null;
-        assert concreteIndices != null;
-        if (concreteIndices.length == 0) {
-            return ImmutableOpenMap.of();
-        }
-        // special _all check to behave the same like not specifying anything for the warmers (not for the indices)
-        final String[] warmers = Strings.isAllOrWildcard(uncheckedWarmers) ? Strings.EMPTY_ARRAY : uncheckedWarmers;
-        ImmutableOpenMap.Builder<String, List<IndexWarmersMetaData.Entry>> mapBuilder = ImmutableOpenMap.builder();
-        Iterable<String> intersection = HppcMaps.intersection(ObjectHashSet.from(concreteIndices), indices.keys());
-        for (String index : intersection) {
-            IndexMetaData indexMetaData = indices.get(index);
-            IndexWarmersMetaData indexWarmersMetaData = indexMetaData.custom(IndexWarmersMetaData.TYPE);
-            if (indexWarmersMetaData == null || indexWarmersMetaData.entries().isEmpty()) {
-                continue;
-            }
-            // TODO: make this a List so we don't have to copy below
-            Collection<IndexWarmersMetaData.Entry> filteredWarmers =
-                    indexWarmersMetaData
-                            .entries()
-                            .stream()
-                            .filter(warmer -> {
-                                if (warmers.length != 0 && types.length != 0) {
-                                    return Regex.simpleMatch(warmers, warmer.name()) && Regex.simpleMatch(types, warmer.types());
-                                } else if (warmers.length != 0) {
-                                    return Regex.simpleMatch(warmers, warmer.name());
-                                } else if (types.length != 0) {
-                                    return Regex.simpleMatch(types, warmer.types());
-                                } else {
-                                    return true;
-                                }
-                            })
-                            .collect(Collectors.toCollection(ArrayList::new));
-            if (!filteredWarmers.isEmpty()) {
-                mapBuilder.put(index, Collections.unmodifiableList(new ArrayList<>(filteredWarmers)));
-            }
-        }
-        return mapBuilder.build();
-    }
     /**
      * Returns all the concrete indices.
      */
@ -1120,14 +1074,20 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
         if (token == XContentParser.Token.START_OBJECT) {
             // move to the field name (meta-data)
             token = parser.nextToken();
+            if (token != XContentParser.Token.FIELD_NAME) {
+                throw new IllegalArgumentException("Expected a field name but got " + token);
+            }
             // move to the next object
             token = parser.nextToken();
         }
         currentFieldName = parser.currentName();
-        if (token == null) {
-            // no data...
-            return builder.build();
+        if (!"meta-data".equals(parser.currentName())) {
+            throw new IllegalArgumentException("Expected [meta-data] as a field name but got " + currentFieldName);
+        }
+        if (token != XContentParser.Token.START_OBJECT) {
+            throw new IllegalArgumentException("Expected a START_OBJECT but got " + token);
         }
         while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
@ -1160,7 +1120,11 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
                 builder.version = parser.longValue();
             } else if ("cluster_uuid".equals(currentFieldName) || "uuid".equals(currentFieldName)) {
                 builder.clusterUUID = parser.text();
+            } else {
+                throw new IllegalArgumentException("Unexpected field [" + currentFieldName + "]");
             }
+        } else {
+            throw new IllegalArgumentException("Unexpected token " + token);
         }
     }
     return builder.build();


@ -223,7 +223,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
         SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
         try (AnalysisService analysisService = new FakeAnalysisService(indexSettings)) {
-            try (MapperService mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry)) {
+            try (MapperService mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry, () -> null)) {
                 for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
                     MappingMetaData mappingMetaData = cursor.value;
                     mapperService.merge(mappingMetaData.type(), mappingMetaData.source(), false, false);


@ -26,6 +26,7 @@ import org.apache.lucene.util.CollectionUtil;
 import org.elasticsearch.cluster.AbstractDiffable;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.Randomness;
 import org.elasticsearch.common.collect.ImmutableOpenIntMap;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@ -39,7 +40,6 @@ import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Set;
-import java.util.concurrent.ThreadLocalRandom;
 /**
  * The {@link IndexRoutingTable} represents routing information for a single
@ -71,7 +71,7 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
     IndexRoutingTable(String index, ImmutableOpenIntMap<IndexShardRoutingTable> shards) {
         this.index = index;
-        this.shuffler = new RotationShardShuffler(ThreadLocalRandom.current().nextInt());
+        this.shuffler = new RotationShardShuffler(Randomness.get().nextInt());
         this.shards = shards;
         List<ShardRouting> allActiveShards = new ArrayList<>();
         for (IntObjectCursor<IndexShardRoutingTable> cursor : shards) {


@ -21,6 +21,7 @@ package org.elasticsearch.cluster.routing;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.Randomness;
 import org.elasticsearch.common.collect.MapBuilder;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@ -28,8 +29,14 @@ import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.index.shard.ShardId;
 import java.io.IOException;
-import java.util.*;
-import java.util.concurrent.ThreadLocalRandom;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
 import static java.util.Collections.emptyMap;
@ -66,7 +73,7 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {
     IndexShardRoutingTable(ShardId shardId, List<ShardRouting> shards) {
         this.shardId = shardId;
-        this.shuffler = new RotationShardShuffler(ThreadLocalRandom.current().nextInt());
+        this.shuffler = new RotationShardShuffler(Randomness.get().nextInt());
         this.shards = Collections.unmodifiableList(shards);
         ShardRouting primary = null;
@ -419,13 +426,21 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {
     @Override
     public boolean equals(Object o) {
-        if (this == o) return true;
-        if (o == null || getClass() != o.getClass()) return false;
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
         IndexShardRoutingTable that = (IndexShardRoutingTable) o;
-        if (!shardId.equals(that.shardId)) return false;
-        if (!shards.equals(that.shards)) return false;
+        if (!shardId.equals(that.shardId)) {
+            return false;
+        }
+        if (!shards.equals(that.shards)) {
+            return false;
+        }
         return true;
     }

View File

@ -109,6 +109,7 @@ public final class Randomness {
} }
} }
@SuppressForbidden(reason = "ThreadLocalRandom is okay when not running tests")
private static Random getWithoutSeed() { private static Random getWithoutSeed() {
assert currentMethod == null && getRandomMethod == null : "running under tests but tried to create non-reproducible random"; assert currentMethod == null && getRandomMethod == null : "running under tests but tried to create non-reproducible random";
return ThreadLocalRandom.current(); return ThreadLocalRandom.current();

View File

@ -59,6 +59,7 @@ public enum MurmurHash3 {
* Note, this hashing function might be used to persist hashes, so if the way hashes are computed * Note, this hashing function might be used to persist hashes, so if the way hashes are computed
* changes for some reason, it needs to be addressed (like in BloomFilter and MurmurHashField). * changes for some reason, it needs to be addressed (like in BloomFilter and MurmurHashField).
*/ */
@SuppressWarnings("fallthrough") // Intentionally uses fallthrough to implement a well known hashing algorithm
public static Hash128 hash128(byte[] key, int offset, int length, long seed, Hash128 hash) { public static Hash128 hash128(byte[] key, int offset, int length, long seed, Hash128 hash) {
long h1 = seed; long h1 = seed;
long h2 = seed; long h2 = seed;
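A minimal sketch, with simplified constants, of the tail-byte pattern the suppression covers: each case mixes in one byte and deliberately falls through to the next, so a 3-byte tail runs cases 3, 2, and 1 in turn.

class FallthroughSketch {
    @SuppressWarnings("fallthrough")
    static long mixTail(byte[] key, int offset, int tail) {
        long k1 = 0;
        switch (tail) {
            case 3:
                k1 ^= ((long) key[offset + 2]) << 16; // falls through on purpose
            case 2:
                k1 ^= ((long) key[offset + 1]) << 8;  // falls through on purpose
            case 1:
                k1 ^= ((long) key[offset]);
        }
        return k1;
    }

    public static void main(String[] args) {
        System.out.println(mixTail(new byte[]{1, 2, 3}, 0, 3)); // mixes all three tail bytes
    }
}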

View File

@ -258,6 +258,12 @@ public final class FactoryProvider2<F> implements InvocationHandler, Provider<F>
return o == this || o == factory; return o == this || o == factory;
} }
@Override
public int hashCode() {
// This way both this proxy and its factory hash to the same spot, keeping hashCode consistent with equals.
return factory.hashCode();
}
/** /**
* Returns true if {@code thrown} can be thrown by {@code invoked} without wrapping. * Returns true if {@code thrown} can be thrown by {@code invoked} without wrapping.
*/ */
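A minimal sketch of the contract the added method restores: when equals() treats the proxy and its factory as interchangeable, hashCode() must delegate as well, or hash-based collections file the two under different buckets.

final class ProxySketch {
    private final Object factory;

    ProxySketch(Object factory) {
        this.factory = factory;
    }

    @Override
    public boolean equals(Object o) {
        return o == this || o == factory; // same identity semantics as the diff above
    }

    @Override
    public int hashCode() {
        return factory.hashCode(); // both the proxy and the factory hash to the same spot
    }
}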

View File

@ -39,10 +39,12 @@ public class BytesStreamOutput extends StreamOutput implements BytesStream {
protected int count; protected int count;
/** /**
* Create a non recycling {@link BytesStreamOutput} with 1 initial page acquired. * Create a non recycling {@link BytesStreamOutput} with an initial capacity of 0.
*/ */
public BytesStreamOutput() { public BytesStreamOutput() {
this(BigArrays.PAGE_SIZE_IN_BYTES); // since this impl is not recycling anyway, don't bother aligning to
// the page size; this even saves memory
this(0);
} }
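A minimal sketch of the trade-off being made: start at capacity 0 and grow on first write, so a stream that is never written to allocates nothing (the names here are illustrative, not the BytesStreamOutput internals).

import java.util.Arrays;

class LazyBufferSketch {
    private byte[] buf = new byte[0]; // no real buffer until the first write
    private int count;

    void writeByte(byte b) {
        if (count == buf.length) {
            buf = Arrays.copyOf(buf, Math.max(8, buf.length * 2)); // grow on demand
        }
        buf[count++] = b;
    }

    int size() {
        return count;
    }
}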
/** /**

View File

@ -86,11 +86,6 @@ import java.util.Objects;
* *
*/ */
public class Lucene { public class Lucene {
// TODO: remove VERSION, and have users use Version.LATEST.
public static final Version VERSION = Version.LATEST;
public static final Version ANALYZER_VERSION = VERSION;
public static final Version QUERYPARSER_VERSION = VERSION;
public static final String LATEST_DOC_VALUES_FORMAT = "Lucene54"; public static final String LATEST_DOC_VALUES_FORMAT = "Lucene54";
public static final String LATEST_POSTINGS_FORMAT = "Lucene50"; public static final String LATEST_POSTINGS_FORMAT = "Lucene50";
public static final String LATEST_CODEC = "Lucene54"; public static final String LATEST_CODEC = "Lucene54";
@ -109,7 +104,6 @@ public class Lucene {
public static final TopDocs EMPTY_TOP_DOCS = new TopDocs(0, EMPTY_SCORE_DOCS, 0.0f); public static final TopDocs EMPTY_TOP_DOCS = new TopDocs(0, EMPTY_SCORE_DOCS, 0.0f);
@SuppressWarnings("deprecation")
public static Version parseVersion(@Nullable String version, Version defaultVersion, ESLogger logger) { public static Version parseVersion(@Nullable String version, Version defaultVersion, ESLogger logger) {
if (version == null) { if (version == null) {
return defaultVersion; return defaultVersion;

View File

@ -1,593 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.math;
import org.elasticsearch.common.SuppressForbidden;
import java.util.concurrent.ThreadLocalRandom;
/**
*
*/
public class UnboxedMathUtils {
public static double sin(Short a) {
return Math.sin(a.doubleValue());
}
public static double sin(Integer a) {
return Math.sin(a.doubleValue());
}
public static double sin(Float a) {
return Math.sin(a.doubleValue());
}
public static double sin(Long a) {
return Math.sin(a.doubleValue());
}
public static double sin(Double a) {
return Math.sin(a);
}
public static double cos(Short a) {
return Math.cos(a.doubleValue());
}
public static double cos(Integer a) {
return Math.cos(a.doubleValue());
}
public static double cos(Float a) {
return Math.cos(a.doubleValue());
}
public static double cos(Long a) {
return Math.cos(a.doubleValue());
}
public static double cos(Double a) {
return Math.cos(a);
}
public static double tan(Short a) {
return Math.tan(a.doubleValue());
}
public static double tan(Integer a) {
return Math.tan(a.doubleValue());
}
public static double tan(Float a) {
return Math.tan(a.doubleValue());
}
public static double tan(Long a) {
return Math.tan(a.doubleValue());
}
public static double tan(Double a) {
return Math.tan(a);
}
public static double asin(Short a) {
return Math.asin(a.doubleValue());
}
public static double asin(Integer a) {
return Math.asin(a.doubleValue());
}
public static double asin(Float a) {
return Math.asin(a.doubleValue());
}
public static double asin(Long a) {
return Math.asin(a.doubleValue());
}
public static double asin(Double a) {
return Math.asin(a);
}
public static double acos(Short a) {
return Math.acos(a.doubleValue());
}
public static double acos(Integer a) {
return Math.acos(a.doubleValue());
}
public static double acos(Float a) {
return Math.acos(a.doubleValue());
}
public static double acos(Long a) {
return Math.acos(a.doubleValue());
}
public static double acos(Double a) {
return Math.acos(a);
}
public static double atan(Short a) {
return Math.atan(a.doubleValue());
}
public static double atan(Integer a) {
return Math.atan(a.doubleValue());
}
public static double atan(Float a) {
return Math.atan(a.doubleValue());
}
public static double atan(Long a) {
return Math.atan(a.doubleValue());
}
public static double atan(Double a) {
return Math.atan(a);
}
public static double toRadians(Short angdeg) {
return Math.toRadians(angdeg.doubleValue());
}
public static double toRadians(Integer angdeg) {
return Math.toRadians(angdeg.doubleValue());
}
public static double toRadians(Float angdeg) {
return Math.toRadians(angdeg.doubleValue());
}
public static double toRadians(Long angdeg) {
return Math.toRadians(angdeg.doubleValue());
}
public static double toRadians(Double angdeg) {
return Math.toRadians(angdeg);
}
public static double toDegrees(Short angrad) {
return Math.toDegrees(angrad.doubleValue());
}
public static double toDegrees(Integer angrad) {
return Math.toDegrees(angrad.doubleValue());
}
public static double toDegrees(Float angrad) {
return Math.toDegrees(angrad.doubleValue());
}
public static double toDegrees(Long angrad) {
return Math.toDegrees(angrad.doubleValue());
}
public static double toDegrees(Double angrad) {
return Math.toDegrees(angrad);
}
public static double exp(Short a) {
return Math.exp(a.doubleValue());
}
public static double exp(Integer a) {
return Math.exp(a.doubleValue());
}
public static double exp(Float a) {
return Math.exp(a.doubleValue());
}
public static double exp(Long a) {
return Math.exp(a.doubleValue());
}
public static double exp(Double a) {
return Math.exp(a);
}
public static double log(Short a) {
return Math.log(a.doubleValue());
}
public static double log(Integer a) {
return Math.log(a.doubleValue());
}
public static double log(Float a) {
return Math.log(a.doubleValue());
}
public static double log(Long a) {
return Math.log(a.doubleValue());
}
public static double log(Double a) {
return Math.log(a);
}
public static double log10(Short a) {
return Math.log10(a.doubleValue());
}
public static double log10(Integer a) {
return Math.log10(a.doubleValue());
}
public static double log10(Float a) {
return Math.log10(a.doubleValue());
}
public static double log10(Long a) {
return Math.log10(a.doubleValue());
}
public static double log10(Double a) {
return Math.log10(a);
}
public static double sqrt(Short a) {
return Math.sqrt(a.doubleValue());
}
public static double sqrt(Integer a) {
return Math.sqrt(a.doubleValue());
}
public static double sqrt(Float a) {
return Math.sqrt(a.doubleValue());
}
public static double sqrt(Long a) {
return Math.sqrt(a.doubleValue());
}
public static double sqrt(Double a) {
return Math.sqrt(a);
}
public static double cbrt(Short a) {
return Math.cbrt(a.doubleValue());
}
public static double cbrt(Integer a) {
return Math.cbrt(a.doubleValue());
}
public static double cbrt(Float a) {
return Math.cbrt(a.doubleValue());
}
public static double cbrt(Long a) {
return Math.cbrt(a.doubleValue());
}
public static double cbrt(Double a) {
return Math.cbrt(a);
}
public static double IEEEremainder(Short f1, Short f2) {
return Math.IEEEremainder(f1.doubleValue(), f2.doubleValue());
}
public static double IEEEremainder(Integer f1, Integer f2) {
return Math.IEEEremainder(f1.doubleValue(), f2.doubleValue());
}
public static double IEEEremainder(Float f1, Float f2) {
return Math.IEEEremainder(f1.doubleValue(), f2.doubleValue());
}
public static double IEEEremainder(Long f1, Long f2) {
return Math.IEEEremainder(f1.doubleValue(), f2.doubleValue());
}
public static double IEEEremainder(Double f1, Double f2) {
return Math.IEEEremainder(f1, f2);
}
public static double ceil(Short a) {
return Math.ceil(a.doubleValue());
}
public static double ceil(Integer a) {
return Math.ceil(a.doubleValue());
}
public static double ceil(Float a) {
return Math.ceil(a.doubleValue());
}
public static double ceil(Long a) {
return Math.ceil(a.doubleValue());
}
public static double ceil(Double a) {
return Math.ceil(a);
}
public static double floor(Short a) {
return Math.floor(a.doubleValue());
}
public static double floor(Integer a) {
return Math.floor(a.doubleValue());
}
public static double floor(Float a) {
return Math.floor(a.doubleValue());
}
public static double floor(Long a) {
return Math.floor(a.doubleValue());
}
public static double floor(Double a) {
return Math.floor(a);
}
public static double rint(Short a) {
return Math.rint(a.doubleValue());
}
public static double rint(Integer a) {
return Math.rint(a.doubleValue());
}
public static double rint(Float a) {
return Math.rint(a.doubleValue());
}
public static double rint(Long a) {
return Math.rint(a.doubleValue());
}
public static double rint(Double a) {
return Math.rint(a);
}
public static double atan2(Short y, Short x) {
return Math.atan2(y.doubleValue(), x.doubleValue());
}
public static double atan2(Integer y, Integer x) {
return Math.atan2(y.doubleValue(), x.doubleValue());
}
public static double atan2(Float y, Float x) {
return Math.atan2(y.doubleValue(), x.doubleValue());
}
public static double atan2(Long y, Long x) {
return Math.atan2(y.doubleValue(), x.doubleValue());
}
public static double atan2(Double y, Double x) {
return Math.atan2(y, x);
}
public static double pow(Short a, Short b) {
return Math.pow(a.doubleValue(), b.doubleValue());
}
public static double pow(Integer a, Integer b) {
return Math.pow(a.doubleValue(), b.doubleValue());
}
public static double pow(Float a, Float b) {
return Math.pow(a.doubleValue(), b.doubleValue());
}
public static double pow(Long a, Long b) {
return Math.pow(a.doubleValue(), b.doubleValue());
}
public static double pow(Double a, Double b) {
return Math.pow(a, b);
}
public static int round(Short a) {
return Math.round(a.floatValue());
}
public static int round(Integer a) {
return Math.round(a.floatValue());
}
public static int round(Float a) {
return Math.round(a);
}
public static long round(Long a) {
return Math.round(a.doubleValue());
}
public static long round(Double a) {
return Math.round(a);
}
public static double random() {
return ThreadLocalRandom.current().nextDouble();
}
public static double randomDouble() {
return ThreadLocalRandom.current().nextDouble();
}
public static double randomFloat() {
return ThreadLocalRandom.current().nextFloat();
}
public static double randomInt() {
return ThreadLocalRandom.current().nextInt();
}
public static double randomInt(Integer i) {
return ThreadLocalRandom.current().nextInt(i);
}
public static double randomLong() {
return ThreadLocalRandom.current().nextLong();
}
public static double randomLong(Long l) {
return ThreadLocalRandom.current().nextLong(l);
}
@SuppressForbidden(reason = "Math#abs is trappy")
public static int abs(Integer a) {
return Math.abs(a);
}
@SuppressForbidden(reason = "Math#abs is trappy")
public static long abs(Long a) {
return Math.abs(a);
}
@SuppressForbidden(reason = "Math#abs is trappy")
public static float abs(Float a) {
return Math.abs(a);
}
@SuppressForbidden(reason = "Math#abs is trappy")
public static double abs(Double a) {
return Math.abs(a);
}
public static int max(Integer a, Integer b) {
return Math.max(a, b);
}
public static long max(Long a, Long b) {
return Math.max(a, b);
}
public static float max(Float a, Float b) {
return Math.max(a, b);
}
public static double max(Double a, Double b) {
return Math.max(a, b);
}
public static int min(Integer a, Integer b) {
return Math.min(a, b);
}
public static long min(Long a, Long b) {
return Math.min(a, b);
}
public static float min(Float a, Float b) {
return Math.min(a, b);
}
public static double min(Double a, Double b) {
return Math.min(a, b);
}
public static double ulp(Double d) {
return Math.ulp(d);
}
public static float ulp(Float f) {
return Math.ulp(f);
}
public static double signum(Double d) {
return Math.signum(d);
}
public static float signum(Float f) {
return Math.signum(f);
}
public static double sinh(Double x) {
return Math.sinh(x);
}
public static double cosh(Double x) {
return Math.cosh(x);
}
public static double tanh(Double x) {
return Math.tanh(x);
}
public static double hypot(Double x, Double y) {
return Math.hypot(x, y);
}
public static double expm1(Double x) {
return Math.expm1(x);
}
public static double log1p(Double x) {
return Math.log1p(x);
}
public static double copySign(Double magnitude, Double sign) {
return Math.copySign(magnitude, sign);
}
public static float copySign(Float magnitude, Float sign) {
return Math.copySign(magnitude, sign);
}
public static int getExponent(Float f) {
return Math.getExponent(f);
}
public static int getExponent(Double d) {
return Math.getExponent(d);
}
public static double nextAfter(Double start, Double direction) {
return Math.nextAfter(start, direction);
}
public static float nextAfter(Float start, Double direction) {
return Math.nextAfter(start, direction);
}
public static double nextUp(Double d) {
return Math.nextUp(d);
}
public static float nextUp(Float f) {
return Math.nextUp(f);
}
public static double scalb(Double d, Integer scaleFactor) {
return Math.scalb(d, scaleFactor);
}
public static float scalb(Float f, Integer scaleFactor) {
return Math.scalb(f, scaleFactor);
}
}

View File

@ -88,9 +88,6 @@ import org.elasticsearch.rest.action.admin.indices.template.put.RestPutIndexTemp
import org.elasticsearch.rest.action.admin.indices.upgrade.RestUpgradeAction; import org.elasticsearch.rest.action.admin.indices.upgrade.RestUpgradeAction;
import org.elasticsearch.rest.action.admin.indices.validate.query.RestValidateQueryAction; import org.elasticsearch.rest.action.admin.indices.validate.query.RestValidateQueryAction;
import org.elasticsearch.rest.action.admin.indices.validate.template.RestRenderSearchTemplateAction; import org.elasticsearch.rest.action.admin.indices.validate.template.RestRenderSearchTemplateAction;
import org.elasticsearch.rest.action.admin.indices.warmer.delete.RestDeleteWarmerAction;
import org.elasticsearch.rest.action.admin.indices.warmer.get.RestGetWarmerAction;
import org.elasticsearch.rest.action.admin.indices.warmer.put.RestPutWarmerAction;
import org.elasticsearch.rest.action.bulk.RestBulkAction; import org.elasticsearch.rest.action.bulk.RestBulkAction;
import org.elasticsearch.rest.action.cat.AbstractCatAction; import org.elasticsearch.rest.action.cat.AbstractCatAction;
import org.elasticsearch.rest.action.cat.RestAliasAction; import org.elasticsearch.rest.action.cat.RestAliasAction;
@ -205,10 +202,6 @@ public class NetworkModule extends AbstractModule {
RestDeleteIndexTemplateAction.class, RestDeleteIndexTemplateAction.class,
RestHeadIndexTemplateAction.class, RestHeadIndexTemplateAction.class,
RestPutWarmerAction.class,
RestDeleteWarmerAction.class,
RestGetWarmerAction.class,
RestPutMappingAction.class, RestPutMappingAction.class,
RestGetMappingAction.class, RestGetMappingAction.class,
RestGetFieldMappingAction.class, RestGetFieldMappingAction.class,

View File

@ -519,6 +519,7 @@ public class BloomFilter {
return k; return k;
} }
@SuppressWarnings("fallthrough") // Uses fallthrough to implement a well-known hashing algorithm
public static long hash3_x64_128(byte[] key, int offset, int length, long seed) { public static long hash3_x64_128(byte[] key, int offset, int length, long seed) {
final int nblocks = length >> 4; // Process as 128-bit blocks. final int nblocks = length >> 4; // Process as 128-bit blocks.
@ -598,7 +599,7 @@ public class BloomFilter {
case 2: case 2:
k1 ^= ((long) key[offset + 1]) << 8; k1 ^= ((long) key[offset + 1]) << 8;
case 1: case 1:
k1 ^= ((long) key[offset]); k1 ^= (key[offset]);
k1 *= c1; k1 *= c1;
k1 = rotl64(k1, 31); k1 = rotl64(k1, 31);
k1 *= c2; k1 *= c2;

View File

@ -45,7 +45,6 @@ import org.elasticsearch.index.engine.EngineFactory;
import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.QueryShardContext;
@ -68,6 +67,7 @@ import org.elasticsearch.threadpool.ThreadPool;
import java.io.Closeable; import java.io.Closeable;
import java.io.IOException; import java.io.IOException;
import java.nio.file.Path; import java.nio.file.Path;
import java.util.Arrays;
import java.util.HashMap; import java.util.HashMap;
import java.util.Iterator; import java.util.Iterator;
import java.util.Map; import java.util.Map;
@ -101,6 +101,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
private final AtomicBoolean closed = new AtomicBoolean(false); private final AtomicBoolean closed = new AtomicBoolean(false);
private final AtomicBoolean deleted = new AtomicBoolean(false); private final AtomicBoolean deleted = new AtomicBoolean(false);
private final IndexSettings indexSettings; private final IndexSettings indexSettings;
private final IndexingSlowLog slowLog;
public IndexService(IndexSettings indexSettings, NodeEnvironment nodeEnv, public IndexService(IndexSettings indexSettings, NodeEnvironment nodeEnv,
SimilarityService similarityService, SimilarityService similarityService,
@ -117,7 +118,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
this.indexSettings = indexSettings; this.indexSettings = indexSettings;
this.analysisService = registry.build(indexSettings); this.analysisService = registry.build(indexSettings);
this.similarityService = similarityService; this.similarityService = similarityService;
this.mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry); this.mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry, IndexService.this::getQueryShardContext);
this.indexFieldData = new IndexFieldDataService(indexSettings, nodeServicesProvider.getIndicesFieldDataCache(), nodeServicesProvider.getCircuitBreakerService(), mapperService); this.indexFieldData = new IndexFieldDataService(indexSettings, nodeServicesProvider.getIndicesFieldDataCache(), nodeServicesProvider.getCircuitBreakerService(), mapperService);
this.shardStoreDeleter = shardStoreDeleter; this.shardStoreDeleter = shardStoreDeleter;
this.eventListener = eventListener; this.eventListener = eventListener;
@ -130,6 +131,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
this.engineFactory = engineFactory; this.engineFactory = engineFactory;
// initialize this last -- otherwise if the wrapper requires any other member to be non-null we fail with an NPE // initialize this last -- otherwise if the wrapper requires any other member to be non-null we fail with an NPE
this.searcherWrapper = wrapperFactory.newWrapper(this); this.searcherWrapper = wrapperFactory.newWrapper(this);
this.slowLog = new IndexingSlowLog(indexSettings.getSettings());
} }
public int numberOfShards() { public int numberOfShards() {
@ -292,9 +294,9 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
(primary && IndexMetaData.isOnSharedFilesystem(indexSettings)); (primary && IndexMetaData.isOnSharedFilesystem(indexSettings));
store = new Store(shardId, this.indexSettings, indexStore.newDirectoryService(path), lock, new StoreCloseListener(shardId, canDeleteShardContent, () -> nodeServicesProvider.getIndicesQueryCache().onClose(shardId))); store = new Store(shardId, this.indexSettings, indexStore.newDirectoryService(path), lock, new StoreCloseListener(shardId, canDeleteShardContent, () -> nodeServicesProvider.getIndicesQueryCache().onClose(shardId)));
if (useShadowEngine(primary, indexSettings)) { if (useShadowEngine(primary, indexSettings)) {
indexShard = new ShadowIndexShard(shardId, this.indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldData, engineFactory, eventListener, searcherWrapper, nodeServicesProvider); indexShard = new ShadowIndexShard(shardId, this.indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldData, engineFactory, eventListener, searcherWrapper, nodeServicesProvider); // no indexing listeners - shadow engines don't index
} else { } else {
indexShard = new IndexShard(shardId, this.indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldData, engineFactory, eventListener, searcherWrapper, nodeServicesProvider); indexShard = new IndexShard(shardId, this.indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldData, engineFactory, eventListener, searcherWrapper, nodeServicesProvider, slowLog);
} }
eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created"); eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created");
@ -552,6 +554,11 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
} catch (Exception e) { } catch (Exception e) {
logger.warn("failed to refresh index store settings", e); logger.warn("failed to refresh index store settings", e);
} }
try {
slowLog.onRefreshSettings(settings); // this will be refactored soon anyway so duplication is ok here
} catch (Exception e) {
logger.warn("failed to refresh slowlog settings", e);
}
} }
} }
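A minimal sketch (illustrative names, not the ES types) of the construction pattern behind the new MapperService argument earlier in this file's diff: passing IndexService.this::getQueryShardContext hands over a Supplier, so the context is resolved lazily at use time rather than during construction, breaking the chicken-and-egg ordering between the two components.

import java.util.function.Supplier;

class ContextSketch { }

class ParserSketch {
    private final Supplier<ContextSketch> contextSupplier;

    ParserSketch(Supplier<ContextSketch> contextSupplier) {
        this.contextSupplier = contextSupplier;
    }

    void parse() {
        ContextSketch ctx = contextSupplier.get(); // resolved only when actually needed
        // ... use ctx ...
    }
}

class ServiceSketch {
    private final ParserSketch parser = new ParserSketch(this::context); // method ref defers the call

    private ContextSketch context() {
        return new ContextSketch();
    }
}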

View File

@ -17,7 +17,7 @@
* under the License. * under the License.
*/ */
package org.elasticsearch.index.indexing; package org.elasticsearch.index;
import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
@ -28,6 +28,7 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.shard.IndexingOperationListener;
import java.io.IOException; import java.io.IOException;
import java.util.Locale; import java.util.Locale;
@ -35,7 +36,7 @@ import java.util.concurrent.TimeUnit;
/** /**
*/ */
public final class IndexingSlowLog { public final class IndexingSlowLog implements IndexingOperationListener {
private boolean reformat; private boolean reformat;
@ -124,8 +125,9 @@ public final class IndexingSlowLog {
} }
} }
void postIndex(Engine.Index index, long tookInNanos) { public void postIndex(Engine.Index index) {
postIndexing(index.parsedDoc(), tookInNanos); final long took = index.endTime() - index.startTime();
postIndexing(index.parsedDoc(), took);
} }
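A minimal sketch of the new signature's effect: the listener derives "took" from the operation's own start and end timestamps instead of having the caller pass it in (OperationSketch and its accessors are illustrative stand-ins for Engine.Index).

import java.util.concurrent.TimeUnit;

interface OperationSketch {
    long startTimeNanos();
    long endTimeNanos();
}

class SlowLogSketch {
    private final long thresholdMillis;

    SlowLogSketch(long thresholdMillis) {
        this.thresholdMillis = thresholdMillis;
    }

    void postIndex(OperationSketch op) {
        long tookMillis = TimeUnit.NANOSECONDS.toMillis(op.endTimeNanos() - op.startTimeNanos());
        if (tookMillis >= thresholdMillis) {
            System.out.println("indexing took " + tookMillis + "ms, over the slow-log threshold");
        }
    }
}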
/** /**

View File

@ -89,12 +89,12 @@ public class Analysis {
// check for explicit version on the specific analyzer component // check for explicit version on the specific analyzer component
String sVersion = settings.get("version"); String sVersion = settings.get("version");
if (sVersion != null) { if (sVersion != null) {
return Lucene.parseVersion(sVersion, Lucene.ANALYZER_VERSION, logger); return Lucene.parseVersion(sVersion, Version.LATEST, logger);
} }
// check for explicit version on the index itself as default for all analysis components // check for explicit version on the index itself as default for all analysis components
sVersion = indexSettings.get("index.analysis.version"); sVersion = indexSettings.get("index.analysis.version");
if (sVersion != null) { if (sVersion != null) {
return Lucene.parseVersion(sVersion, Lucene.ANALYZER_VERSION, logger); return Lucene.parseVersion(sVersion, Version.LATEST, logger);
} }
// resolve the analysis version based on the version the index was created with // resolve the analysis version based on the version the index was created with
return org.elasticsearch.Version.indexCreated(indexSettings).luceneVersion; return org.elasticsearch.Version.indexCreated(indexSettings).luceneVersion;
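A minimal sketch of the lenient-parse-with-default pattern this hunk relies on; the helper name is hypothetical, while Version.parseLeniently is the real Lucene API.

import java.text.ParseException;
import org.apache.lucene.util.Version;

class VersionSketch {
    static Version parseOrDefault(String s, Version fallback) {
        if (s == null) {
            return fallback; // no explicit version configured
        }
        try {
            return Version.parseLeniently(s); // accepts forms like "5.4" or "LUCENE_5_4"
        } catch (ParseException e) {
            return fallback; // unparseable input falls back to the default
        }
    }
}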

View File

@ -181,6 +181,7 @@ public final class AnalysisRegistry implements Closeable {
tokenizers.put("standard", StandardTokenizerFactory::new); tokenizers.put("standard", StandardTokenizerFactory::new);
tokenizers.put("uax_url_email", UAX29URLEmailTokenizerFactory::new); tokenizers.put("uax_url_email", UAX29URLEmailTokenizerFactory::new);
tokenizers.put("path_hierarchy", PathHierarchyTokenizerFactory::new); tokenizers.put("path_hierarchy", PathHierarchyTokenizerFactory::new);
tokenizers.put("PathHierarchy", PathHierarchyTokenizerFactory::new);
tokenizers.put("keyword", KeywordTokenizerFactory::new); tokenizers.put("keyword", KeywordTokenizerFactory::new);
tokenizers.put("letter", LetterTokenizerFactory::new); tokenizers.put("letter", LetterTokenizerFactory::new);
tokenizers.put("lowercase", LowerCaseTokenizerFactory::new); tokenizers.put("lowercase", LowerCaseTokenizerFactory::new);
@ -409,6 +410,7 @@ public final class AnalysisRegistry implements Closeable {
// Tokenizer aliases // Tokenizer aliases
tokenizerFactories.put("nGram", new PreBuiltTokenizerFactoryFactory(PreBuiltTokenizers.NGRAM.getTokenizerFactory(Version.CURRENT))); tokenizerFactories.put("nGram", new PreBuiltTokenizerFactoryFactory(PreBuiltTokenizers.NGRAM.getTokenizerFactory(Version.CURRENT)));
tokenizerFactories.put("edgeNGram", new PreBuiltTokenizerFactoryFactory(PreBuiltTokenizers.EDGE_NGRAM.getTokenizerFactory(Version.CURRENT))); tokenizerFactories.put("edgeNGram", new PreBuiltTokenizerFactoryFactory(PreBuiltTokenizers.EDGE_NGRAM.getTokenizerFactory(Version.CURRENT)));
tokenizerFactories.put("PathHierarchy", new PreBuiltTokenizerFactoryFactory(PreBuiltTokenizers.PATH_HIERARCHY.getTokenizerFactory(Version.CURRENT)));
// Token filters // Token filters
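A minimal sketch of the alias pattern behind the two added PathHierarchy lines above: both spellings resolve to one factory, so legacy camel-case configs keep working (the map and supplier here are illustrative, not the registry's real types).

import java.util.HashMap;
import java.util.Map;
import java.util.function.Supplier;

class AliasSketch {
    public static void main(String[] args) {
        Map<String, Supplier<String>> tokenizers = new HashMap<>();
        Supplier<String> pathHierarchy = () -> "path-hierarchy tokenizer";
        tokenizers.put("path_hierarchy", pathHierarchy);
        tokenizers.put("PathHierarchy", pathHierarchy); // legacy camel-case alias, same factory
        System.out.println(tokenizers.get("PathHierarchy").get());
    }
}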

View File

@ -118,6 +118,12 @@ public final class BitsetFilterCache extends AbstractIndexComponent implements L
private BitSet getAndLoadIfNotPresent(final Query query, final LeafReaderContext context) throws IOException, ExecutionException { private BitSet getAndLoadIfNotPresent(final Query query, final LeafReaderContext context) throws IOException, ExecutionException {
final Object coreCacheReader = context.reader().getCoreCacheKey(); final Object coreCacheReader = context.reader().getCoreCacheKey();
final ShardId shardId = ShardUtils.extractShardId(context.reader()); final ShardId shardId = ShardUtils.extractShardId(context.reader());
if (shardId != null // can't require it because of the percolator
&& indexSettings.getIndex().getName().equals(shardId.getIndex()) == false) {
// insanity
throw new IllegalStateException("Trying to load bit set for index [" + shardId.getIndex()
+ "] with cache of index [" + indexSettings.getIndex().getName() + "]");
}
Cache<Query, Value> filterToFbs = loadedFilters.computeIfAbsent(coreCacheReader, key -> { Cache<Query, Value> filterToFbs = loadedFilters.computeIfAbsent(coreCacheReader, key -> {
context.reader().addCoreClosedListener(BitsetFilterCache.this); context.reader().addCoreClosedListener(BitsetFilterCache.this);
return CacheBuilder.<Query, Value>builder().build(); return CacheBuilder.<Query, Value>builder().build();
@ -208,6 +214,11 @@ public final class BitsetFilterCache extends AbstractIndexComponent implements L
@Override @Override
public IndicesWarmer.TerminationHandle warmNewReaders(final IndexShard indexShard, final Engine.Searcher searcher) { public IndicesWarmer.TerminationHandle warmNewReaders(final IndexShard indexShard, final Engine.Searcher searcher) {
if (indexSettings.getIndex().equals(indexShard.getIndexSettings().getIndex()) == false) {
// this is from a different index
return TerminationHandle.NO_WAIT;
}
if (!loadRandomAccessFiltersEagerly) { if (!loadRandomAccessFiltersEagerly) {
return TerminationHandle.NO_WAIT; return TerminationHandle.NO_WAIT;
} }
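A minimal sketch of the fail-fast check added above, reduced to its essentials (parameter names are illustrative):

class IndexCheckSketch {
    static void checkSameIndex(String cacheIndex, String shardIndex) {
        // shardIndex may be null (e.g. the percolator), so only compare when present
        if (shardIndex != null && !cacheIndex.equals(shardIndex)) {
            throw new IllegalStateException("Trying to load bit set for index [" + shardIndex
                    + "] with cache of index [" + cacheIndex + "]");
        }
    }
}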

View File

@ -50,6 +50,8 @@ import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.metrics.CounterMetric;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.common.util.concurrent.ReleasableLock;
import org.elasticsearch.index.VersionType; import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.mapper.ParseContext.Document; import org.elasticsearch.index.mapper.ParseContext.Document;
@ -178,10 +180,10 @@ public abstract class Engine implements Closeable {
* is enabled * is enabled
*/ */
protected static final class IndexThrottle { protected static final class IndexThrottle {
private final CounterMetric throttleTimeMillisMetric = new CounterMetric();
private volatile long startOfThrottleNS;
private static final ReleasableLock NOOP_LOCK = new ReleasableLock(new NoOpLock()); private static final ReleasableLock NOOP_LOCK = new ReleasableLock(new NoOpLock());
private final ReleasableLock lockReference = new ReleasableLock(new ReentrantLock()); private final ReleasableLock lockReference = new ReleasableLock(new ReentrantLock());
private volatile ReleasableLock lock = NOOP_LOCK; private volatile ReleasableLock lock = NOOP_LOCK;
public Releasable acquireThrottle() { public Releasable acquireThrottle() {
@ -191,6 +193,7 @@ public abstract class Engine implements Closeable {
/** Activate throttling, which switches the lock to be a real lock */ /** Activate throttling, which switches the lock to be a real lock */
public void activate() { public void activate() {
assert lock == NOOP_LOCK : "throttling activated while already active"; assert lock == NOOP_LOCK : "throttling activated while already active";
startOfThrottleNS = System.nanoTime();
lock = lockReference; lock = lockReference;
} }
@ -198,9 +201,47 @@ public abstract class Engine implements Closeable {
public void deactivate() { public void deactivate() {
assert lock != NOOP_LOCK : "throttling deactivated but not active"; assert lock != NOOP_LOCK : "throttling deactivated but not active";
lock = NOOP_LOCK; lock = NOOP_LOCK;
assert startOfThrottleNS > 0 : "Bad state of startOfThrottleNS";
long throttleTimeNS = System.nanoTime() - startOfThrottleNS;
if (throttleTimeNS >= 0) {
// Paranoia (System.nanoTime() is supposed to be monotonic): a time slip may have occurred, but we never want to add a negative number
throttleTimeMillisMetric.inc(TimeValue.nsecToMSec(throttleTimeNS));
} }
} }
long getThrottleTimeInMillis() {
long currentThrottleNS = 0;
if (isThrottled() && startOfThrottleNS != 0) {
currentThrottleNS += System.nanoTime() - startOfThrottleNS;
if (currentThrottleNS < 0) {
// Paranoia (System.nanoTime() is supposed to be monotonic): a time slip must have happened, so we have to ignore this value
currentThrottleNS = 0;
}
}
return throttleTimeMillisMetric.count() + TimeValue.nsecToMSec(currentThrottleNS);
}
boolean isThrottled() {
return lock != NOOP_LOCK;
}
}
/**
* Returns the number of milliseconds this engine was under index throttling.
*/
public long getIndexThrottleTimeInMillis() {
return 0;
}
/**
* Returns <code>true</code> iff this engine is currently under index throttling.
* @see #getIndexThrottleTimeInMillis()
*/
public boolean isThrottled() {
return false;
}
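A minimal sketch of the accounting pattern the new metric uses: completed throttle periods accumulate in a counter, and a read adds the in-flight period on top, clamping negative nanoTime deltas to zero out of the same paranoia as above.

class ThrottleClockSketch {
    private long accumulatedMillis;
    private volatile long startNanos; // 0 means "not currently throttled"

    synchronized void activate() {
        startNanos = System.nanoTime();
    }

    synchronized void deactivate() {
        long elapsedNanos = System.nanoTime() - startNanos;
        if (elapsedNanos >= 0) { // nanoTime should be monotonic; ignore a negative slip
            accumulatedMillis += elapsedNanos / 1_000_000;
        }
        startNanos = 0;
    }

    synchronized long totalMillis() {
        long inFlightNanos = startNanos != 0 ? Math.max(0, System.nanoTime() - startNanos) : 0;
        return accumulatedMillis + inFlightNanos / 1_000_000;
    }
}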
/** A Lock implementation that always allows the lock to be acquired */ /** A Lock implementation that always allows the lock to be acquired */
protected static final class NoOpLock implements Lock { protected static final class NoOpLock implements Lock {
@ -936,7 +977,7 @@ public abstract class Engine implements Closeable {
} }
} }
public static class GetResult { public static class GetResult implements Releasable {
private final boolean exists; private final boolean exists;
private final long version; private final long version;
private final Translog.Source source; private final Translog.Source source;
@ -982,6 +1023,11 @@ public abstract class Engine implements Closeable {
return docIdAndVersion; return docIdAndVersion;
} }
@Override
public void close() {
release();
}
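A minimal sketch of what the added close() buys callers, assuming Releasable is ultimately a Closeable/AutoCloseable variant (which the @Override on close() suggests): delegating close() to the existing release() makes the result usable in try-with-resources.

class ResourceSketch implements AutoCloseable {
    void release() {
        System.out.println("released");
    }

    @Override
    public void close() {
        release(); // close() and release() are now interchangeable
    }

    public static void main(String[] args) {
        try (ResourceSketch r = new ResourceSketch()) {
            // work with r; release() runs automatically on exit
        }
    }
}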
public void release() { public void release() {
if (searcher != null) { if (searcher != null) {
searcher.close(); searcher.close();

View File

@ -30,13 +30,12 @@ import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.codec.CodecService;
import org.elasticsearch.index.indexing.ShardIndexingService;
import org.elasticsearch.index.shard.MergeSchedulerConfig; import org.elasticsearch.index.shard.MergeSchedulerConfig;
import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.TranslogRecoveryPerformer; import org.elasticsearch.index.shard.TranslogRecoveryPerformer;
import org.elasticsearch.index.store.Store; import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.translog.TranslogConfig; import org.elasticsearch.index.translog.TranslogConfig;
import org.elasticsearch.indices.memory.IndexingMemoryController; import org.elasticsearch.indices.IndexingMemoryController;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
@ -58,7 +57,6 @@ public final class EngineConfig {
private final TimeValue flushMergesAfter; private final TimeValue flushMergesAfter;
private final String codecName; private final String codecName;
private final ThreadPool threadPool; private final ThreadPool threadPool;
private final ShardIndexingService indexingService;
private final Engine.Warmer warmer; private final Engine.Warmer warmer;
private final Store store; private final Store store;
private final SnapshotDeletionPolicy deletionPolicy; private final SnapshotDeletionPolicy deletionPolicy;
@ -108,7 +106,7 @@ public final class EngineConfig {
/** /**
* Creates a new {@link org.elasticsearch.index.engine.EngineConfig} * Creates a new {@link org.elasticsearch.index.engine.EngineConfig}
*/ */
public EngineConfig(ShardId shardId, ThreadPool threadPool, ShardIndexingService indexingService, public EngineConfig(ShardId shardId, ThreadPool threadPool,
IndexSettings indexSettings, Engine.Warmer warmer, Store store, SnapshotDeletionPolicy deletionPolicy, IndexSettings indexSettings, Engine.Warmer warmer, Store store, SnapshotDeletionPolicy deletionPolicy,
MergePolicy mergePolicy, MergeSchedulerConfig mergeSchedulerConfig, Analyzer analyzer, MergePolicy mergePolicy, MergeSchedulerConfig mergeSchedulerConfig, Analyzer analyzer,
Similarity similarity, CodecService codecService, Engine.EventListener eventListener, Similarity similarity, CodecService codecService, Engine.EventListener eventListener,
@ -117,9 +115,7 @@ public final class EngineConfig {
final Settings settings = indexSettings.getSettings(); final Settings settings = indexSettings.getSettings();
this.indexSettings = indexSettings; this.indexSettings = indexSettings;
this.threadPool = threadPool; this.threadPool = threadPool;
this.indexingService = indexingService; this.warmer = warmer == null ? (a,b) -> {} : warmer;
this.warmer = warmer == null ? (a, b) -> {
} : warmer;
this.store = store; this.store = store;
this.deletionPolicy = deletionPolicy; this.deletionPolicy = deletionPolicy;
this.mergePolicy = mergePolicy; this.mergePolicy = mergePolicy;
@ -197,7 +193,7 @@ public final class EngineConfig {
} }
/** /**
* Returns the initial index buffer size. This setting is only read on startup and otherwise controlled by {@link org.elasticsearch.indices.memory.IndexingMemoryController} * Returns the initial index buffer size. This setting is only read on startup and otherwise controlled by {@link IndexingMemoryController}
*/ */
public ByteSizeValue getIndexingBufferSize() { public ByteSizeValue getIndexingBufferSize() {
return indexingBufferSize; return indexingBufferSize;
@ -241,17 +237,6 @@ public final class EngineConfig {
return threadPool; return threadPool;
} }
/**
* Returns a {@link org.elasticsearch.index.indexing.ShardIndexingService} used inside the engine to inform about
* pre and post index. The operations are used for statistic purposes etc.
*
* @see org.elasticsearch.index.indexing.ShardIndexingService#postIndex(Engine.Index)
* @see org.elasticsearch.index.indexing.ShardIndexingService#preIndex(Engine.Index)
*/
public ShardIndexingService getIndexingService() {
return indexingService;
}
/** /**
* Returns an {@link org.elasticsearch.index.engine.Engine.Warmer} used to warm new searchers before they are used for searching. * Returns an {@link org.elasticsearch.index.engine.Engine.Warmer} used to warm new searchers before they are used for searching.
*/ */

View File

@ -59,7 +59,6 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.common.util.concurrent.ReleasableLock;
import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.indexing.ShardIndexingService;
import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.merge.OnGoingMerge; import org.elasticsearch.index.merge.OnGoingMerge;
@ -97,7 +96,6 @@ public class InternalEngine extends Engine {
*/ */
private volatile long lastDeleteVersionPruneTimeMSec; private volatile long lastDeleteVersionPruneTimeMSec;
private final ShardIndexingService indexingService;
private final Engine.Warmer warmer; private final Engine.Warmer warmer;
private final Translog translog; private final Translog translog;
private final ElasticsearchConcurrentMergeScheduler mergeScheduler; private final ElasticsearchConcurrentMergeScheduler mergeScheduler;
@ -135,7 +133,6 @@ public class InternalEngine extends Engine {
boolean success = false; boolean success = false;
try { try {
this.lastDeleteVersionPruneTimeMSec = engineConfig.getThreadPool().estimatedTimeInMillis(); this.lastDeleteVersionPruneTimeMSec = engineConfig.getThreadPool().estimatedTimeInMillis();
this.indexingService = engineConfig.getIndexingService();
this.warmer = engineConfig.getWarmer(); this.warmer = engineConfig.getWarmer();
seqNoService = new SequenceNumbersService(shardId, engineConfig.getIndexSettings()); seqNoService = new SequenceNumbersService(shardId, engineConfig.getIndexSettings());
mergeScheduler = scheduler = new EngineMergeScheduler(engineConfig.getShardId(), engineConfig.getIndexSettings(), engineConfig.getMergeSchedulerConfig()); mergeScheduler = scheduler = new EngineMergeScheduler(engineConfig.getShardId(), engineConfig.getIndexSettings(), engineConfig.getMergeSchedulerConfig());
@ -430,8 +427,6 @@ public class InternalEngine extends Engine {
versionMap.putUnderLock(index.uid().bytes(), new VersionValue(updatedVersion, translogLocation)); versionMap.putUnderLock(index.uid().bytes(), new VersionValue(updatedVersion, translogLocation));
index.setTranslogLocation(translogLocation); index.setTranslogLocation(translogLocation);
indexingService.postIndexUnderLock(index);
return created; return created;
} finally { } finally {
if (index.seqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) { if (index.seqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) {
@ -543,7 +538,6 @@ public class InternalEngine extends Engine {
Translog.Location translogLocation = translog.add(new Translog.Delete(delete)); Translog.Location translogLocation = translog.add(new Translog.Delete(delete));
versionMap.putUnderLock(delete.uid().bytes(), new DeleteVersionValue(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis(), translogLocation)); versionMap.putUnderLock(delete.uid().bytes(), new DeleteVersionValue(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis(), translogLocation));
delete.setTranslogLocation(translogLocation); delete.setTranslogLocation(translogLocation);
indexingService.postDeleteUnderLock(delete);
} finally { } finally {
if (delete.seqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) { if (delete.seqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) {
seqNoService.markSeqNoAsCompleted(delete.seqNo()); seqNoService.markSeqNoAsCompleted(delete.seqNo());
@ -989,8 +983,7 @@ public class InternalEngine extends Engine {
}); });
return new IndexWriter(store.directory(), iwc); return new IndexWriter(store.directory(), iwc);
} catch (LockObtainFailedException ex) { } catch (LockObtainFailedException ex) {
boolean isLocked = IndexWriter.isLocked(store.directory()); logger.warn("could not lock IndexWriter", ex);
logger.warn("Could not lock IndexWriter isLocked [{}]", ex, isLocked);
throw ex; throw ex;
} }
} }
@ -1083,6 +1076,10 @@ public class InternalEngine extends Engine {
throttle.deactivate(); throttle.deactivate();
} }
public long getIndexThrottleTimeInMillis() {
return throttle.getThrottleTimeInMillis();
}
long getGcDeletesInMillis() { long getGcDeletesInMillis() {
return engineConfig.getGcDeletesInMillis(); return engineConfig.getGcDeletesInMillis();
} }
@ -1105,7 +1102,6 @@ public class InternalEngine extends Engine {
if (numMergesInFlight.incrementAndGet() > maxNumMerges) { if (numMergesInFlight.incrementAndGet() > maxNumMerges) {
if (isThrottling.getAndSet(true) == false) { if (isThrottling.getAndSet(true) == false) {
logger.info("now throttling indexing: numMergesInFlight={}, maxNumMerges={}", numMergesInFlight, maxNumMerges); logger.info("now throttling indexing: numMergesInFlight={}, maxNumMerges={}", numMergesInFlight, maxNumMerges);
indexingService.throttlingActivated();
activateThrottling(); activateThrottling();
} }
} }
@ -1117,7 +1113,6 @@ public class InternalEngine extends Engine {
if (numMergesInFlight.decrementAndGet() < maxNumMerges) { if (numMergesInFlight.decrementAndGet() < maxNumMerges) {
if (isThrottling.getAndSet(false)) { if (isThrottling.getAndSet(false)) {
logger.info("stop throttling indexing: numMergesInFlight={}, maxNumMerges={}", numMergesInFlight, maxNumMerges); logger.info("stop throttling indexing: numMergesInFlight={}, maxNumMerges={}", numMergesInFlight, maxNumMerges);
indexingService.throttlingDeactivated();
deactivateThrottling(); deactivateThrottling();
} }
} }
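A minimal sketch of the merge-backpressure hand-off that remains after the stats calls were removed: throttling flips on when in-flight merges exceed the budget, and off when they drop back under it.

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

class MergeBackpressureSketch {
    private final AtomicInteger mergesInFlight = new AtomicInteger();
    private final AtomicBoolean throttling = new AtomicBoolean();
    private final int maxNumMerges;

    MergeBackpressureSketch(int maxNumMerges) {
        this.maxNumMerges = maxNumMerges;
    }

    void beforeMerge() {
        if (mergesInFlight.incrementAndGet() > maxNumMerges
                && throttling.compareAndSet(false, true)) {
            System.out.println("now throttling indexing");
        }
    }

    void afterMerge() {
        if (mergesInFlight.decrementAndGet() < maxNumMerges
                && throttling.compareAndSet(true, false)) {
            System.out.println("stop throttling indexing");
        }
    }
}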

View File

@ -58,11 +58,16 @@ public class SingleFieldsVisitor extends FieldsVisitor {
public void postProcess(MappedFieldType fieldType) { public void postProcess(MappedFieldType fieldType) {
if (uid != null) { if (uid != null) {
// TODO: this switch seems very wrong...either each case should be breaking, or this should not be a switch
switch (field) { switch (field) {
case UidFieldMapper.NAME: addValue(field, uid.toString()); case UidFieldMapper.NAME:
case IdFieldMapper.NAME: addValue(field, uid.id()); addValue(field, uid.toString());
case TypeFieldMapper.NAME: addValue(field, uid.type()); break;
case IdFieldMapper.NAME:
addValue(field, uid.id());
break;
case TypeFieldMapper.NAME:
addValue(field, uid.type());
break;
} }
} }
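A minimal sketch of the bug class being fixed: without break, Java executes every case below the matching one, so a uid lookup would also have recorded id and type values.

class FallthroughBugSketch {
    static String describe(int field) {
        StringBuilder sb = new StringBuilder();
        switch (field) {
            case 0: sb.append("uid ");  // no break: falls into the cases below
            case 1: sb.append("id ");
            case 2: sb.append("type ");
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        System.out.println(describe(0)); // prints "uid id type ", rarely what was intended
    }
}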

View File

@ -1,89 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.indexing;
import org.elasticsearch.index.engine.Engine;
/**
* An indexing listener for indexing and delete events.
*/
public abstract class IndexingOperationListener {
/**
* Called before the indexing occurs.
*/
public Engine.Index preIndex(Engine.Index operation) {
return operation;
}
/**
* Called after the indexing occurs, under a locking scheme to maintain
* concurrent updates to the same doc.
* <p>
* Note, long operations should not occur under this callback.
*/
public void postIndexUnderLock(Engine.Index index) {
}
/**
* Called after the indexing operation occurred.
*/
public void postIndex(Engine.Index index) {
}
/**
* Called after the indexing operation occurred with exception.
*/
public void postIndex(Engine.Index index, Throwable ex) {
}
/**
* Called before the delete occurs.
*/
public Engine.Delete preDelete(Engine.Delete delete) {
return delete;
}
/**
* Called after the delete occurs, under a locking scheme to maintain
* concurrent updates to the same doc.
* <p>
* Note, long operations should not occur under this callback.
*/
public void postDeleteUnderLock(Engine.Delete delete) {
}
/**
* Called after the delete operation occurred.
*/
public void postDelete(Engine.Delete delete) {
}
/**
* Called after the delete operation occurred with exception.
*/
public void postDelete(Engine.Delete delete, Throwable ex) {
}
}

View File

@ -1,286 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.indexing;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.metrics.CounterMetric;
import org.elasticsearch.common.metrics.MeanMetric;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.shard.AbstractIndexShardComponent;
import org.elasticsearch.index.shard.ShardId;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.TimeUnit;
import static java.util.Collections.emptyMap;
/**
*/
public class ShardIndexingService extends AbstractIndexShardComponent {
private final IndexingSlowLog slowLog;
private final StatsHolder totalStats = new StatsHolder();
private final CopyOnWriteArrayList<IndexingOperationListener> listeners = new CopyOnWriteArrayList<>();
private volatile Map<String, StatsHolder> typesStats = emptyMap();
public ShardIndexingService(ShardId shardId, IndexSettings indexSettings) {
super(shardId, indexSettings);
this.slowLog = new IndexingSlowLog(this.indexSettings.getSettings());
}
/**
* Returns the stats, including type specific stats. If the types are null/0 length, then nothing
* is returned for them. If they are set, then only types provided will be returned, or
* <tt>_all</tt> for all types.
*/
public IndexingStats stats(String... types) {
IndexingStats.Stats total = totalStats.stats();
Map<String, IndexingStats.Stats> typesSt = null;
if (types != null && types.length > 0) {
typesSt = new HashMap<>(typesStats.size());
if (types.length == 1 && types[0].equals("_all")) {
for (Map.Entry<String, StatsHolder> entry : typesStats.entrySet()) {
typesSt.put(entry.getKey(), entry.getValue().stats());
}
} else {
for (Map.Entry<String, StatsHolder> entry : typesStats.entrySet()) {
if (Regex.simpleMatch(types, entry.getKey())) {
typesSt.put(entry.getKey(), entry.getValue().stats());
}
}
}
}
return new IndexingStats(total, typesSt);
}
public void addListener(IndexingOperationListener listener) {
listeners.add(listener);
}
public void removeListener(IndexingOperationListener listener) {
listeners.remove(listener);
}
public void throttlingActivated() {
totalStats.setThrottled(true);
}
public void throttlingDeactivated() {
totalStats.setThrottled(false);
}
public Engine.Index preIndex(Engine.Index operation) {
totalStats.indexCurrent.inc();
typeStats(operation.type()).indexCurrent.inc();
for (IndexingOperationListener listener : listeners) {
operation = listener.preIndex(operation);
}
return operation;
}
public void postIndexUnderLock(Engine.Index index) {
for (IndexingOperationListener listener : listeners) {
try {
listener.postIndexUnderLock(index);
} catch (Exception e) {
logger.warn("postIndexUnderLock listener [{}] failed", e, listener);
}
}
}
public void postIndex(Engine.Index index) {
long took = index.endTime() - index.startTime();
totalStats.indexMetric.inc(took);
totalStats.indexCurrent.dec();
StatsHolder typeStats = typeStats(index.type());
typeStats.indexMetric.inc(took);
typeStats.indexCurrent.dec();
slowLog.postIndex(index, took);
for (IndexingOperationListener listener : listeners) {
try {
listener.postIndex(index);
} catch (Exception e) {
logger.warn("postIndex listener [{}] failed", e, listener);
}
}
}
public void postIndex(Engine.Index index, Throwable ex) {
totalStats.indexCurrent.dec();
typeStats(index.type()).indexCurrent.dec();
totalStats.indexFailed.inc();
typeStats(index.type()).indexFailed.inc();
for (IndexingOperationListener listener : listeners) {
try {
listener.postIndex(index, ex);
} catch (Throwable t) {
logger.warn("postIndex listener [{}] failed", t, listener);
}
}
}
public Engine.Delete preDelete(Engine.Delete delete) {
totalStats.deleteCurrent.inc();
typeStats(delete.type()).deleteCurrent.inc();
for (IndexingOperationListener listener : listeners) {
delete = listener.preDelete(delete);
}
return delete;
}
public void postDeleteUnderLock(Engine.Delete delete) {
for (IndexingOperationListener listener : listeners) {
try {
listener.postDeleteUnderLock(delete);
} catch (Exception e) {
logger.warn("postDeleteUnderLock listener [{}] failed", e, listener);
}
}
}
public void postDelete(Engine.Delete delete) {
long took = delete.endTime() - delete.startTime();
totalStats.deleteMetric.inc(took);
totalStats.deleteCurrent.dec();
StatsHolder typeStats = typeStats(delete.type());
typeStats.deleteMetric.inc(took);
typeStats.deleteCurrent.dec();
for (IndexingOperationListener listener : listeners) {
try {
listener.postDelete(delete);
} catch (Exception e) {
logger.warn("postDelete listener [{}] failed", e, listener);
}
}
}
public void postDelete(Engine.Delete delete, Throwable ex) {
totalStats.deleteCurrent.dec();
typeStats(delete.type()).deleteCurrent.dec();
for (IndexingOperationListener listener : listeners) {
try {
listener.postDelete(delete, ex);
} catch (Throwable t) {
logger.warn("postDelete listener [{}] failed", t, listener);
}
}
}
public void noopUpdate(String type) {
totalStats.noopUpdates.inc();
typeStats(type).noopUpdates.inc();
}
public void clear() {
totalStats.clear();
synchronized (this) {
if (!typesStats.isEmpty()) {
MapBuilder<String, StatsHolder> typesStatsBuilder = MapBuilder.newMapBuilder();
for (Map.Entry<String, StatsHolder> typeStats : typesStats.entrySet()) {
if (typeStats.getValue().totalCurrent() > 0) {
typeStats.getValue().clear();
typesStatsBuilder.put(typeStats.getKey(), typeStats.getValue());
}
}
typesStats = typesStatsBuilder.immutableMap();
}
}
}
private StatsHolder typeStats(String type) {
StatsHolder stats = typesStats.get(type);
if (stats == null) {
synchronized (this) {
stats = typesStats.get(type);
if (stats == null) {
stats = new StatsHolder();
typesStats = MapBuilder.newMapBuilder(typesStats).put(type, stats).immutableMap();
}
}
}
return stats;
}
public void onRefreshSettings(Settings settings) {
slowLog.onRefreshSettings(settings);
}
static class StatsHolder {
public final MeanMetric indexMetric = new MeanMetric();
public final MeanMetric deleteMetric = new MeanMetric();
public final CounterMetric indexCurrent = new CounterMetric();
public final CounterMetric indexFailed = new CounterMetric();
public final CounterMetric deleteCurrent = new CounterMetric();
public final CounterMetric noopUpdates = new CounterMetric();
public final CounterMetric throttleTimeMillisMetric = new CounterMetric();
volatile boolean isThrottled = false;
volatile long startOfThrottleNS;
public IndexingStats.Stats stats() {
long currentThrottleNS = 0;
if (isThrottled && startOfThrottleNS != 0) {
currentThrottleNS += System.nanoTime() - startOfThrottleNS;
if (currentThrottleNS < 0) {
// Paranoia (System.nanoTime() is supposed to be monotonic): a time slip must have happened, so we have to ignore this value
currentThrottleNS = 0;
}
}
return new IndexingStats.Stats(
indexMetric.count(), TimeUnit.NANOSECONDS.toMillis(indexMetric.sum()), indexCurrent.count(), indexFailed.count(),
deleteMetric.count(), TimeUnit.NANOSECONDS.toMillis(deleteMetric.sum()), deleteCurrent.count(),
noopUpdates.count(), isThrottled, TimeUnit.MILLISECONDS.toMillis(throttleTimeMillisMetric.count() + TimeValue.nsecToMSec(currentThrottleNS)));
}
void setThrottled(boolean isThrottled) {
if (!this.isThrottled && isThrottled) {
startOfThrottleNS = System.nanoTime();
} else if (this.isThrottled && !isThrottled) {
assert startOfThrottleNS > 0 : "Bad state of startOfThrottleNS";
long throttleTimeNS = System.nanoTime() - startOfThrottleNS;
if (throttleTimeNS >= 0) {
// Paranoia (System.nanoTime() is supposed to be monotonic): time slip may have occurred but never want to add a negative number
throttleTimeMillisMetric.inc(TimeValue.nsecToMSec(throttleTimeNS));
}
}
this.isThrottled = isThrottled;
}
public long totalCurrent() {
return indexCurrent.count() + deleteCurrent.count();
}
public void clear() {
indexMetric.clear();
deleteMetric.clear();
}
}
}
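The typeStats lookup above is a double-checked publication over an immutable map: readers take a lock-free volatile read, and writers copy, mutate, and republish a fresh snapshot under a lock. A minimal generic sketch of the same idiom (all names here are illustrative, not part of this change):

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Supplier;

final class CopyOnWriteRegistry<V> {
    private volatile Map<String, V> snapshot = Collections.emptyMap();

    V getOrCreate(String key, Supplier<V> factory) {
        V value = snapshot.get(key);           // fast path: no lock for readers
        if (value == null) {
            synchronized (this) {
                value = snapshot.get(key);     // re-check: another thread may have won the race
                if (value == null) {
                    value = factory.get();
                    Map<String, V> copy = new HashMap<>(snapshot);
                    copy.put(key, value);
                    snapshot = Collections.unmodifiableMap(copy);  // publish the new snapshot
                }
            }
        }
        return value;
    }
}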

View File

@ -33,12 +33,14 @@ import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.analysis.AnalysisService;
 import org.elasticsearch.index.mapper.object.RootObjectMapper;
+import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.index.similarity.SimilarityService;
 import org.elasticsearch.indices.mapper.MapperRegistry;

 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.Map;
+import java.util.function.Supplier;

 import static java.util.Collections.unmodifiableMap;
 import static org.elasticsearch.index.mapper.MapperBuilders.doc;

@ -49,6 +51,7 @@ public class DocumentMapperParser {

     final AnalysisService analysisService;
     private static final ESLogger logger = Loggers.getLogger(DocumentMapperParser.class);
     private final SimilarityService similarityService;
+    private final Supplier<QueryShardContext> queryShardContextSupplier;

     private final RootObjectMapper.TypeParser rootObjectTypeParser = new RootObjectMapper.TypeParser();

@ -59,18 +62,20 @@ public class DocumentMapperParser {

     private final Map<String, MetadataFieldMapper.TypeParser> rootTypeParsers;

     public DocumentMapperParser(IndexSettings indexSettings, MapperService mapperService, AnalysisService analysisService,
-                                SimilarityService similarityService, MapperRegistry mapperRegistry) {
+                                SimilarityService similarityService, MapperRegistry mapperRegistry,
+                                Supplier<QueryShardContext> queryShardContextSupplier) {
         this.parseFieldMatcher = new ParseFieldMatcher(indexSettings.getSettings());
         this.mapperService = mapperService;
         this.analysisService = analysisService;
         this.similarityService = similarityService;
+        this.queryShardContextSupplier = queryShardContextSupplier;
         this.typeParsers = mapperRegistry.getMapperParsers();
         this.rootTypeParsers = mapperRegistry.getMetadataMapperParsers();
         indexVersionCreated = indexSettings.getIndexVersionCreated();
     }

     public Mapper.TypeParser.ParserContext parserContext(String type) {
-        return new Mapper.TypeParser.ParserContext(type, analysisService, similarityService::getSimilarity, mapperService, typeParsers::get, indexVersionCreated, parseFieldMatcher);
+        return new Mapper.TypeParser.ParserContext(type, analysisService, similarityService::getSimilarity, mapperService, typeParsers::get, indexVersionCreated, parseFieldMatcher, queryShardContextSupplier.get());
     }

     public DocumentMapper parse(@Nullable String type, CompressedXContent source) throws MapperParsingException {

View File

@ -26,6 +26,8 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.index.similarity.SimilarityProvider;

 import java.util.Map;

@ -95,9 +97,11 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {

         private final ParseFieldMatcher parseFieldMatcher;
+        private final QueryShardContext queryShardContext;

         public ParserContext(String type, AnalysisService analysisService, Function<String, SimilarityProvider> similarityLookupService,
                              MapperService mapperService, Function<String, TypeParser> typeParsers,
-                             Version indexVersionCreated, ParseFieldMatcher parseFieldMatcher) {
+                             Version indexVersionCreated, ParseFieldMatcher parseFieldMatcher, QueryShardContext queryShardContext) {
             this.type = type;
             this.analysisService = analysisService;
             this.similarityLookupService = similarityLookupService;

@ -105,6 +109,7 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {

             this.typeParsers = typeParsers;
             this.indexVersionCreated = indexVersionCreated;
             this.parseFieldMatcher = parseFieldMatcher;
+            this.queryShardContext = queryShardContext;
         }

         public String type() {

@ -135,6 +140,10 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {

             return parseFieldMatcher;
         }

+        public QueryShardContext queryShardContext() {
+            return queryShardContext;
+        }
+
         public boolean isWithinMultiField() { return false; }

         protected Function<String, TypeParser> typeParsers() { return typeParsers; }

@ -150,7 +159,7 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {

     static class MultiFieldParserContext extends ParserContext {

         MultiFieldParserContext(ParserContext in) {
-            super(in.type(), in.analysisService, in.similarityLookupService(), in.mapperService(), in.typeParsers(), in.indexVersionCreated(), in.parseFieldMatcher());
+            super(in.type(), in.analysisService, in.similarityLookupService(), in.mapperService(), in.typeParsers(), in.indexVersionCreated(), in.parseFieldMatcher(), in.queryShardContext());
         }
     }

View File

@ -44,6 +44,7 @@ import org.elasticsearch.index.analysis.AnalysisService;
 import org.elasticsearch.index.mapper.Mapper.BuilderContext;
 import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
 import org.elasticsearch.index.mapper.object.ObjectMapper;
+import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.index.similarity.SimilarityService;
 import org.elasticsearch.indices.InvalidTypeNameException;
 import org.elasticsearch.indices.TypeMissingException;

@ -64,12 +65,12 @@ import java.util.Map;

 import java.util.Set;
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.function.Function;
+import java.util.function.Supplier;
 import java.util.stream.Collectors;

 import static java.util.Collections.emptyMap;
 import static java.util.Collections.emptySet;
 import static java.util.Collections.unmodifiableMap;
-import static java.util.Collections.unmodifiableSet;
 import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder;

 /**

@ -116,11 +117,12 @@ public class MapperService extends AbstractIndexComponent implements Closeable {

     final MapperRegistry mapperRegistry;

     public MapperService(IndexSettings indexSettings, AnalysisService analysisService,
-                         SimilarityService similarityService, MapperRegistry mapperRegistry) {
+                         SimilarityService similarityService, MapperRegistry mapperRegistry,
+                         Supplier<QueryShardContext> queryShardContextSupplier) {
         super(indexSettings);
         this.analysisService = analysisService;
         this.fieldTypes = new FieldTypeLookup();
-        this.documentParser = new DocumentMapperParser(indexSettings, this, analysisService, similarityService, mapperRegistry);
+        this.documentParser = new DocumentMapperParser(indexSettings, this, analysisService, similarityService, mapperRegistry, queryShardContextSupplier);
         this.indexAnalyzer = new MapperAnalyzerWrapper(analysisService.defaultIndexAnalyzer(), p -> p.indexAnalyzer());
         this.searchAnalyzer = new MapperAnalyzerWrapper(analysisService.defaultSearchAnalyzer(), p -> p.searchAnalyzer());
         this.searchQuoteAnalyzer = new MapperAnalyzerWrapper(analysisService.defaultSearchQuoteAnalyzer(), p -> p.searchQuoteAnalyzer());

@ -131,8 +133,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {

             "\"_default_\":{\n" +
             "\"properties\" : {\n" +
             "\"query\" : {\n" +
-            "\"type\" : \"object\",\n" +
-            "\"enabled\" : false\n" +
+            "\"type\" : \"percolator\"\n" +
             "}\n" +
             "}\n" +
             "}\n" +

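The hunk above switches the built-in percolator mapping from a disabled object field to the new dedicated percolator field type. A hedged sketch of building such a mapping programmatically (the helper name is hypothetical; ".percolator" is the reserved percolator document type):

import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

static XContentBuilder percolatorQueryMapping() throws java.io.IOException {
    return XContentFactory.jsonBuilder()
        .startObject()
            .startObject(".percolator")
                .startObject("properties")
                    .startObject("query")
                        .field("type", "percolator")   // the field type wired up in this change
                    .endObject()
                .endObject()
            .endObject()
        .endObject();
}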
View File

@ -0,0 +1,233 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.percolator;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.queries.TermsQuery;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.elasticsearch.common.logging.support.LoggerMessageFormat;
import org.elasticsearch.index.mapper.ParseContext;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
/**
* Utility to extract query terms from queries and create queries from documents.
*/
public final class ExtractQueryTermsService {
private static final byte FIELD_VALUE_SEPARATOR = 0; // NUL byte, separates the field name from the term bytes
private ExtractQueryTermsService() {
}
/**
* Extracts all terms from the specified query and adds them to the specified document.
* @param query The query to extract terms from
* @param document The document to add the extracted terms to
* @param queryTermsFieldField The field in the document holding the extracted terms
* @param unknownQueryField The field used to mark a document for which not all query terms could be extracted. For example,
* the query contained an unsupported clause (e.g. WildcardQuery).
* @param fieldType The field type for the query metadata field
*/
public static void extractQueryTerms(Query query, ParseContext.Document document, String queryTermsFieldField, String unknownQueryField, FieldType fieldType) {
Set<Term> queryTerms;
try {
queryTerms = extractQueryTerms(query);
} catch (UnsupportedQueryException e) {
document.add(new Field(unknownQueryField, new BytesRef(), fieldType));
return;
}
for (Term term : queryTerms) {
BytesRefBuilder builder = new BytesRefBuilder();
builder.append(new BytesRef(term.field()));
builder.append(FIELD_VALUE_SEPARATOR);
builder.append(term.bytes());
document.add(new Field(queryTermsFieldField, builder.toBytesRef(), fieldType));
}
}
/**
* Extracts all query terms from the provided query and adds them to the returned set.
*
* From phrase queries only the longest term is selected, and from boolean queries with required clauses only the
* best required clause (the one whose shortest term is longest) is kept, since the longest terms are likely to be
* the rarest. A boolean query's must_not clauses are always ignored.
*
* If no query terms can be extracted from a part of the query, term extraction is stopped and
* an UnsupportedQueryException is thrown.
*/
static Set<Term> extractQueryTerms(Query query) {
// TODO: add support for the TermsQuery when it has methods to access the actual terms it encapsulates
// TODO: add support for span queries
if (query instanceof TermQuery) {
return Collections.singleton(((TermQuery) query).getTerm());
} else if (query instanceof PhraseQuery) {
Term[] terms = ((PhraseQuery) query).getTerms();
if (terms.length == 0) {
return Collections.emptySet();
}
// the longest term is likely to be the rarest,
// so from a performance perspective it makes sense to extract that
Term longestTerm = terms[0];
for (Term term : terms) {
if (longestTerm.bytes().length < term.bytes().length) {
longestTerm = term;
}
}
return Collections.singleton(longestTerm);
} else if (query instanceof BooleanQuery) {
List<BooleanClause> clauses = ((BooleanQuery) query).clauses();
boolean hasRequiredClauses = false;
for (BooleanClause clause : clauses) {
if (clause.isRequired()) {
hasRequiredClauses = true;
break;
}
}
if (hasRequiredClauses) {
Set<Term> bestClause = null;
for (BooleanClause clause : clauses) {
if (clause.isRequired() == false) {
// skip must_not clauses, we don't need to remember the things that do *not* match...
// skip should clauses, this bq has must clauses, so we don't need to remember should clauses, since they are completely optional.
continue;
}
Set<Term> temp = extractQueryTerms(clause.getQuery());
bestClause = selectTermListWithTheLongestShortestTerm(temp, bestClause);
}
if (bestClause != null) {
return bestClause;
} else {
return Collections.emptySet();
}
} else {
Set<Term> terms = new HashSet<>();
for (BooleanClause clause : clauses) {
if (clause.isProhibited()) {
// we don't need to remember the things that do *not* match...
continue;
}
terms.addAll(extractQueryTerms(clause.getQuery()));
}
return terms;
}
} else if (query instanceof ConstantScoreQuery) {
Query wrappedQuery = ((ConstantScoreQuery) query).getQuery();
return extractQueryTerms(wrappedQuery);
} else if (query instanceof BoostQuery) {
Query wrappedQuery = ((BoostQuery) query).getQuery();
return extractQueryTerms(wrappedQuery);
} else {
throw new UnsupportedQueryException(query);
}
}
static Set<Term> selectTermListWithTheLongestShortestTerm(Set<Term> terms1, Set<Term> terms2) {
if (terms1 == null) {
return terms2;
} else if (terms2 == null) {
return terms1;
} else {
int terms1ShortestTerm = minTermLength(terms1);
int terms2ShortestTerm = minTermLength(terms2);
// keep the clause whose shortest term is longest; it is likely to be the rarest.
if (terms1ShortestTerm >= terms2ShortestTerm) {
return terms1;
} else {
return terms2;
}
}
}
private static int minTermLength(Set<Term> terms) {
int min = Integer.MAX_VALUE;
for (Term term : terms) {
min = Math.min(min, term.bytes().length);
}
return min;
}
/**
* Creates a terms query (effectively one should clause per term) over all terms, from all fields, of the specified index reader.
*/
public static Query createQueryTermsQuery(IndexReader indexReader, String queryMetadataField, String unknownQueryField) throws IOException {
List<Term> extractedTerms = new ArrayList<>();
extractedTerms.add(new Term(unknownQueryField));
Fields fields = MultiFields.getFields(indexReader);
for (String field : fields) {
Terms terms = fields.terms(field);
if (terms == null) {
continue;
}
BytesRef fieldBr = new BytesRef(field);
TermsEnum tenum = terms.iterator();
for (BytesRef term = tenum.next(); term != null ; term = tenum.next()) {
BytesRefBuilder builder = new BytesRefBuilder();
builder.append(fieldBr);
builder.append(FIELD_VALUE_SEPARATOR);
builder.append(term);
extractedTerms.add(new Term(queryMetadataField, builder.toBytesRef()));
}
}
return new TermsQuery(extractedTerms);
}
/**
* Exception indicating that some or all query terms could not be extracted from a percolator query.
*/
public static class UnsupportedQueryException extends RuntimeException {
private final Query unsupportedQuery;
public UnsupportedQueryException(Query unsupportedQuery) {
super(LoggerMessageFormat.format("no query terms can be extracted from query [{}]", unsupportedQuery));
this.unsupportedQuery = unsupportedQuery;
}
/**
* The actual Lucene query that was unsupported and caused this exception to be thrown.
*/
public Query getUnsupportedQuery() {
return unsupportedQuery;
}
}
}
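To make the selection rules concrete, a small sketch (written as if from within org.elasticsearch.index.percolator, since extractQueryTerms(Query) is package-private; Lucene types as used elsewhere in this change):

import java.util.Set;

import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.TermQuery;

// Two required clauses: only one clause is kept, the one whose shortest term is longest.
BooleanQuery query = new BooleanQuery.Builder()
        .add(new TermQuery(new Term("body", "fox")), BooleanClause.Occur.MUST)
        .add(new TermQuery(new Term("body", "quick")), BooleanClause.Occur.MUST)
        .build();
Set<Term> terms = ExtractQueryTermsService.extractQueryTerms(query);
// terms == [body:quick] -- "quick" (5 bytes) beats "fox" (3 bytes)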

View File

@ -0,0 +1,150 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.percolator;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.search.Query;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperBuilders;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.core.StringFieldMapper;
import org.elasticsearch.index.query.QueryShardContext;
import java.io.IOException;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
public class PercolatorFieldMapper extends FieldMapper {
public static final String NAME = "query";
public static final String CONTENT_TYPE = "percolator";
public static final PercolatorFieldType FIELD_TYPE = new PercolatorFieldType();
private static final String EXTRACTED_TERMS_FIELD_NAME = "extracted_terms";
private static final String UNKNOWN_QUERY_FIELD_NAME = "unknown_query";
public static final String EXTRACTED_TERMS_FULL_FIELD_NAME = NAME + "." + EXTRACTED_TERMS_FIELD_NAME;
public static final String UNKNOWN_QUERY_FULL_FIELD_NAME = NAME + "." + UNKNOWN_QUERY_FIELD_NAME;
public static class Builder extends FieldMapper.Builder<Builder, PercolatorFieldMapper> {
private final QueryShardContext queryShardContext;
public Builder(QueryShardContext queryShardContext) {
super(NAME, FIELD_TYPE, FIELD_TYPE);
this.queryShardContext = queryShardContext;
}
@Override
public PercolatorFieldMapper build(BuilderContext context) {
context.path().add(name);
StringFieldMapper extractedTermsField = createStringFieldBuilder(EXTRACTED_TERMS_FIELD_NAME).build(context);
StringFieldMapper unknownQueryField = createStringFieldBuilder(UNKNOWN_QUERY_FIELD_NAME).build(context);
context.path().remove();
return new PercolatorFieldMapper(name(), fieldType, defaultFieldType, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo, queryShardContext, extractedTermsField, unknownQueryField);
}
static StringFieldMapper.Builder createStringFieldBuilder(String name) {
StringFieldMapper.Builder queryMetaDataFieldBuilder = MapperBuilders.stringField(name);
queryMetaDataFieldBuilder.docValues(false);
queryMetaDataFieldBuilder.store(false);
queryMetaDataFieldBuilder.tokenized(false);
queryMetaDataFieldBuilder.indexOptions(IndexOptions.DOCS);
return queryMetaDataFieldBuilder;
}
}
public static class TypeParser implements FieldMapper.TypeParser {
@Override
public Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
return new Builder(parserContext.queryShardContext());
}
}
public static final class PercolatorFieldType extends MappedFieldType {
public PercolatorFieldType() {
setName(NAME);
setIndexOptions(IndexOptions.NONE);
setDocValuesType(DocValuesType.NONE);
setStored(false);
}
public PercolatorFieldType(MappedFieldType ref) {
super(ref);
}
@Override
public MappedFieldType clone() {
return new PercolatorFieldType(this);
}
@Override
public String typeName() {
return CONTENT_TYPE;
}
}
private final boolean mapUnmappedFieldAsString;
private final QueryShardContext queryShardContext;
private final StringFieldMapper queryTermsField;
private final StringFieldMapper unknownQueryField;
public PercolatorFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings, MultiFields multiFields, CopyTo copyTo, QueryShardContext queryShardContext, StringFieldMapper queryTermsField, StringFieldMapper unknownQueryField) {
super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo);
this.queryShardContext = queryShardContext;
this.queryTermsField = queryTermsField;
this.unknownQueryField = unknownQueryField;
this.mapUnmappedFieldAsString = indexSettings.getAsBoolean(PercolatorQueriesRegistry.MAP_UNMAPPED_FIELDS_AS_STRING, false);
}
@Override
public Mapper parse(ParseContext context) throws IOException {
QueryShardContext queryShardContext = new QueryShardContext(this.queryShardContext);
Query query = PercolatorQueriesRegistry.parseQuery(queryShardContext, mapUnmappedFieldAsString, context.parser());
if (context.flyweight() == false) {
ExtractQueryTermsService.extractQueryTerms(query, context.doc(), queryTermsField.name(), unknownQueryField.name(), queryTermsField.fieldType());
}
return null;
}
@Override
public Iterator<Mapper> iterator() {
return Arrays.<Mapper>asList(queryTermsField, unknownQueryField).iterator();
}
@Override
protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
throw new UnsupportedOperationException("should not be invoked");
}
@Override
protected String contentType() {
return CONTENT_TYPE;
}
}
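At search time, the other half of the design builds a candidate query from the document being percolated, using the field names this mapper reserves. A hedged sketch (the MemoryIndex and analyzer choice are illustrative, not part of this change):

import java.io.IOException;

import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.search.Query;
import org.elasticsearch.index.percolator.ExtractQueryTermsService;
import org.elasticsearch.index.percolator.PercolatorFieldMapper;

static Query candidateQuery(String documentText) throws IOException {
    MemoryIndex memoryIndex = new MemoryIndex();
    memoryIndex.addField("body", documentText, new WhitespaceAnalyzer());
    try (IndexReader reader = memoryIndex.createSearcher().getIndexReader()) {
        // Matches every stored query whose extracted terms occur in the document,
        // plus every query flagged as unknown, so no query can be missed.
        return ExtractQueryTermsService.createQueryTermsQuery(reader,
                PercolatorFieldMapper.EXTRACTED_TERMS_FULL_FIELD_NAME,   // "query.extracted_terms"
                PercolatorFieldMapper.UNKNOWN_QUERY_FULL_FIELD_NAME);    // "query.unknown_query"
    }
}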

View File

@ -31,19 +31,15 @@ import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.metrics.CounterMetric;
 import org.elasticsearch.common.metrics.MeanMetric;
 import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
-import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.fielddata.IndexFieldDataService;
-import org.elasticsearch.index.indexing.IndexingOperationListener;
-import org.elasticsearch.index.indexing.ShardIndexingService;
-import org.elasticsearch.index.mapper.DocumentMapper;
-import org.elasticsearch.index.mapper.DocumentTypeListener;
 import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.Uid;
 import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
 import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.index.shard.AbstractIndexShardComponent;
 import org.elasticsearch.index.shard.ShardId;

@ -54,7 +50,6 @@ import java.io.IOException;

 import java.util.Map;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;

 /**
  * Each shard will have a percolator registry even if there isn't a {@link PercolatorService#TYPE_NAME} document type in the index.

@ -65,45 +60,27 @@

  */
 public final class PercolatorQueriesRegistry extends AbstractIndexShardComponent implements Closeable {

-    public final String MAP_UNMAPPED_FIELDS_AS_STRING = "index.percolator.map_unmapped_fields_as_string";
+    public final static String MAP_UNMAPPED_FIELDS_AS_STRING = "index.percolator.map_unmapped_fields_as_string";
-    // This is a shard level service, but these below are index level service:
-    private final MapperService mapperService;
-    private final IndexFieldDataService indexFieldDataService;
-    private final ShardIndexingService indexingService;

     private final ConcurrentMap<BytesRef, Query> percolateQueries = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency();
-    private final RealTimePercolatorOperationListener realTimePercolatorOperationListener = new RealTimePercolatorOperationListener();
-    private final PercolateTypeListener percolateTypeListener = new PercolateTypeListener();
-    private final AtomicBoolean realTimePercolatorEnabled = new AtomicBoolean(false);
     private final QueryShardContext queryShardContext;
     private boolean mapUnmappedFieldsAsString;
     private final MeanMetric percolateMetric = new MeanMetric();
     private final CounterMetric currentMetric = new CounterMetric();
     private final CounterMetric numberOfQueries = new CounterMetric();

-    public PercolatorQueriesRegistry(ShardId shardId, IndexSettings indexSettings,
-                                     ShardIndexingService indexingService, MapperService mapperService,
-                                     QueryShardContext queryShardContext,
-                                     IndexFieldDataService indexFieldDataService) {
+    public PercolatorQueriesRegistry(ShardId shardId, IndexSettings indexSettings, QueryShardContext queryShardContext) {
         super(shardId, indexSettings);
-        this.mapperService = mapperService;
-        this.indexingService = indexingService;
         this.queryShardContext = queryShardContext;
-        this.indexFieldDataService = indexFieldDataService;
         this.mapUnmappedFieldsAsString = this.indexSettings.getSettings().getAsBoolean(MAP_UNMAPPED_FIELDS_AS_STRING, false);
-        mapperService.addTypeListener(percolateTypeListener);
     }

-    public ConcurrentMap<BytesRef, Query> percolateQueries() {
+    public ConcurrentMap<BytesRef, Query> getPercolateQueries() {
         return percolateQueries;
     }

     @Override
     public void close() {
-        mapperService.removeTypeListener(percolateTypeListener);
-        indexingService.removeListener(realTimePercolatorOperationListener);
         clear();
     }

@ -111,11 +88,6 @@ public final class PercolatorQueriesRegistry extends AbstractIndexShardComponent

         percolateQueries.clear();
     }

-    public void enableRealTimePercolator() {
-        if (realTimePercolatorEnabled.compareAndSet(false, true)) {
-            indexingService.addListener(realTimePercolatorOperationListener);
-        }
-    }

     public void addPercolateQuery(String idAsString, BytesReference source) {
         Query newquery = parsePercolatorDocument(idAsString, source);

@ -133,9 +105,7 @@ public final class PercolatorQueriesRegistry extends AbstractIndexShardComponent

         }
     }

-    Query parsePercolatorDocument(String id, BytesReference source) {
-        String type = null;
-        BytesReference querySource = null;
+    public Query parsePercolatorDocument(String id, BytesReference source) {
         try (XContentParser sourceParser = XContentHelper.createParser(source)) {
             String currentFieldName = null;
             XContentParser.Token token = sourceParser.nextToken(); // move the START_OBJECT

@ -147,38 +117,21 @@ public final class PercolatorQueriesRegistry extends AbstractIndexShardComponent

                     currentFieldName = sourceParser.currentName();
                 } else if (token == XContentParser.Token.START_OBJECT) {
                     if ("query".equals(currentFieldName)) {
-                        if (type != null) {
-                            return parseQuery(type, sourceParser);
-                        } else {
-                            XContentBuilder builder = XContentFactory.contentBuilder(sourceParser.contentType());
-                            builder.copyCurrentStructure(sourceParser);
-                            querySource = builder.bytes();
-                            builder.close();
-                        }
+                        return parseQuery(queryShardContext, mapUnmappedFieldsAsString, sourceParser);
                     } else {
                         sourceParser.skipChildren();
                     }
                 } else if (token == XContentParser.Token.START_ARRAY) {
                     sourceParser.skipChildren();
-                } else if (token.isValue()) {
-                    if ("type".equals(currentFieldName)) {
-                        type = sourceParser.text();
-                    }
                 }
             }
-            try (XContentParser queryParser = XContentHelper.createParser(querySource)) {
-                return parseQuery(type, queryParser);
-            }
         } catch (Exception e) {
             throw new PercolatorException(shardId().index(), "failed to parse query [" + id + "]", e);
         }
+        return null;
     }

-    private Query parseQuery(String type, XContentParser parser) {
-        String[] previousTypes = null;
-        if (type != null) {
-            previousTypes = QueryShardContext.setTypesWithPrevious(type);
-        }
+    public static Query parseQuery(QueryShardContext queryShardContext, boolean mapUnmappedFieldsAsString, XContentParser parser) {
         QueryShardContext context = new QueryShardContext(queryShardContext);
         try {
             context.reset(parser);

@ -200,29 +153,16 @@ public final class PercolatorQueriesRegistry extends AbstractIndexShardComponent

         } catch (IOException e) {
             throw new ParsingException(parser.getTokenLocation(), "Failed to parse", e);
         } finally {
-            if (type != null) {
-                QueryShardContext.setTypes(previousTypes);
-            }
             context.reset(null);
         }
     }

-    private class PercolateTypeListener implements DocumentTypeListener {
-        @Override
-        public void beforeCreate(DocumentMapper mapper) {
-            if (PercolatorService.TYPE_NAME.equals(mapper.type())) {
-                enableRealTimePercolator();
-            }
-        }
-    }

     public void loadQueries(IndexReader reader) {
         logger.trace("loading percolator queries...");
         final int loadedQueries;
         try {
             Query query = new TermQuery(new Term(TypeFieldMapper.NAME, PercolatorService.TYPE_NAME));
-            QueriesLoaderCollector queryCollector = new QueriesLoaderCollector(PercolatorQueriesRegistry.this, logger, mapperService, indexFieldDataService);
+            QueriesLoaderCollector queryCollector = new QueriesLoaderCollector(PercolatorQueriesRegistry.this, logger);
             IndexSearcher indexSearcher = new IndexSearcher(reader);
             indexSearcher.setQueryCache(null);
             indexSearcher.search(query, queryCollector);

@ -238,30 +178,26 @@ public final class PercolatorQueriesRegistry extends AbstractIndexShardComponent

         logger.debug("done loading [{}] percolator queries", loadedQueries);
     }

-    private class RealTimePercolatorOperationListener extends IndexingOperationListener {
-        @Override
-        public Engine.Index preIndex(Engine.Index operation) {
-            // validate the query here, before we index
-            if (PercolatorService.TYPE_NAME.equals(operation.type())) {
-                parsePercolatorDocument(operation.id(), operation.source());
-            }
-            return operation;
-        }
-
-        @Override
-        public void postIndexUnderLock(Engine.Index index) {
-            // add the query under a doc lock
-            if (PercolatorService.TYPE_NAME.equals(index.type())) {
-                addPercolateQuery(index.id(), index.source());
-            }
-        }
-
-        @Override
-        public void postDeleteUnderLock(Engine.Delete delete) {
-            // remove the query under a lock
-            if (PercolatorService.TYPE_NAME.equals(delete.type())) {
-                removePercolateQuery(delete.id());
-            }
-        }
-    }
+    public boolean isPercolatorQuery(Engine.Index operation) {
+        if (PercolatorService.TYPE_NAME.equals(operation.type())) {
+            // validate the query here, before we index
+            parsePercolatorDocument(operation.id(), operation.source());
+            return true;
+        }
+        return false;
+    }
+
+    public boolean isPercolatorQuery(Engine.Delete operation) {
+        return PercolatorService.TYPE_NAME.equals(operation.type());
+    }
+
+    public synchronized void updatePercolateQuery(Engine engine, String id) {
+        // this can be called out of order as long as for every change to a percolator document it's invoked. This will always
+        // fetch the latest change but might fetch the same change twice if updates / deletes happen concurrently.
+        try (Engine.GetResult getResult = engine.get(new Engine.Get(true, new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(PercolatorService.TYPE_NAME, id))))) {
+            if (getResult.exists()) {
+                addPercolateQuery(id, getResult.source().source);
+            } else {
+                removePercolateQuery(id);
+            }
+        }
+    }

View File

@ -45,17 +45,13 @@ final class QueriesLoaderCollector extends SimpleCollector {
     private final Map<BytesRef, Query> queries = new HashMap<>();
     private final FieldsVisitor fieldsVisitor = new FieldsVisitor(true);
     private final PercolatorQueriesRegistry percolator;
-    private final IndexFieldData<?> uidFieldData;
     private final ESLogger logger;

-    private SortedBinaryDocValues uidValues;
     private LeafReader reader;

-    QueriesLoaderCollector(PercolatorQueriesRegistry percolator, ESLogger logger, MapperService mapperService, IndexFieldDataService indexFieldDataService) {
+    QueriesLoaderCollector(PercolatorQueriesRegistry percolator, ESLogger logger) {
         this.percolator = percolator;
         this.logger = logger;
-        final MappedFieldType uidMapper = mapperService.fullName(UidFieldMapper.NAME);
-        this.uidFieldData = indexFieldDataService.getForField(uidMapper);
     }

     public Map<BytesRef, Query> queries() {

@ -64,35 +60,27 @@ final class QueriesLoaderCollector extends SimpleCollector {

     @Override
     public void collect(int doc) throws IOException {
-        // the _source is the query
-        uidValues.setDocument(doc);
-        if (uidValues.count() > 0) {
-            assert uidValues.count() == 1;
-            final BytesRef uid = uidValues.valueAt(0);
-            final BytesRef id = Uid.splitUidIntoTypeAndId(uid)[1];
         fieldsVisitor.reset();
         reader.document(doc, fieldsVisitor);
+        final Uid uid = fieldsVisitor.uid();
         try {
             // id is only used for logging, if we fail we log the id in the catch statement
             final Query parseQuery = percolator.parsePercolatorDocument(null, fieldsVisitor.source());
             if (parseQuery != null) {
-                queries.put(BytesRef.deepCopyOf(id), parseQuery);
+                queries.put(new BytesRef(uid.id()), parseQuery);
             } else {
-                logger.warn("failed to add query [{}] - parser returned null", id);
+                logger.warn("failed to add query [{}] - parser returned null", uid);
             }
         } catch (Exception e) {
-            logger.warn("failed to add query [{}]", e, id.utf8ToString());
+            logger.warn("failed to add query [{}]", e, uid);
-        }
         }
     }

     @Override
     protected void doSetNextReader(LeafReaderContext context) throws IOException {
         reader = context.reader();
-        uidValues = uidFieldData.load(context).getBytesValues();
     }

     @Override
View File

@ -81,8 +81,6 @@ import org.elasticsearch.index.fielddata.ShardFieldData;
 import org.elasticsearch.index.flush.FlushStats;
 import org.elasticsearch.index.get.GetStats;
 import org.elasticsearch.index.get.ShardGetService;
-import org.elasticsearch.index.indexing.IndexingStats;
-import org.elasticsearch.index.indexing.ShardIndexingService;
 import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.DocumentMapperForType;
 import org.elasticsearch.index.mapper.MapperService;

@ -115,7 +113,7 @@ import org.elasticsearch.index.warmer.ShardIndexWarmerService;

 import org.elasticsearch.index.warmer.WarmerStats;
 import org.elasticsearch.indices.IndicesWarmer;
 import org.elasticsearch.indices.cache.query.IndicesQueryCache;
-import org.elasticsearch.indices.memory.IndexingMemoryController;
+import org.elasticsearch.indices.IndexingMemoryController;
 import org.elasticsearch.indices.recovery.RecoveryFailedException;
 import org.elasticsearch.indices.recovery.RecoveryState;
 import org.elasticsearch.percolator.PercolatorService;

@ -127,6 +125,8 @@ import java.io.IOException;

 import java.io.PrintStream;
 import java.nio.channels.ClosedByInterruptException;
 import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;

@ -145,7 +145,7 @@ public class IndexShard extends AbstractIndexShardComponent {

     private final IndexCache indexCache;
     private final Store store;
     private final MergeSchedulerConfig mergeSchedulerConfig;
-    private final ShardIndexingService indexingService;
+    private final InternalIndexingStats internalIndexingStats;
     private final ShardSearchStats searchService;
     private final ShardGetService getService;
     private final ShardIndexWarmerService shardWarmerService;

@ -169,7 +169,6 @@ public class IndexShard extends AbstractIndexShardComponent {

     private final IndexEventListener indexEventListener;
     private final IndexSettings idxSettings;
     private final NodeServicesProvider provider;

     private TimeValue refreshInterval;

     private volatile ScheduledFuture<?> refreshScheduledFuture;

@ -178,6 +177,8 @@ public class IndexShard extends AbstractIndexShardComponent {

     protected final AtomicReference<Engine> currentEngineReference = new AtomicReference<>();
     protected final EngineFactory engineFactory;

+    private final IndexingOperationListener indexingOperationListeners;
+
     @Nullable
     private RecoveryState recoveryState;

@ -217,7 +218,7 @@ public class IndexShard extends AbstractIndexShardComponent {

     public IndexShard(ShardId shardId, IndexSettings indexSettings, ShardPath path, Store store, IndexCache indexCache,
                       MapperService mapperService, SimilarityService similarityService, IndexFieldDataService indexFieldDataService,
                       @Nullable EngineFactory engineFactory,
-                      IndexEventListener indexEventListener, IndexSearcherWrapper indexSearcherWrapper, NodeServicesProvider provider) {
+                      IndexEventListener indexEventListener, IndexSearcherWrapper indexSearcherWrapper, NodeServicesProvider provider, IndexingOperationListener... listeners) {
         super(shardId, indexSettings);
         final Settings settings = indexSettings.getSettings();
         this.inactiveTime = settings.getAsTime(INDEX_SHARD_INACTIVE_TIME_SETTING, settings.getAsTime(INDICES_INACTIVE_TIME_SETTING, TimeValue.timeValueMinutes(5)));

@ -234,7 +235,10 @@ public class IndexShard extends AbstractIndexShardComponent {

         this.threadPool = provider.getThreadPool();
         this.mapperService = mapperService;
         this.indexCache = indexCache;
-        this.indexingService = new ShardIndexingService(shardId, indexSettings);
+        this.internalIndexingStats = new InternalIndexingStats();
+        final List<IndexingOperationListener> listenersList = new ArrayList<>(Arrays.asList(listeners));
+        listenersList.add(internalIndexingStats);
+        this.indexingOperationListeners = new IndexingOperationListener.CompositeListener(listenersList, logger);
         this.getService = new ShardGetService(indexSettings, this, mapperService);
         this.termVectorsService = provider.getTermVectorsService();
         this.searchService = new ShardSearchStats(settings);

@ -270,12 +274,7 @@ public class IndexShard extends AbstractIndexShardComponent {

         this.indexShardOperationCounter = new IndexShardOperationCounter(logger, shardId);
         this.provider = provider;
         this.searcherWrapper = indexSearcherWrapper;
-        this.percolatorQueriesRegistry = new PercolatorQueriesRegistry(shardId, indexSettings, indexingService, mapperService, newQueryShardContext(), indexFieldDataService);
+        this.percolatorQueriesRegistry = new PercolatorQueriesRegistry(shardId, indexSettings, newQueryShardContext());
-        if (mapperService.hasMapping(PercolatorService.TYPE_NAME)) {
-            percolatorQueriesRegistry.enableRealTimePercolator();
-        }
         // We start up inactive
         active.set(false);
     }

@ -293,10 +292,6 @@ public class IndexShard extends AbstractIndexShardComponent {

         return true;
     }

-    public ShardIndexingService indexingService() {
-        return this.indexingService;
-    }

     public ShardGetService getService() {
         return this.getService;
     }

@ -510,19 +505,24 @@ public class IndexShard extends AbstractIndexShardComponent {

     public boolean index(Engine.Index index) {
         ensureWriteAllowed(index);
         markLastWrite();
-        index = indexingService.preIndex(index);
+        index = indexingOperationListeners.preIndex(index);
         final boolean created;
         try {
             if (logger.isTraceEnabled()) {
                 logger.trace("index [{}][{}]{}", index.type(), index.id(), index.docs());
             }
-            created = getEngine().index(index);
+            final boolean isPercolatorQuery = percolatorQueriesRegistry.isPercolatorQuery(index);
+            Engine engine = getEngine();
+            created = engine.index(index);
+            if (isPercolatorQuery) {
+                percolatorQueriesRegistry.updatePercolateQuery(engine, index.id());
+            }
             index.endTime(System.nanoTime());
         } catch (Throwable ex) {
-            indexingService.postIndex(index, ex);
+            indexingOperationListeners.postIndex(index, ex);
             throw ex;
         }
-        indexingService.postIndex(index);
+        indexingOperationListeners.postIndex(index);
         return created;
     }

@ -553,18 +553,23 @@ public class IndexShard extends AbstractIndexShardComponent {

     public void delete(Engine.Delete delete) {
         ensureWriteAllowed(delete);
         markLastWrite();
-        delete = indexingService.preDelete(delete);
+        delete = indexingOperationListeners.preDelete(delete);
         try {
             if (logger.isTraceEnabled()) {
                 logger.trace("delete [{}]", delete.uid().text());
             }
-            getEngine().delete(delete);
+            final boolean isPercolatorQuery = percolatorQueriesRegistry.isPercolatorQuery(delete);
+            Engine engine = getEngine();
+            engine.delete(delete);
+            if (isPercolatorQuery) {
+                percolatorQueriesRegistry.updatePercolateQuery(engine, delete.id());
+            }
             delete.endTime(System.nanoTime());
         } catch (Throwable ex) {
-            indexingService.postDelete(delete, ex);
+            indexingOperationListeners.postDelete(delete, ex);
             throw ex;
         }
-        indexingService.postDelete(delete);
+        indexingOperationListeners.postDelete(delete);
     }

     public Engine.GetResult get(Engine.Get get) {

@ -615,7 +620,17 @@ public class IndexShard extends AbstractIndexShardComponent {

     }

     public IndexingStats indexingStats(String... types) {
-        return indexingService.stats(types);
+        Engine engine = getEngineOrNull();
+        final boolean throttled;
+        final long throttleTimeInMillis;
+        if (engine == null) {
+            throttled = false;
+            throttleTimeInMillis = 0;
+        } else {
+            throttled = engine.isThrottled();
+            throttleTimeInMillis = engine.getIndexThrottleTimeInMillis();
+        }
+        return internalIndexingStats.stats(throttled, throttleTimeInMillis, types);
     }

     public SearchStats searchStats(String... groups) {

@ -1241,7 +1256,6 @@ public class IndexShard extends AbstractIndexShardComponent {

         }
         mergePolicyConfig.onRefreshSettings(settings);
         searchService.onRefreshSettings(settings);
-        indexingService.onRefreshSettings(settings);
         if (change) {
             getEngine().onSettingsChanged();
         }

@ -1277,6 +1291,14 @@ public class IndexShard extends AbstractIndexShardComponent {

         return inactiveTime;
     }

+    /**
+     * Should be called for each no-op update operation to increment relevant statistics.
+     * @param type the doc type of the update
+     */
+    public void noopUpdate(String type) {
+        internalIndexingStats.noopUpdate(type);
+    }
+
     class EngineRefresher implements Runnable {
         @Override
         public void run() {

@ -1501,7 +1523,7 @@ public class IndexShard extends AbstractIndexShardComponent {

         };
         final Engine.Warmer engineWarmer = (searcher, toLevel) -> warmer.warm(searcher, this, idxSettings, toLevel);
         return new EngineConfig(shardId,
-            threadPool, indexingService, indexSettings, engineWarmer, store, deletionPolicy, mergePolicyConfig.getMergePolicy(), mergeSchedulerConfig,
+            threadPool, indexSettings, engineWarmer, store, deletionPolicy, mergePolicyConfig.getMergePolicy(), mergeSchedulerConfig,
             mapperService.indexAnalyzer(), similarityService.similarity(mapperService), codecService, shardEventListener, translogRecoveryPerformer, indexCache.query(), cachingPolicy, translogConfig, inactiveTime);
     }
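Condensed from the hunks above, the ordering contract the refactor establishes on the write path: listeners bracket the engine call, and the percolator registry is refreshed only after the engine has accepted the operation. A sketch (the fields are IndexShard's own; this restates the diff rather than adding behavior):

index = indexingOperationListeners.preIndex(index);        // listeners see the operation first
try {
    // computed before the write, which also validates percolator queries up front
    final boolean isPercolatorQuery = percolatorQueriesRegistry.isPercolatorQuery(index);
    Engine engine = getEngine();
    created = engine.index(index);
    if (isPercolatorQuery) {
        // the realtime get inside updatePercolateQuery fetches the latest source, so
        // concurrent updates to the same query id converge on the newest version
        percolatorQueriesRegistry.updatePercolateQuery(engine, index.id());
    }
} catch (Throwable ex) {
    indexingOperationListeners.postIndex(index, ex);        // failure callback, then rethrow
    throw ex;
}
indexingOperationListeners.postIndex(index);                // success callback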

View File

@ -0,0 +1,152 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.shard;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.index.engine.Engine;
import java.util.List;
/**
* A listener for indexing and delete events.
*/
public interface IndexingOperationListener {
/**
* Called before the indexing occurs.
*/
default Engine.Index preIndex(Engine.Index operation) {
return operation;
}
/**
* Called after the indexing operation occurred.
*/
default void postIndex(Engine.Index index) {}
/**
* Called after the indexing operation occurred with exception.
*/
default void postIndex(Engine.Index index, Throwable ex) {}
/**
* Called before the delete occurs.
*/
default Engine.Delete preDelete(Engine.Delete delete) {
return delete;
}
/**
* Called after the delete operation occurred.
*/
default void postDelete(Engine.Delete delete) {}
/**
* Called after the delete operation occurred with exception.
*/
default void postDelete(Engine.Delete delete, Throwable ex) {}
/**
* A composite listener that multiplexes calls to each of the registered listeners' methods.
*/
final class CompositeListener implements IndexingOperationListener {
private final List<IndexingOperationListener> listeners;
private final ESLogger logger;
public CompositeListener(List<IndexingOperationListener> listeners, ESLogger logger) {
this.listeners = listeners;
this.logger = logger;
}
@Override
public Engine.Index preIndex(Engine.Index operation) {
assert operation != null;
for (IndexingOperationListener listener : listeners) {
try {
listener.preIndex(operation);
} catch (Throwable t) {
logger.warn("preIndex listener [{}] failed", t, listener);
}
}
return operation;
}
@Override
public void postIndex(Engine.Index index) {
assert index != null;
for (IndexingOperationListener listener : listeners) {
try {
listener.postIndex(index);
} catch (Throwable t) {
logger.warn("postIndex listener [{}] failed", t, listener);
}
}
}
@Override
public void postIndex(Engine.Index index, Throwable ex) {
assert index != null && ex != null;
for (IndexingOperationListener listener : listeners) {
try {
listener.postIndex(index, ex);
} catch (Throwable t) {
logger.warn("postIndex listener [{}] failed", t, listener);
}
}
}
@Override
public Engine.Delete preDelete(Engine.Delete delete) {
assert delete != null;
for (IndexingOperationListener listener : listeners) {
try {
listener.preDelete(delete);
} catch (Throwable t) {
logger.warn("preDelete listener [{}] failed", t, listener);
}
}
return delete;
}
@Override
public void postDelete(Engine.Delete delete) {
assert delete != null;
for (IndexingOperationListener listener : listeners) {
try {
listener.postDelete(delete);
} catch (Throwable t) {
logger.warn("postDelete listener [{}] failed", t, listener);
}
}
}
@Override
public void postDelete(Engine.Delete delete, Throwable ex) {
assert delete != null && ex != null;
for (IndexingOperationListener listener : listeners) {
try {
listener.postDelete(delete, ex);
} catch (Throwable t) {
logger.warn("postDelete listener [{}] failed", t, listener);
}
}
}
}
}
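
To make the contract above concrete, here is a minimal sketch (not part of this commit; the class name is made up) of a listener that counts failed index operations, relying only on the default methods of the interface introduced above:

import java.util.concurrent.atomic.AtomicLong;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.shard.IndexingOperationListener;

public class FailureCountingListener implements IndexingOperationListener {
    private final AtomicLong indexFailures = new AtomicLong();

    @Override
    public void postIndex(Engine.Index index, Throwable ex) {
        // invoked after an index operation failed with an exception
        indexFailures.incrementAndGet();
    }

    public long failedIndexOperations() {
        return indexFailures.get();
    }
}

All other callbacks fall through to the no-op defaults, which is the point of making them default methods.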

View File

@@ -17,7 +17,7 @@
* under the License. * under the License.
*/ */
package org.elasticsearch.index.indexing; package org.elasticsearch.index.shard;
import org.elasticsearch.Version; import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Nullable;

View File

@@ -0,0 +1,154 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.shard;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.metrics.CounterMetric;
import org.elasticsearch.common.metrics.MeanMetric;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.index.engine.Engine;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import static java.util.Collections.emptyMap;
/**
* Internal class that maintains relevant indexing statistics / metrics.
* @see IndexShard
*/
final class InternalIndexingStats implements IndexingOperationListener {
private final StatsHolder totalStats = new StatsHolder();
private volatile Map<String, StatsHolder> typesStats = emptyMap();
/**
* Returns the stats, including type-specific stats. If types is null or empty, no per-type stats
* are returned. Otherwise stats are returned only for the provided types, or for all types when
* <tt>_all</tt> is given.
*/
IndexingStats stats(boolean isThrottled, long currentThrottleInMillis, String... types) {
IndexingStats.Stats total = totalStats.stats(isThrottled, currentThrottleInMillis);
Map<String, IndexingStats.Stats> typesSt = null;
if (types != null && types.length > 0) {
typesSt = new HashMap<>(typesStats.size());
if (types.length == 1 && types[0].equals("_all")) {
for (Map.Entry<String, StatsHolder> entry : typesStats.entrySet()) {
typesSt.put(entry.getKey(), entry.getValue().stats(isThrottled, currentThrottleInMillis));
}
} else {
for (Map.Entry<String, StatsHolder> entry : typesStats.entrySet()) {
if (Regex.simpleMatch(types, entry.getKey())) {
typesSt.put(entry.getKey(), entry.getValue().stats(isThrottled, currentThrottleInMillis));
}
}
}
}
return new IndexingStats(total, typesSt);
}
@Override
public Engine.Index preIndex(Engine.Index operation) {
totalStats.indexCurrent.inc();
typeStats(operation.type()).indexCurrent.inc();
return operation;
}
@Override
public void postIndex(Engine.Index index) {
long took = index.endTime() - index.startTime();
totalStats.indexMetric.inc(took);
totalStats.indexCurrent.dec();
StatsHolder typeStats = typeStats(index.type());
typeStats.indexMetric.inc(took);
typeStats.indexCurrent.dec();
}
@Override
public void postIndex(Engine.Index index, Throwable ex) {
totalStats.indexCurrent.dec();
typeStats(index.type()).indexCurrent.dec();
totalStats.indexFailed.inc();
typeStats(index.type()).indexFailed.inc();
}
@Override
public Engine.Delete preDelete(Engine.Delete delete) {
totalStats.deleteCurrent.inc();
typeStats(delete.type()).deleteCurrent.inc();
return delete;
}
@Override
public void postDelete(Engine.Delete delete) {
long took = delete.endTime() - delete.startTime();
totalStats.deleteMetric.inc(took);
totalStats.deleteCurrent.dec();
StatsHolder typeStats = typeStats(delete.type());
typeStats.deleteMetric.inc(took);
typeStats.deleteCurrent.dec();
}
@Override
public void postDelete(Engine.Delete delete, Throwable ex) {
totalStats.deleteCurrent.dec();
typeStats(delete.type()).deleteCurrent.dec();
}
public void noopUpdate(String type) {
totalStats.noopUpdates.inc();
typeStats(type).noopUpdates.inc();
}
private StatsHolder typeStats(String type) {
StatsHolder stats = typesStats.get(type);
if (stats == null) {
synchronized (this) {
stats = typesStats.get(type);
if (stats == null) {
stats = new StatsHolder();
typesStats = MapBuilder.newMapBuilder(typesStats).put(type, stats).immutableMap();
}
}
}
return stats;
}
static class StatsHolder {
private final MeanMetric indexMetric = new MeanMetric();
private final MeanMetric deleteMetric = new MeanMetric();
private final CounterMetric indexCurrent = new CounterMetric();
private final CounterMetric indexFailed = new CounterMetric();
private final CounterMetric deleteCurrent = new CounterMetric();
private final CounterMetric noopUpdates = new CounterMetric();
IndexingStats.Stats stats(boolean isThrottled, long currentThrottleMillis) {
return new IndexingStats.Stats(
indexMetric.count(), TimeUnit.NANOSECONDS.toMillis(indexMetric.sum()), indexCurrent.count(), indexFailed.count(),
deleteMetric.count(), TimeUnit.NANOSECONDS.toMillis(deleteMetric.sum()), deleteCurrent.count(),
noopUpdates.count(), isThrottled, TimeUnit.MILLISECONDS.toMillis(currentThrottleMillis));
}
void clear() {
indexMetric.clear();
deleteMetric.clear();
}
}
}
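
The typeStats(String) method above uses a copy-on-write map with double-checked locking: reads are lock-free against a volatile immutable map, and writers synchronize, re-check, and publish a fresh copy. Here is a minimal sketch of the same pattern with plain JDK types standing in for MapBuilder (the class and method names are illustrative only):

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Supplier;

final class CopyOnWriteRegistry<K, V> {
    private volatile Map<K, V> map = Collections.emptyMap();

    V computeIfAbsent(K key, Supplier<V> factory) {
        V value = map.get(key);       // fast path: no locking for the common case
        if (value == null) {
            synchronized (this) {
                value = map.get(key); // re-check under the lock
                if (value == null) {
                    value = factory.get();
                    Map<K, V> copy = new HashMap<>(map);
                    copy.put(key, value);
                    map = Collections.unmodifiableMap(copy); // publish the new immutable map
                }
            }
        }
        return value;
    }
}

The trade-off is cheap reads at the cost of copying the whole map on each insert, which fits here because new types are rare while stats lookups happen on every operation.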

View File

@@ -70,6 +70,7 @@ public class TranslogRecoveryPerformer {
performRecoveryOperation(engine, operation, false); performRecoveryOperation(engine, operation, false);
numOps++; numOps++;
} }
engine.getTranslog().sync();
} catch (Throwable t) { } catch (Throwable t) {
throw new BatchOperationException(shardId, "failed to apply batch translog operation", numOps, t); throw new BatchOperationException(shardId, "failed to apply batch translog operation", numOps, t);
} }
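
The hunk above moves durability to the batch boundary: all operations are replayed first and the translog is fsynced once. A minimal sketch of that ordering, with stand-in types (Op, Log) since the real signatures are not shown here:

import java.io.IOException;
import java.util.List;

final class BatchReplaySketch {
    interface Op { void apply(); }
    interface Log { void sync() throws IOException; }

    static int replay(List<Op> batch, Log translog) throws IOException {
        int numOps = 0;
        for (Op op : batch) {
            op.apply(); // performRecoveryOperation(engine, operation, false) in the real code
            numOps++;
        }
        translog.sync(); // one durable fsync for the whole batch instead of one per operation
        return numOps;
    }
}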

View File

@@ -51,6 +51,7 @@ import org.elasticsearch.index.shard.IndexShardComponent; import java.io.Closeable; import java.io.Closeable;
import java.io.Closeable; import java.io.Closeable;
import java.io.EOFException; import java.io.EOFException;
import java.io.IOException; import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel; import java.nio.channels.FileChannel;
import java.nio.file.DirectoryStream; import java.nio.file.DirectoryStream;
import java.nio.file.Files; import java.nio.file.Files;
@@ -163,6 +164,21 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
try { try {
if (translogGeneration != null) { if (translogGeneration != null) {
final Checkpoint checkpoint = readCheckpoint(); final Checkpoint checkpoint = readCheckpoint();
final Path nextTranslogFile = location.resolve(getFilename(checkpoint.generation + 1));
final Path currentCheckpointFile = location.resolve(getCommitCheckpointFileName(checkpoint.generation));
// This is special handling for the error condition where we create a new writer but fail to bake
// the newly written file (generation + 1) into the checkpoint. This is still a valid state; we
// just need to clean up before we continue.
// We hit this before and then blindly deleted the new generation even though we had managed to
// bake it in; see for example:
// https://discuss.elastic.co/t/cannot-recover-index-because-of-missing-tanslog-files/38336
//
// For this to happen we must have already copied translog.ckp to translog-gen.ckp, so we first
// check whether that file exists; if it does not, we don't try to clean up and instead wait until
// we fail creating the file.
assert Files.exists(nextTranslogFile) == false || Files.size(nextTranslogFile) <= TranslogWriter.getHeaderLength(translogUUID) : "unexpected translog file: [" + nextTranslogFile + "]";
if (Files.exists(currentCheckpointFile) // current checkpoint is already copied
&& Files.deleteIfExists(nextTranslogFile)) { // delete it and log a warning
logger.warn("deleted previously created, but not yet committed, next generation [{}]. This can happen due to a tragic exception when creating a new generation", nextTranslogFile.getFileName());
}
this.recoveredTranslogs = recoverFromFiles(translogGeneration, checkpoint); this.recoveredTranslogs = recoverFromFiles(translogGeneration, checkpoint);
if (recoveredTranslogs.isEmpty()) { if (recoveredTranslogs.isEmpty()) {
throw new IllegalStateException("at least one reader must be recovered"); throw new IllegalStateException("at least one reader must be recovered");
@@ -425,7 +441,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
if (config.isSyncOnEachOperation()) { if (config.isSyncOnEachOperation()) {
current.sync(); current.sync();
} }
assert current.assertBytesAtLocation(location, bytes); assert assertBytesAtLocation(location, bytes);
return location; return location;
} }
} catch (AlreadyClosedException | IOException ex) { } catch (AlreadyClosedException | IOException ex) {
@@ -439,6 +455,13 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
} }
} }
boolean assertBytesAtLocation(Translog.Location location, BytesReference expectedBytes) throws IOException {
// tests can override this
ByteBuffer buffer = ByteBuffer.allocate(location.size);
current.readBytes(buffer, location.translogLocation);
return new BytesArray(buffer.array()).equals(expectedBytes);
}
/** /**
* Snapshots the current transaction log allowing to safely iterate over the snapshot. * Snapshots the current transaction log allowing to safely iterate over the snapshot.
* Snapshots are fixed in time and will not be updated with future operations. * Snapshots are fixed in time and will not be updated with future operations.
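
A minimal sketch of the recovery-time cleanup added above, written as a hypothetical standalone helper; the translog-<gen>.tlog / translog-<gen>.ckp file names are assumptions based on the getFilename and getCommitCheckpointFileName calls:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

final class TranslogCleanupSketch {
    /** Deletes an uncommitted next-generation translog file left behind by a failed writer creation. */
    static boolean cleanupOrphanedGeneration(Path location, long checkpointGeneration) throws IOException {
        Path nextTranslogFile = location.resolve("translog-" + (checkpointGeneration + 1) + ".tlog");
        Path currentCheckpointFile = location.resolve("translog-" + checkpointGeneration + ".ckp");
        // only delete once the per-generation checkpoint copy proves the orphan was never committed
        return Files.exists(currentCheckpointFile) && Files.deleteIfExists(nextTranslogFile);
    }
}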

View File

@@ -69,9 +69,17 @@ public class TranslogWriter extends TranslogReader {
totalOffset = lastSyncedOffset; totalOffset = lastSyncedOffset;
} }
static int getHeaderLength(String translogUUID) {
return getHeaderLength(new BytesRef(translogUUID).length);
}
private static int getHeaderLength(int uuidLength) {
return CodecUtil.headerLength(TRANSLOG_CODEC) + uuidLength + RamUsageEstimator.NUM_BYTES_INT;
}
public static TranslogWriter create(ShardId shardId, String translogUUID, long fileGeneration, Path file, Callback<ChannelReference> onClose, ChannelFactory channelFactory, ByteSizeValue bufferSize) throws IOException { public static TranslogWriter create(ShardId shardId, String translogUUID, long fileGeneration, Path file, Callback<ChannelReference> onClose, ChannelFactory channelFactory, ByteSizeValue bufferSize) throws IOException {
final BytesRef ref = new BytesRef(translogUUID); final BytesRef ref = new BytesRef(translogUUID);
final int headerLength = CodecUtil.headerLength(TRANSLOG_CODEC) + ref.length + RamUsageEstimator.NUM_BYTES_INT; final int headerLength = getHeaderLength(ref.length);
final FileChannel channel = channelFactory.open(file); final FileChannel channel = channelFactory.open(file);
try { try {
// This OutputStreamDataOutput is intentionally not closed because // This OutputStreamDataOutput is intentionally not closed because
@@ -80,17 +88,14 @@ public class TranslogWriter extends TranslogReader {
CodecUtil.writeHeader(out, TRANSLOG_CODEC, VERSION); CodecUtil.writeHeader(out, TRANSLOG_CODEC, VERSION);
out.writeInt(ref.length); out.writeInt(ref.length);
out.writeBytes(ref.bytes, ref.offset, ref.length); out.writeBytes(ref.bytes, ref.offset, ref.length);
channel.force(false); channel.force(true);
writeCheckpoint(headerLength, 0, file.getParent(), fileGeneration, StandardOpenOption.WRITE); writeCheckpoint(headerLength, 0, file.getParent(), fileGeneration, StandardOpenOption.WRITE);
final TranslogWriter writer = new TranslogWriter(shardId, fileGeneration, new ChannelReference(file, fileGeneration, channel, onClose), bufferSize); final TranslogWriter writer = new TranslogWriter(shardId, fileGeneration, new ChannelReference(file, fileGeneration, channel, onClose), bufferSize);
return writer; return writer;
} catch (Throwable throwable){ } catch (Throwable throwable){
// if we fail to bake the file-generation into the checkpoint we stick with the file and once we recover and that
// file exists we remove it. We only apply this logic to the checkpoint.generation+1 any other file with a higher generation is an error condition
IOUtils.closeWhileHandlingException(channel); IOUtils.closeWhileHandlingException(channel);
try {
Files.delete(file); // remove the file as well
} catch (IOException ex) {
throwable.addSuppressed(ex);
}
throw throwable; throw throwable;
} }
} }
@@ -213,11 +218,6 @@ } }
} }
} }
boolean assertBytesAtLocation(Translog.Location location, BytesReference expectedBytes) throws IOException {
ByteBuffer buffer = ByteBuffer.allocate(location.size);
readBytes(buffer, location.translogLocation);
return new BytesArray(buffer.array()).equals(expectedBytes);
}
private long getWrittenOffset() throws IOException { private long getWrittenOffset() throws IOException {
return channelReference.getChannel().position(); return channelReference.getChannel().position();
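
The header length factored into getHeaderLength() above is the Lucene codec header, a 4-byte length prefix, and the UTF-8 bytes of the translog UUID. A minimal sketch of the same computation (class and method names invented for illustration; Integer.BYTES equals RamUsageEstimator.NUM_BYTES_INT):

import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.util.BytesRef;

final class HeaderLengthSketch {
    static int headerLength(String codecName, String translogUUID) {
        int uuidLength = new BytesRef(translogUUID).length; // UTF-8 byte length of the UUID
        return CodecUtil.headerLength(codecName) + Integer.BYTES + uuidLength;
    }
}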

View File

@@ -17,10 +17,9 @@
* under the License. * under the License.
*/ */
package org.elasticsearch.indices.memory; package org.elasticsearch.indices;
import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.ByteSizeValue;
@@ -32,16 +31,16 @@ import org.elasticsearch.index.engine.FlushNotAllowedEngineException;
import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexEventListener;
import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
import java.io.Closeable;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.EnumSet; import java.util.EnumSet;
import java.util.List; import java.util.List;
import java.util.concurrent.ScheduledFuture; import java.util.concurrent.ScheduledFuture;
public class IndexingMemoryController extends AbstractLifecycleComponent<IndexingMemoryController> implements IndexEventListener { public class IndexingMemoryController extends AbstractComponent implements IndexEventListener, Closeable {
/** How much heap (% or bytes) we will share across all actively indexing shards on this node (default: 10%). */ /** How much heap (% or bytes) we will share across all actively indexing shards on this node (default: 10%). */
public static final String INDEX_BUFFER_SIZE_SETTING = "indices.memory.index_buffer_size"; public static final String INDEX_BUFFER_SIZE_SETTING = "indices.memory.index_buffer_size";
@@ -70,10 +69,6 @@ public class IndexingMemoryController extends AbstractLifecycleComponent<Indexin
/** Once a shard becomes inactive, we reduce the {@code IndexWriter} buffer to this value (500 KB) to let active shards use the heap instead. */ /** Once a shard becomes inactive, we reduce the {@code IndexWriter} buffer to this value (500 KB) to let active shards use the heap instead. */
public static final ByteSizeValue INACTIVE_SHARD_INDEXING_BUFFER = ByteSizeValue.parseBytesSizeValue("500kb", "INACTIVE_SHARD_INDEXING_BUFFER"); public static final ByteSizeValue INACTIVE_SHARD_INDEXING_BUFFER = ByteSizeValue.parseBytesSizeValue("500kb", "INACTIVE_SHARD_INDEXING_BUFFER");
/** Once a shard becomes inactive, we reduce the {@code Translog} buffer to this value (1 KB) to let active shards use the heap instead. */
public static final ByteSizeValue INACTIVE_SHARD_TRANSLOG_BUFFER = ByteSizeValue.parseBytesSizeValue("1kb", "INACTIVE_SHARD_TRANSLOG_BUFFER");
private final ThreadPool threadPool;
private final IndicesService indicesService; private final IndicesService indicesService;
private final ByteSizeValue indexingBuffer; private final ByteSizeValue indexingBuffer;
@@ -81,22 +76,20 @@ public class IndexingMemoryController extends AbstractLifecycleComponent<Indexin
private final ByteSizeValue maxShardIndexBufferSize; private final ByteSizeValue maxShardIndexBufferSize;
private final TimeValue interval; private final TimeValue interval;
private volatile ScheduledFuture scheduler; private final ScheduledFuture scheduler;
private static final EnumSet<IndexShardState> CAN_UPDATE_INDEX_BUFFER_STATES = EnumSet.of( private static final EnumSet<IndexShardState> CAN_UPDATE_INDEX_BUFFER_STATES = EnumSet.of(
IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED, IndexShardState.RELOCATED); IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED, IndexShardState.RELOCATED);
private final ShardsIndicesStatusChecker statusChecker; private final ShardsIndicesStatusChecker statusChecker;
@Inject IndexingMemoryController(Settings settings, ThreadPool threadPool, IndicesService indicesService) {
public IndexingMemoryController(Settings settings, ThreadPool threadPool, IndicesService indicesService) {
this(settings, threadPool, indicesService, JvmInfo.jvmInfo().getMem().getHeapMax().bytes()); this(settings, threadPool, indicesService, JvmInfo.jvmInfo().getMem().getHeapMax().bytes());
} }
// for testing // for testing
protected IndexingMemoryController(Settings settings, ThreadPool threadPool, IndicesService indicesService, long jvmMemoryInBytes) { IndexingMemoryController(Settings settings, ThreadPool threadPool, IndicesService indicesService, long jvmMemoryInBytes) {
super(settings); super(settings);
this.threadPool = threadPool;
this.indicesService = indicesService; this.indicesService = indicesService;
ByteSizeValue indexingBuffer; ByteSizeValue indexingBuffer;
@@ -131,29 +124,24 @@ public class IndexingMemoryController extends AbstractLifecycleComponent<Indexin
MIN_SHARD_INDEX_BUFFER_SIZE_SETTING, this.minShardIndexBufferSize, MIN_SHARD_INDEX_BUFFER_SIZE_SETTING, this.minShardIndexBufferSize,
MAX_SHARD_INDEX_BUFFER_SIZE_SETTING, this.maxShardIndexBufferSize, MAX_SHARD_INDEX_BUFFER_SIZE_SETTING, this.maxShardIndexBufferSize,
SHARD_INACTIVE_INTERVAL_TIME_SETTING, this.interval); SHARD_INACTIVE_INTERVAL_TIME_SETTING, this.interval);
this.scheduler = scheduleTask(threadPool);
} }
@Override protected ScheduledFuture<?> scheduleTask(ThreadPool threadPool) {
protected void doStart() {
// it's fine to run it on the scheduler thread, no busy work // it's fine to run it on the scheduler thread, no busy work
this.scheduler = threadPool.scheduleWithFixedDelay(statusChecker, interval); return threadPool.scheduleWithFixedDelay(statusChecker, interval);
} }
@Override @Override
protected void doStop() { public void close() {
FutureUtils.cancel(scheduler); FutureUtils.cancel(scheduler);
scheduler = null;
}
@Override
protected void doClose() {
} }
/** /**
* returns the current budget for the total amount of indexing buffers of * returns the current budget for the total amount of indexing buffers of
* active shards on this node * active shards on this node
*/ */
public ByteSizeValue indexingBufferSize() { ByteSizeValue indexingBufferSize() {
return indexingBuffer; return indexingBuffer;
} }
@@ -188,7 +176,7 @@ } }
} }
/** check if any shards active status changed, now. */ /** check if any shards active status changed, now. */
public void forceCheck() { void forceCheck() {
statusChecker.run(); statusChecker.run();
} }

View File

@@ -57,6 +57,7 @@ import org.elasticsearch.index.mapper.internal.UidFieldMapper,
import org.elasticsearch.index.mapper.internal.VersionFieldMapper; import org.elasticsearch.index.mapper.internal.VersionFieldMapper;
import org.elasticsearch.index.mapper.ip.IpFieldMapper; import org.elasticsearch.index.mapper.ip.IpFieldMapper;
import org.elasticsearch.index.mapper.object.ObjectMapper; import org.elasticsearch.index.mapper.object.ObjectMapper;
import org.elasticsearch.index.percolator.PercolatorFieldMapper;
import org.elasticsearch.index.query.BoolQueryParser; import org.elasticsearch.index.query.BoolQueryParser;
import org.elasticsearch.index.query.BoostingQueryParser; import org.elasticsearch.index.query.BoostingQueryParser;
import org.elasticsearch.index.query.CommonTermsQueryParser; import org.elasticsearch.index.query.CommonTermsQueryParser;
@@ -111,7 +112,6 @@ import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCacheListener; import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCacheListener;
import org.elasticsearch.indices.flush.SyncedFlushService; import org.elasticsearch.indices.flush.SyncedFlushService;
import org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.indices.mapper.MapperRegistry;
import org.elasticsearch.indices.memory.IndexingMemoryController;
import org.elasticsearch.indices.query.IndicesQueriesRegistry; import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.indices.recovery.RecoverySource; import org.elasticsearch.indices.recovery.RecoverySource;
@@ -212,6 +212,7 @@ public class IndicesModule extends AbstractModule {
registerMapper(ObjectMapper.NESTED_CONTENT_TYPE, new ObjectMapper.TypeParser()); registerMapper(ObjectMapper.NESTED_CONTENT_TYPE, new ObjectMapper.TypeParser());
registerMapper(CompletionFieldMapper.CONTENT_TYPE, new CompletionFieldMapper.TypeParser()); registerMapper(CompletionFieldMapper.CONTENT_TYPE, new CompletionFieldMapper.TypeParser());
registerMapper(GeoPointFieldMapper.CONTENT_TYPE, new GeoPointFieldMapper.TypeParser()); registerMapper(GeoPointFieldMapper.CONTENT_TYPE, new GeoPointFieldMapper.TypeParser());
registerMapper(PercolatorFieldMapper.CONTENT_TYPE, new PercolatorFieldMapper.TypeParser());
if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) { if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) {
registerMapper(GeoShapeFieldMapper.CONTENT_TYPE, new GeoShapeFieldMapper.TypeParser()); registerMapper(GeoShapeFieldMapper.CONTENT_TYPE, new GeoShapeFieldMapper.TypeParser());
@@ -273,7 +274,6 @@ public class IndicesModule extends AbstractModule {
bind(RecoverySource.class).asEagerSingleton(); bind(RecoverySource.class).asEagerSingleton();
bind(IndicesStore.class).asEagerSingleton(); bind(IndicesStore.class).asEagerSingleton();
bind(IndicesClusterStateService.class).asEagerSingleton(); bind(IndicesClusterStateService.class).asEagerSingleton();
bind(IndexingMemoryController.class).asEagerSingleton();
bind(SyncedFlushService.class).asEagerSingleton(); bind(SyncedFlushService.class).asEagerSingleton();
bind(IndicesQueryCache.class).asEagerSingleton(); bind(IndicesQueryCache.class).asEagerSingleton();
bind(IndicesRequestCache.class).asEagerSingleton(); bind(IndicesRequestCache.class).asEagerSingleton();

View File

@@ -52,7 +52,7 @@ import org.elasticsearch.index.NodeServicesProvider,
import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.analysis.AnalysisRegistry;
import org.elasticsearch.index.flush.FlushStats; import org.elasticsearch.index.flush.FlushStats;
import org.elasticsearch.index.get.GetStats; import org.elasticsearch.index.get.GetStats;
import org.elasticsearch.index.indexing.IndexingStats; import org.elasticsearch.index.shard.IndexingStats;
import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.recovery.RecoveryStats; import org.elasticsearch.index.recovery.RecoveryStats;
import org.elasticsearch.index.refresh.RefreshStats; import org.elasticsearch.index.refresh.RefreshStats;
@@ -65,6 +65,7 @@ import org.elasticsearch.index.store.IndexStoreConfig;
import org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.indices.mapper.MapperRegistry;
import org.elasticsearch.indices.query.IndicesQueriesRegistry; import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.threadpool.ThreadPool;
import java.io.IOException; import java.io.IOException;
import java.nio.file.Files; import java.nio.file.Files;
@@ -105,6 +106,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
private final OldShardsStats oldShardsStats = new OldShardsStats(); private final OldShardsStats oldShardsStats = new OldShardsStats();
private final IndexStoreConfig indexStoreConfig; private final IndexStoreConfig indexStoreConfig;
private final MapperRegistry mapperRegistry; private final MapperRegistry mapperRegistry;
private final IndexingMemoryController indexingMemoryController;
@Override @Override
protected void doStart() { protected void doStart() {
@@ -114,7 +116,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
public IndicesService(Settings settings, PluginsService pluginsService, NodeEnvironment nodeEnv, public IndicesService(Settings settings, PluginsService pluginsService, NodeEnvironment nodeEnv,
ClusterSettings clusterSettings, AnalysisRegistry analysisRegistry, ClusterSettings clusterSettings, AnalysisRegistry analysisRegistry,
IndicesQueriesRegistry indicesQueriesRegistry, IndexNameExpressionResolver indexNameExpressionResolver, IndicesQueriesRegistry indicesQueriesRegistry, IndexNameExpressionResolver indexNameExpressionResolver,
ClusterService clusterService, MapperRegistry mapperRegistry) { ClusterService clusterService, MapperRegistry mapperRegistry, ThreadPool threadPool) {
super(settings); super(settings);
this.pluginsService = pluginsService; this.pluginsService = pluginsService;
this.nodeEnv = nodeEnv; this.nodeEnv = nodeEnv;
@@ -127,7 +129,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
this.mapperRegistry = mapperRegistry; this.mapperRegistry = mapperRegistry;
clusterSettings.addSettingsUpdateConsumer(IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING, indexStoreConfig::setRateLimitingType); clusterSettings.addSettingsUpdateConsumer(IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING, indexStoreConfig::setRateLimitingType);
clusterSettings.addSettingsUpdateConsumer(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING, indexStoreConfig::setRateLimitingThrottle); clusterSettings.addSettingsUpdateConsumer(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING, indexStoreConfig::setRateLimitingThrottle);
indexingMemoryController = new IndexingMemoryController(settings, threadPool, this);
} }
@Override @Override
@@ -161,7 +163,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
@Override @Override
protected void doClose() { protected void doClose() {
IOUtils.closeWhileHandlingException(analysisRegistry); IOUtils.closeWhileHandlingException(analysisRegistry, indexingMemoryController);
} }
/** /**
@@ -293,6 +295,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
final IndexModule indexModule = new IndexModule(idxSettings, indexStoreConfig, analysisRegistry); final IndexModule indexModule = new IndexModule(idxSettings, indexStoreConfig, analysisRegistry);
pluginsService.onIndexModule(indexModule); pluginsService.onIndexModule(indexModule);
indexModule.addIndexEventListener(indexingMemoryController);
for (IndexEventListener listener : builtInListeners) { for (IndexEventListener listener : builtInListeners) {
indexModule.addIndexEventListener(listener); indexModule.addIndexEventListener(listener);
} }

View File

@@ -36,7 +36,7 @@ import org.elasticsearch.index.engine.SegmentsStats,
import org.elasticsearch.index.fielddata.FieldDataStats; import org.elasticsearch.index.fielddata.FieldDataStats;
import org.elasticsearch.index.flush.FlushStats; import org.elasticsearch.index.flush.FlushStats;
import org.elasticsearch.index.get.GetStats; import org.elasticsearch.index.get.GetStats;
import org.elasticsearch.index.indexing.IndexingStats; import org.elasticsearch.index.shard.IndexingStats;
import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.percolator.PercolateStats; import org.elasticsearch.index.percolator.PercolateStats;
import org.elasticsearch.index.recovery.RecoveryStats; import org.elasticsearch.index.recovery.RecoveryStats;

View File

@@ -63,7 +63,6 @@ import org.elasticsearch.index.shard.ShardNotFoundException;
import org.elasticsearch.index.snapshots.IndexShardRepository; import org.elasticsearch.index.snapshots.IndexShardRepository;
import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.flush.SyncedFlushService; import org.elasticsearch.indices.flush.SyncedFlushService;
import org.elasticsearch.indices.memory.IndexingMemoryController;
import org.elasticsearch.indices.recovery.RecoveryFailedException; import org.elasticsearch.indices.recovery.RecoveryFailedException;
import org.elasticsearch.indices.recovery.RecoverySource; import org.elasticsearch.indices.recovery.RecoverySource;
import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.indices.recovery.RecoveryState;
@@ -130,9 +129,9 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
NodeMappingRefreshAction nodeMappingRefreshAction, NodeMappingRefreshAction nodeMappingRefreshAction,
RepositoriesService repositoriesService, RestoreService restoreService, RepositoriesService repositoriesService, RestoreService restoreService,
SearchService searchService, SyncedFlushService syncedFlushService, SearchService searchService, SyncedFlushService syncedFlushService,
RecoverySource recoverySource, NodeServicesProvider nodeServicesProvider, IndexingMemoryController indexingMemoryController) { RecoverySource recoverySource, NodeServicesProvider nodeServicesProvider) {
super(settings); super(settings);
this.buildInIndexListener = Arrays.asList(recoverySource, recoveryTarget, searchService, syncedFlushService, indexingMemoryController); this.buildInIndexListener = Arrays.asList(recoverySource, recoveryTarget, searchService, syncedFlushService);
this.indicesService = indicesService; this.indicesService = indicesService;
this.clusterService = clusterService; this.clusterService = clusterService;
this.threadPool = threadPool; this.threadPool = threadPool;

View File

@@ -69,8 +69,6 @@ import org.elasticsearch.indices.breaker.CircuitBreakerModule;
import org.elasticsearch.indices.cache.query.IndicesQueryCache; import org.elasticsearch.indices.cache.query.IndicesQueryCache;
import org.elasticsearch.indices.cluster.IndicesClusterStateService; import org.elasticsearch.indices.cluster.IndicesClusterStateService;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.elasticsearch.indices.memory.IndexingMemoryController;
import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.indices.store.IndicesStore; import org.elasticsearch.indices.store.IndicesStore;
import org.elasticsearch.indices.ttl.IndicesTTLService; import org.elasticsearch.indices.ttl.IndicesTTLService;
import org.elasticsearch.monitor.MonitorService; import org.elasticsearch.monitor.MonitorService;
@@ -249,7 +247,6 @@ public class Node implements Releasable {
injector.getInstance(MappingUpdatedAction.class).setClient(client); injector.getInstance(MappingUpdatedAction.class).setClient(client);
injector.getInstance(IndicesService.class).start(); injector.getInstance(IndicesService.class).start();
injector.getInstance(IndexingMemoryController.class).start();
injector.getInstance(IndicesClusterStateService.class).start(); injector.getInstance(IndicesClusterStateService.class).start();
injector.getInstance(IndicesTTLService.class).start(); injector.getInstance(IndicesTTLService.class).start();
injector.getInstance(SnapshotsService.class).start(); injector.getInstance(SnapshotsService.class).start();
@@ -308,7 +305,6 @@ public class Node implements Releasable {
// stop any changes happening as a result of cluster state changes // stop any changes happening as a result of cluster state changes
injector.getInstance(IndicesClusterStateService.class).stop(); injector.getInstance(IndicesClusterStateService.class).stop();
// we close indices first, so operations won't be allowed on it // we close indices first, so operations won't be allowed on it
injector.getInstance(IndexingMemoryController.class).stop();
injector.getInstance(IndicesTTLService.class).stop(); injector.getInstance(IndicesTTLService.class).stop();
injector.getInstance(RoutingService.class).stop(); injector.getInstance(RoutingService.class).stop();
injector.getInstance(ClusterService.class).stop(); injector.getInstance(ClusterService.class).stop();
@@ -360,7 +356,6 @@ public class Node implements Releasable {
stopWatch.stop().start("indices_cluster"); stopWatch.stop().start("indices_cluster");
injector.getInstance(IndicesClusterStateService.class).close(); injector.getInstance(IndicesClusterStateService.class).close();
stopWatch.stop().start("indices"); stopWatch.stop().start("indices");
injector.getInstance(IndexingMemoryController.class).close();
injector.getInstance(IndicesTTLService.class).close(); injector.getInstance(IndicesTTLService.class).close();
injector.getInstance(IndicesService.class).close(); injector.getInstance(IndicesService.class).close();
// close filter/fielddata caches after indices // close filter/fielddata caches after indices

View File

@@ -22,6 +22,7 @@ package org.elasticsearch.node.internal;
import org.elasticsearch.bootstrap.BootstrapInfo; import org.elasticsearch.bootstrap.BootstrapInfo;
import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.common.cli.Terminal; import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.collect.Tuple;
@@ -41,7 +42,6 @@ import java.util.HashSet;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Set; import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;
import static org.elasticsearch.common.Strings.cleanPath; import static org.elasticsearch.common.Strings.cleanPath;
import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.common.settings.Settings.settingsBuilder;
@@ -207,7 +207,7 @@ public class InternalSettingsPreparer {
name = reader.readLine(); name = reader.readLine();
} }
} }
int index = ThreadLocalRandom.current().nextInt(names.size()); int index = Randomness.get().nextInt(names.size());
return names.get(index); return names.get(index);
} catch (IOException e) { } catch (IOException e) {
throw new RuntimeException("Could not read node names list", e); throw new RuntimeException("Could not read node names list", e);
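
A minimal sketch of the substitution above, assuming (as the call site implies, since nextInt is invoked on it) that Randomness.get() returns a java.util.Random that tests can seed for reproducibility:

import java.util.List;
import java.util.Random;
import org.elasticsearch.common.Randomness;

final class RandomPickSketch {
    static String pickName(List<String> names) {
        Random random = Randomness.get(); // reproducible source, unlike ThreadLocalRandom.current()
        return names.get(random.nextInt(names.size()));
    }
}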

View File

@@ -29,9 +29,15 @@ import org.apache.lucene.index.LeafReader,
import org.apache.lucene.index.MultiReader; import org.apache.lucene.index.MultiReader;
import org.apache.lucene.index.SlowCompositeReaderWrapper; import org.apache.lucene.index.SlowCompositeReaderWrapper;
import org.apache.lucene.index.memory.MemoryIndex; import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.CloseableThreadLocal; import org.apache.lucene.util.CloseableThreadLocal;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.ParsedDocument;
@@ -76,7 +82,17 @@ class MultiDocumentPercolatorIndex implements PercolatorIndex {
try { try {
MultiReader mReader = new MultiReader(memoryIndices, true); MultiReader mReader = new MultiReader(memoryIndices, true);
LeafReader slowReader = SlowCompositeReaderWrapper.wrap(mReader); LeafReader slowReader = SlowCompositeReaderWrapper.wrap(mReader);
final IndexSearcher slowSearcher = new IndexSearcher(slowReader); final IndexSearcher slowSearcher = new IndexSearcher(slowReader) {
@Override
public Weight createNormalizedWeight(Query query, boolean needsScores) throws IOException {
BooleanQuery.Builder bq = new BooleanQuery.Builder();
bq.add(query, BooleanClause.Occur.MUST);
bq.add(Queries.newNestedFilter(), BooleanClause.Occur.MUST_NOT);
return super.createNormalizedWeight(bq.build(), needsScores);
}
};
slowSearcher.setQueryCache(null); slowSearcher.setQueryCache(null);
DocSearcher docSearcher = new DocSearcher(slowSearcher, rootDocMemoryIndex); DocSearcher docSearcher = new DocSearcher(slowSearcher, rootDocMemoryIndex);
context.initialize(docSearcher, parsedDocument); context.initialize(docSearcher, parsedDocument);
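
The anonymous IndexSearcher above rewrites every incoming query so nested documents can never match directly. A minimal sketch of just that rewrite, extracted as a hypothetical helper:

import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
import org.elasticsearch.common.lucene.search.Queries;

final class ExcludeNestedSketch {
    static Query excludeNestedDocs(Query query) {
        BooleanQuery.Builder bq = new BooleanQuery.Builder();
        bq.add(query, BooleanClause.Occur.MUST);                         // the original query must match
        bq.add(Queries.newNestedFilter(), BooleanClause.Occur.MUST_NOT); // ...but never on a nested doc
        return bq.build();
    }
}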

View File

@@ -88,11 +88,7 @@ import java.util.concurrent.ConcurrentMap;
*/ */
public class PercolateContext extends SearchContext { public class PercolateContext extends SearchContext {
private final PercolatorQueriesRegistry percolateQueryRegistry; private int size = 10;
public boolean limit;
private int size;
public boolean doSort;
public byte percolatorTypeId;
private boolean trackScores; private boolean trackScores;
private final SearchShardTarget searchShardTarget; private final SearchShardTarget searchShardTarget;
@@ -102,10 +98,12 @@ public class PercolateContext extends SearchContext {
private final PageCacheRecycler pageCacheRecycler; private final PageCacheRecycler pageCacheRecycler;
private final BigArrays bigArrays; private final BigArrays bigArrays;
private final ScriptService scriptService; private final ScriptService scriptService;
private final MapperService mapperService;
private final int numberOfShards; private final int numberOfShards;
private final Query aliasFilter; private final Query aliasFilter;
private final long originNanoTime = System.nanoTime(); private final long originNanoTime = System.nanoTime();
private final long startTime; private final long startTime;
private final boolean onlyCount;
private String[] types; private String[] types;
private Engine.Searcher docSearcher; private Engine.Searcher docSearcher;
@@ -131,8 +129,8 @@ public class PercolateContext extends SearchContext {
this.indexShard = indexShard; this.indexShard = indexShard;
this.indexService = indexService; this.indexService = indexService;
this.fieldDataService = indexService.fieldData(); this.fieldDataService = indexService.fieldData();
this.mapperService = indexService.mapperService();
this.searchShardTarget = searchShardTarget; this.searchShardTarget = searchShardTarget;
this.percolateQueryRegistry = indexShard.percolateRegistry();
this.types = new String[]{request.documentType()}; this.types = new String[]{request.documentType()};
this.pageCacheRecycler = pageCacheRecycler; this.pageCacheRecycler = pageCacheRecycler;
this.bigArrays = bigArrays.withCircuitBreaking(); this.bigArrays = bigArrays.withCircuitBreaking();
@@ -143,6 +141,24 @@ public class PercolateContext extends SearchContext {
this.numberOfShards = request.getNumberOfShards(); this.numberOfShards = request.getNumberOfShards();
this.aliasFilter = aliasFilter; this.aliasFilter = aliasFilter;
this.startTime = request.getStartTime(); this.startTime = request.getStartTime();
this.onlyCount = request.onlyCount();
}
// for testing:
PercolateContext(PercolateShardRequest request, SearchShardTarget searchShardTarget, MapperService mapperService) {
super(null, request);
this.searchShardTarget = searchShardTarget;
this.mapperService = mapperService;
this.indexService = null;
this.indexShard = null;
this.fieldDataService = null;
this.pageCacheRecycler = null;
this.bigArrays = null;
this.scriptService = null;
this.aliasFilter = null;
this.startTime = 0;
this.numberOfShards = 0;
this.onlyCount = true;
} }
public IndexSearcher docSearcher() { public IndexSearcher docSearcher() {
@@ -177,10 +193,6 @@ public class PercolateContext extends SearchContext {
return indexService; return indexService;
} }
public ConcurrentMap<BytesRef, Query> percolateQueries() {
return percolateQueryRegistry.percolateQueries();
}
public Query percolateQuery() { public Query percolateQuery() {
return percolateQuery; return percolateQuery;
} }
@@ -196,6 +208,14 @@ public class PercolateContext extends SearchContext {
return hitContext; return hitContext;
} }
public boolean isOnlyCount() {
return onlyCount;
}
public Query percolatorTypeFilter(){
return indexService().mapperService().documentMapper(PercolatorService.TYPE_NAME).typeFilter();
}
@Override @Override
public SearchContextHighlight highlight() { public SearchContextHighlight highlight() {
return highlight; return highlight;
@@ -230,7 +250,7 @@ public class PercolateContext extends SearchContext {
@Override @Override
public MapperService mapperService() { public MapperService mapperService() {
return indexService.mapperService(); return mapperService;
} }
@Override @Override
@@ -531,7 +551,6 @@ public class PercolateContext extends SearchContext {
@Override @Override
public SearchContext size(int size) { public SearchContext size(int size) {
this.size = size; this.size = size;
this.limit = true;
return this; return this;
} }

View File

@@ -0,0 +1,224 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.percolator;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.Query;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.percolate.PercolateShardRequest;
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.mapper.DocumentMapperForType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.search.SearchParseElement;
import org.elasticsearch.search.aggregations.AggregationPhase;
import org.elasticsearch.search.highlight.HighlightPhase;
import org.elasticsearch.search.sort.SortParseElement;
import java.util.Map;
import static org.elasticsearch.index.mapper.SourceToParse.source;
public class PercolateDocumentParser {
private final HighlightPhase highlightPhase;
private final SortParseElement sortParseElement;
private final AggregationPhase aggregationPhase;
private final MappingUpdatedAction mappingUpdatedAction;
@Inject
public PercolateDocumentParser(HighlightPhase highlightPhase, SortParseElement sortParseElement, AggregationPhase aggregationPhase, MappingUpdatedAction mappingUpdatedAction) {
this.highlightPhase = highlightPhase;
this.sortParseElement = sortParseElement;
this.aggregationPhase = aggregationPhase;
this.mappingUpdatedAction = mappingUpdatedAction;
}
public ParsedDocument parse(PercolateShardRequest request, PercolateContext context, MapperService mapperService, QueryShardContext queryShardContext) {
BytesReference source = request.source();
if (source == null || source.length() == 0) {
if (request.docSource() != null && request.docSource().length() != 0) {
return parseFetchedDoc(context, request.docSource(), mapperService, request.shardId().getIndex(), request.documentType());
} else {
return null;
}
}
// TODO: combine all feature parse elements into one map
Map<String, ? extends SearchParseElement> hlElements = highlightPhase.parseElements();
Map<String, ? extends SearchParseElement> aggregationElements = aggregationPhase.parseElements();
ParsedDocument doc = null;
// Some queries (e.g. the function_score query with decay functions) rely on a SearchContext being set:
// We switch types because this context needs to be in the context of the percolate queries in the shard and
// not the in-memory percolate doc
String[] previousTypes = context.types();
context.types(new String[]{PercolatorService.TYPE_NAME});
try (XContentParser parser = XContentFactory.xContent(source).createParser(source)) {
String currentFieldName = null;
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
// we need to check for "doc" here, so the next token will be START_OBJECT, which is
// the actual start of the document
if ("doc".equals(currentFieldName)) {
if (doc != null) {
throw new ElasticsearchParseException("Either specify doc or get, not both");
}
DocumentMapperForType docMapper = mapperService.documentMapperWithAutoCreate(request.documentType());
String index = context.shardTarget().index();
doc = docMapper.getDocumentMapper().parse(source(parser).index(index).type(request.documentType()).flyweight(true));
if (docMapper.getMapping() != null) {
doc.addDynamicMappingsUpdate(docMapper.getMapping());
}
if (doc.dynamicMappingsUpdate() != null) {
mappingUpdatedAction.updateMappingOnMasterSynchronously(request.shardId().getIndex(), request.documentType(), doc.dynamicMappingsUpdate());
}
// the document parsing exits the "doc" object, so we need to set the new current field.
currentFieldName = parser.currentName();
}
} else if (token == XContentParser.Token.START_OBJECT) {
SearchParseElement element = hlElements.get(currentFieldName);
if (element == null) {
element = aggregationElements.get(currentFieldName);
}
if ("query".equals(currentFieldName)) {
if (context.percolateQuery() != null) {
throw new ElasticsearchParseException("Either specify query or filter, not both");
}
context.percolateQuery(queryShardContext.parse(parser).query());
} else if ("filter".equals(currentFieldName)) {
if (context.percolateQuery() != null) {
throw new ElasticsearchParseException("Either specify query or filter, not both");
}
Query filter = queryShardContext.parseInnerFilter(parser).query();
context.percolateQuery(new ConstantScoreQuery(filter));
} else if ("sort".equals(currentFieldName)) {
parseSort(parser, context);
} else if (element != null) {
element.parse(parser, context);
}
} else if (token == XContentParser.Token.START_ARRAY) {
if ("sort".equals(currentFieldName)) {
parseSort(parser, context);
}
} else if (token == null) {
break;
} else if (token.isValue()) {
if ("size".equals(currentFieldName)) {
context.size(parser.intValue());
if (context.size() < 0) {
throw new ElasticsearchParseException("size is set to [{}] and is expected to be higher or equal to 0", context.size());
}
} else if ("sort".equals(currentFieldName)) {
parseSort(parser, context);
} else if ("track_scores".equals(currentFieldName) || "trackScores".equals(currentFieldName)) {
context.trackScores(parser.booleanValue());
}
}
}
// We need to get the actual source from the request body for highlighting, so parse the request body again
// and only get the doc source.
if (context.highlight() != null) {
parser.close();
currentFieldName = null;
try (XContentParser parserForHighlighter = XContentFactory.xContent(source).createParser(source)) {
token = parserForHighlighter.nextToken();
assert token == XContentParser.Token.START_OBJECT;
while ((token = parserForHighlighter.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parserForHighlighter.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if ("doc".equals(currentFieldName)) {
BytesStreamOutput bStream = new BytesStreamOutput();
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.SMILE, bStream);
builder.copyCurrentStructure(parserForHighlighter);
builder.close();
doc.setSource(bStream.bytes());
break;
} else {
parserForHighlighter.skipChildren();
}
} else if (token == null) {
break;
}
}
}
}
} catch (Throwable e) {
throw new ElasticsearchParseException("failed to parse request", e);
} finally {
context.types(previousTypes);
}
if (request.docSource() != null && request.docSource().length() != 0) {
if (doc != null) {
throw new IllegalArgumentException("Can't specify the document to percolate in the source of the request and as document id");
}
doc = parseFetchedDoc(context, request.docSource(), mapperService, request.shardId().getIndex(), request.documentType());
}
if (doc == null) {
throw new IllegalArgumentException("Nothing to percolate");
}
return doc;
}
private void parseSort(XContentParser parser, PercolateContext context) throws Exception {
context.trackScores(true);
sortParseElement.parse(parser, context);
// null means default sorting by relevance
if (context.sort() != null) {
throw new ElasticsearchParseException("Only _score desc is supported");
}
}
private ParsedDocument parseFetchedDoc(PercolateContext context, BytesReference fetchedDoc, MapperService mapperService, String index, String type) {
try (XContentParser parser = XContentFactory.xContent(fetchedDoc).createParser(fetchedDoc)) {
DocumentMapperForType docMapper = mapperService.documentMapperWithAutoCreate(type);
ParsedDocument doc = docMapper.getDocumentMapper().parse(source(parser).index(index).type(type).flyweight(true));
if (doc == null) {
throw new ElasticsearchParseException("No doc to percolate in the request");
}
if (context.highlight() != null) {
doc.setSource(fetchedDoc);
}
return doc;
} catch (Throwable e) {
throw new ElasticsearchParseException("failed to parse request", e);
}
}
}
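
For orientation, a minimal sketch of a request body the parser above understands, built with XContentBuilder; the "message" field is a made-up document field:

import java.io.IOException;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

final class PercolateBodySketch {
    static XContentBuilder exampleBody() throws IOException {
        return XContentFactory.jsonBuilder()
            .startObject()
                .startObject("doc")                  // the document to percolate
                    .field("message", "hello world")
                .endObject()
                .field("size", 10)                   // cap on returned matching queries, must be >= 0
                .field("track_scores", true)
            .endObject();
    }
}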

View File

@@ -27,6 +27,7 @@ public class PercolatorModule extends AbstractModule {
@Override @Override
protected void configure() { protected void configure() {
bind(PercolateDocumentParser.class).asEagerSingleton();
bind(PercolatorService.class).asEagerSingleton(); bind(PercolatorService.class).asEagerSingleton();
} }
} }

View File

@@ -0,0 +1,250 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.percolator;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.index.fieldvisitor.SingleFieldsVisitor;
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
import org.elasticsearch.index.percolator.ExtractQueryTermsService;
import java.io.IOException;
import java.util.Map;
import java.util.Set;
import static org.apache.lucene.search.BooleanClause.Occur.FILTER;
import static org.apache.lucene.search.BooleanClause.Occur.MUST;
final class PercolatorQuery extends Query {
public static final float MATCH_COST =
(1 << 14) // stored field access cost, approximated by the number of bytes in a block
+ 1000; // cost of matching the query against the document, arbitrary as it would be really complex to estimate
static class Builder {
private final IndexSearcher percolatorIndexSearcher;
private final Map<BytesRef, Query> percolatorQueries;
private Query percolateQuery;
private Query queriesMetaDataQuery;
private final Query percolateTypeQuery;
/**
* @param percolatorIndexSearcher The index searcher on top of the in-memory index that holds the document being percolated
* @param percolatorQueries All the registered percolator queries
* @param percolateTypeQuery A query that identifies all documents containing percolator queries
*/
Builder(IndexSearcher percolatorIndexSearcher, Map<BytesRef, Query> percolatorQueries, Query percolateTypeQuery) {
this.percolatorIndexSearcher = percolatorIndexSearcher;
this.percolatorQueries = percolatorQueries;
this.percolateTypeQuery = percolateTypeQuery;
}
/**
* Optionally sets a query that reduces the number of queries to percolate, based on custom metadata
* attached to the percolator documents.
*/
void setPercolateQuery(Query percolateQuery) {
this.percolateQuery = percolateQuery;
}
/**
* Optionally sets a query that reduces the number of queries to percolate based on extracted terms from
* the document to be percolated.
*
* @param extractedTermsFieldName The name of the field to get the extracted terms from
* @param unknownQueryFieldname The field used to mark documents whose queries could not be fully extracted
*/
void extractQueryTermsQuery(String extractedTermsFieldName, String unknownQueryFieldname) throws IOException {
this.queriesMetaDataQuery = ExtractQueryTermsService.createQueryTermsQuery(percolatorIndexSearcher.getIndexReader(), extractedTermsFieldName, unknownQueryFieldname);
}
PercolatorQuery build() {
BooleanQuery.Builder builder = new BooleanQuery.Builder();
builder.add(percolateTypeQuery, FILTER);
if (queriesMetaDataQuery != null) {
builder.add(queriesMetaDataQuery, FILTER);
}
if (percolateQuery != null){
builder.add(percolateQuery, MUST);
}
return new PercolatorQuery(builder.build(), percolatorIndexSearcher, percolatorQueries);
}
}
private final Query percolatorQueriesQuery;
private final IndexSearcher percolatorIndexSearcher;
private final Map<BytesRef, Query> percolatorQueries;
private PercolatorQuery(Query percolatorQueriesQuery, IndexSearcher percolatorIndexSearcher, Map<BytesRef, Query> percolatorQueries) {
this.percolatorQueriesQuery = percolatorQueriesQuery;
this.percolatorIndexSearcher = percolatorIndexSearcher;
this.percolatorQueries = percolatorQueries;
}
@Override
public Query rewrite(IndexReader reader) throws IOException {
if (getBoost() != 1f) {
return super.rewrite(reader);
}
Query rewritten = percolatorQueriesQuery.rewrite(reader);
if (rewritten != percolatorQueriesQuery) {
return new PercolatorQuery(rewritten, percolatorIndexSearcher, percolatorQueries);
} else {
return this;
}
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
final Weight innerWeight = percolatorQueriesQuery.createWeight(searcher, needsScores);
return new Weight(this) {
@Override
public void extractTerms(Set<Term> set) {
}
@Override
public Explanation explain(LeafReaderContext leafReaderContext, int docId) throws IOException {
Scorer scorer = scorer(leafReaderContext);
if (scorer != null) {
int result = scorer.iterator().advance(docId);
if (result == docId) {
return Explanation.match(scorer.score(), "PercolatorQuery");
}
}
return Explanation.noMatch("PercolatorQuery");
}
@Override
public float getValueForNormalization() throws IOException {
return innerWeight.getValueForNormalization();
}
@Override
public void normalize(float v, float v1) {
innerWeight.normalize(v, v1);
}
@Override
public Scorer scorer(LeafReaderContext leafReaderContext) throws IOException {
final Scorer approximation = innerWeight.scorer(leafReaderContext);
if (approximation == null) {
return null;
}
final LeafReader leafReader = leafReaderContext.reader();
return new Scorer(this) {
@Override
public DocIdSetIterator iterator() {
return TwoPhaseIterator.asDocIdSetIterator(twoPhaseIterator());
}
@Override
public TwoPhaseIterator twoPhaseIterator() {
return new TwoPhaseIterator(approximation.iterator()) {
@Override
public boolean matches() throws IOException {
return matchDocId(approximation.docID(), leafReader);
}
@Override
public float matchCost() {
return MATCH_COST;
}
};
}
@Override
public float score() throws IOException {
return approximation.score();
}
@Override
public int freq() throws IOException {
return approximation.freq();
}
@Override
public int docID() {
return approximation.docID();
}
boolean matchDocId(int docId, LeafReader leafReader) throws IOException {
SingleFieldsVisitor singleFieldsVisitor = new SingleFieldsVisitor(UidFieldMapper.NAME);
leafReader.document(docId, singleFieldsVisitor);
BytesRef percolatorQueryId = new BytesRef(singleFieldsVisitor.uid().id());
return matchQuery(percolatorQueryId);
}
};
}
};
}
boolean matchQuery(BytesRef percolatorQueryId) throws IOException {
Query percolatorQuery = percolatorQueries.get(percolatorQueryId);
if (percolatorQuery != null) {
return Lucene.exists(percolatorIndexSearcher, percolatorQuery);
} else {
return false;
}
}
private final Object instance = new Object();
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
if (!super.equals(o)) return false;
PercolatorQuery that = (PercolatorQuery) o;
return instance.equals(that.instance);
}
@Override
public int hashCode() {
int result = super.hashCode();
result = 31 * result + instance.hashCode();
return result;
}
@Override
public String toString(String s) {
return "PercolatorQuery{inner={" + percolatorQueriesQuery.toString(s) + "}}";
}
}
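
The class above verifies candidates in two phases: the approximation iterates documents that survive the type/metadata filters, and matches() replays the registered query against the in-memory index holding the percolated document. Below is a minimal standalone sketch of that verification step (not part of this commit; the field name and sample query are illustrative), using Lucene's MemoryIndex the same way matchQuery() relies on Lucene.exists:

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TotalHitCountCollector;

public class VerifyStepSketch {
    public static void main(String[] args) throws Exception {
        // Index the document being percolated into an in-memory, single-document index.
        MemoryIndex memoryIndex = new MemoryIndex();
        memoryIndex.addField("body", "the quick brown fox", new StandardAnalyzer());
        IndexSearcher percolatorIndexSearcher = memoryIndex.createSearcher();

        // A registered percolator query; the existence check mirrors matchQuery() above.
        Query registered = new TermQuery(new Term("body", "fox"));
        TotalHitCountCollector collector = new TotalHitCountCollector();
        percolatorIndexSearcher.search(registered, collector);
        System.out.println("matches: " + (collector.getTotalHits() > 0));
    }
}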

View File

@@ -18,134 +18,110 @@
  */
 package org.elasticsearch.percolator;

-import com.carrotsearch.hppc.IntObjectHashMap;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.ReaderUtil;
 import org.apache.lucene.index.memory.ExtendedMemoryIndex;
 import org.apache.lucene.index.memory.MemoryIndex;
-import org.apache.lucene.search.BooleanClause;
-import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.ConstantScoreQuery;
-import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.MultiCollector;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.TopScoreDocCollector;
+import org.apache.lucene.search.TotalHitCountCollector;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.CloseableThreadLocal;
-import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.Version;
 import org.elasticsearch.action.percolate.PercolateResponse;
 import org.elasticsearch.action.percolate.PercolateShardRequest;
 import org.elasticsearch.action.percolate.PercolateShardResponse;
 import org.elasticsearch.cache.recycler.PageCacheRecycler;
 import org.elasticsearch.cluster.ClusterService;
-import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.common.HasContextAndHeaders;
+import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.ParseFieldMatcher;
-import org.elasticsearch.common.bytes.BytesArray;
-import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.lucene.Lucene;
-import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.text.Text;
 import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.util.BigArrays;
-import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.common.xcontent.XContentFactory;
-import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.IndexService;
-import org.elasticsearch.index.engine.Engine;
-import org.elasticsearch.index.fielddata.IndexFieldData;
-import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
-import org.elasticsearch.index.mapper.DocumentMapperForType;
-import org.elasticsearch.index.mapper.MappedFieldType;
-import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.fieldvisitor.SingleFieldsVisitor;
 import org.elasticsearch.index.mapper.ParsedDocument;
-import org.elasticsearch.index.mapper.Uid;
 import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.index.percolator.PercolatorFieldMapper;
 import org.elasticsearch.index.percolator.PercolatorQueriesRegistry;
 import org.elasticsearch.index.query.ParsedQuery;
-import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.indices.IndicesService;
-import org.elasticsearch.percolator.QueryCollector.Count;
-import org.elasticsearch.percolator.QueryCollector.Match;
-import org.elasticsearch.percolator.QueryCollector.MatchAndScore;
-import org.elasticsearch.percolator.QueryCollector.MatchAndSort;
 import org.elasticsearch.script.ScriptService;
-import org.elasticsearch.search.SearchParseElement;
 import org.elasticsearch.search.SearchShardTarget;
 import org.elasticsearch.search.aggregations.AggregationPhase;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.BucketCollector;
 import org.elasticsearch.search.aggregations.InternalAggregation;
-import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;
 import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregator;
 import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator;
+import org.elasticsearch.search.aggregations.support.AggregationContext;
 import org.elasticsearch.search.highlight.HighlightField;
 import org.elasticsearch.search.highlight.HighlightPhase;
 import org.elasticsearch.search.internal.SearchContext;
-import org.elasticsearch.search.sort.SortParseElement;

 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.stream.Collectors;
 import java.util.stream.StreamSupport;

-import static org.elasticsearch.index.mapper.SourceToParse.source;
-import static org.elasticsearch.percolator.QueryCollector.count;
-import static org.elasticsearch.percolator.QueryCollector.match;
-import static org.elasticsearch.percolator.QueryCollector.matchAndScore;
+import static org.apache.lucene.search.BooleanClause.Occur.FILTER;
+import static org.apache.lucene.search.BooleanClause.Occur.MUST;

 public class PercolatorService extends AbstractComponent {

     public final static float NO_SCORE = Float.NEGATIVE_INFINITY;
     public final static String TYPE_NAME = ".percolator";

-    private final IndexNameExpressionResolver indexNameExpressionResolver;
-    private final IndicesService indicesService;
-    private final IntObjectHashMap<PercolatorType> percolatorTypes;
-    private final PageCacheRecycler pageCacheRecycler;
     private final BigArrays bigArrays;
+    private final ScriptService scriptService;
+    private final IndicesService indicesService;
     private final ClusterService clusterService;
+    private final HighlightPhase highlightPhase;
+    private final AggregationPhase aggregationPhase;
+    private final PageCacheRecycler pageCacheRecycler;
+    private final ParseFieldMatcher parseFieldMatcher;
+    private final CloseableThreadLocal<MemoryIndex> cache;
+    private final IndexNameExpressionResolver indexNameExpressionResolver;
+    private final PercolateDocumentParser percolateDocumentParser;
     private final PercolatorIndex single;
     private final PercolatorIndex multi;
-    private final HighlightPhase highlightPhase;
-    private final AggregationPhase aggregationPhase;
-    private final SortParseElement sortParseElement;
-    private final ScriptService scriptService;
-    private final MappingUpdatedAction mappingUpdatedAction;
-    private final CloseableThreadLocal<MemoryIndex> cache;
-    private final ParseFieldMatcher parseFieldMatcher;

     @Inject
     public PercolatorService(Settings settings, IndexNameExpressionResolver indexNameExpressionResolver, IndicesService indicesService,
                              PageCacheRecycler pageCacheRecycler, BigArrays bigArrays,
                              HighlightPhase highlightPhase, ClusterService clusterService,
                              AggregationPhase aggregationPhase, ScriptService scriptService,
-                             MappingUpdatedAction mappingUpdatedAction) {
+                             PercolateDocumentParser percolateDocumentParser) {
         super(settings);
         this.indexNameExpressionResolver = indexNameExpressionResolver;
+        this.percolateDocumentParser = percolateDocumentParser;
         this.parseFieldMatcher = new ParseFieldMatcher(settings);
         this.indicesService = indicesService;
         this.pageCacheRecycler = pageCacheRecycler;
         this.bigArrays = bigArrays;
         this.clusterService = clusterService;
-        this.highlightPhase = highlightPhase;
-        this.aggregationPhase = aggregationPhase;
         this.scriptService = scriptService;
-        this.mappingUpdatedAction = mappingUpdatedAction;
-        this.sortParseElement = new SortParseElement();
+        this.aggregationPhase = aggregationPhase;
+        this.highlightPhase = highlightPhase;
         final long maxReuseBytes = settings.getAsBytesSize("indices.memory.memory_index.size_per_thread", new ByteSizeValue(1, ByteSizeUnit.MB)).bytes();
         cache = new CloseableThreadLocal<MemoryIndex>() {
@@ -157,23 +133,41 @@ public class PercolatorService extends AbstractComponent {
         };
         single = new SingleDocumentPercolatorIndex(cache);
         multi = new MultiDocumentPercolatorIndex(cache);
-        percolatorTypes = new IntObjectHashMap<>(6);
-        percolatorTypes.put(countPercolator.id(), countPercolator);
-        percolatorTypes.put(queryCountPercolator.id(), queryCountPercolator);
-        percolatorTypes.put(matchPercolator.id(), matchPercolator);
-        percolatorTypes.put(queryPercolator.id(), queryPercolator);
-        percolatorTypes.put(scoringPercolator.id(), scoringPercolator);
-        percolatorTypes.put(topMatchingPercolator.id(), topMatchingPercolator);
     }

-    public ReduceResult reduce(byte percolatorTypeId, List<PercolateShardResponse> shardResults, HasContextAndHeaders headersContext) {
-        PercolatorType percolatorType = percolatorTypes.get(percolatorTypeId);
-        return percolatorType.reduce(shardResults, headersContext);
+    public ReduceResult reduce(boolean onlyCount, List<PercolateShardResponse> shardResponses, HasContextAndHeaders headersContext) throws IOException {
+        if (onlyCount) {
+            long finalCount = 0;
+            for (PercolateShardResponse shardResponse : shardResponses) {
+                finalCount += shardResponse.topDocs().totalHits;
+            }
+            InternalAggregations reducedAggregations = reduceAggregations(shardResponses, headersContext);
+            return new PercolatorService.ReduceResult(finalCount, reducedAggregations);
+        } else {
+            int requestedSize = shardResponses.get(0).requestedSize();
+            TopDocs[] shardResults = new TopDocs[shardResponses.size()];
+            long foundMatches = 0;
+            for (int i = 0; i < shardResults.length; i++) {
+                TopDocs shardResult = shardResponses.get(i).topDocs();
+                foundMatches += shardResult.totalHits;
+                shardResults[i] = shardResult;
+            }
+            TopDocs merged = TopDocs.merge(requestedSize, shardResults);
+            PercolateResponse.Match[] matches = new PercolateResponse.Match[merged.scoreDocs.length];
+            for (int i = 0; i < merged.scoreDocs.length; i++) {
+                ScoreDoc doc = merged.scoreDocs[i];
+                PercolateShardResponse shardResponse = shardResponses.get(doc.shardIndex);
+                String id = shardResponse.ids().get(doc.doc);
+                Map<String, HighlightField> hl = shardResponse.hls().get(doc.doc);
+                matches[i] = new PercolateResponse.Match(new Text(shardResponse.getIndex()), new Text(id), doc.score, hl);
+            }
+            InternalAggregations reducedAggregations = reduceAggregations(shardResponses, headersContext);
+            return new PercolatorService.ReduceResult(foundMatches, matches, reducedAggregations);
+        }
     }

-    public PercolateShardResponse percolate(PercolateShardRequest request) {
+    public PercolateShardResponse percolate(PercolateShardRequest request) throws IOException {
         IndexService percolateIndexService = indicesService.indexServiceSafe(request.shardId().getIndex());
         IndexShard indexShard = percolateIndexService.getShard(request.shardId().id());
         indexShard.readAllowed(); // check if we can read the shard...
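
The non-count branch of the new reduce() leans on Lucene's TopDocs.merge, which interleaves per-shard hits by score and stamps each ScoreDoc with the index of the shard it came from; that is what makes the shardResponses.get(doc.shardIndex) lookup above safe. A small self-contained sketch of that behavior (doc ids and scores are synthetic):

import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;

public class MergeSketch {
    public static void main(String[] args) {
        TopDocs shard0 = new TopDocs(2, new ScoreDoc[] { new ScoreDoc(0, 2.0f), new ScoreDoc(3, 1.0f) }, 2.0f);
        TopDocs shard1 = new TopDocs(1, new ScoreDoc[] { new ScoreDoc(7, 1.5f) }, 1.5f);
        // merge keeps the requested top N across shards and sets scoreDoc.shardIndex
        TopDocs merged = TopDocs.merge(2, new TopDocs[] { shard0, shard1 });
        for (ScoreDoc doc : merged.scoreDocs) {
            System.out.println("shard=" + doc.shardIndex + " doc=" + doc.doc + " score=" + doc.score);
        }
    }
}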
@@ -196,29 +190,11 @@
         );
         SearchContext.setCurrent(context);
         try {
-            ParsedDocument parsedDocument = parseRequest(indexShard, request, context, request.shardId().getIndex());
-            if (context.percolateQueries().isEmpty()) {
-                return new PercolateShardResponse(context, request.shardId());
-            }
-
-            if (request.docSource() != null && request.docSource().length() != 0) {
-                parsedDocument = parseFetchedDoc(context, request.docSource(), percolateIndexService, request.shardId().getIndex(), request.documentType());
-            } else if (parsedDocument == null) {
-                throw new IllegalArgumentException("Nothing to percolate");
+            ParsedDocument parsedDocument = percolateDocumentParser.parse(request, context, percolateIndexService.mapperService(), percolateIndexService.getQueryShardContext());
+            if (context.searcher().getIndexReader().maxDoc() == 0) {
+                return new PercolateShardResponse(Lucene.EMPTY_TOP_DOCS, Collections.emptyMap(), Collections.emptyMap(), context);
             }
-
-            if (context.percolateQuery() == null && (context.trackScores() || context.doSort || context.aggregations() != null) || context.aliasFilter() != null) {
-                context.percolateQuery(new MatchAllDocsQuery());
-            }
-
-            if (context.doSort && !context.limit) {
-                throw new IllegalArgumentException("Can't sort if size isn't specified");
-            }
-
-            if (context.highlight() != null && !context.limit) {
-                throw new IllegalArgumentException("Can't highlight if size isn't specified");
-            }
-
             if (context.size() < 0) {
                 context.size(0);
             }
@@ -232,23 +208,27 @@
         } else {
             percolatorIndex = single;
         }

-        PercolatorType action;
-        if (request.onlyCount()) {
-            action = context.percolateQuery() != null ? queryCountPercolator : countPercolator;
-        } else {
-            if (context.doSort) {
-                action = topMatchingPercolator;
-            } else if (context.percolateQuery() != null) {
-                action = context.trackScores() ? scoringPercolator : queryPercolator;
-            } else {
-                action = matchPercolator;
-            }
-        }
-        context.percolatorTypeId = action.id();
-
         percolatorIndex.prepare(context, parsedDocument);
-        return action.doPercolate(request, context, isNested);
+
+        BucketCollector aggregatorCollector = null;
+        if (context.aggregations() != null) {
+            AggregationContext aggregationContext = new AggregationContext(context);
+            context.aggregations().aggregationContext(aggregationContext);
+            Aggregator[] aggregators = context.aggregations().factories().createTopLevelAggregators(aggregationContext);
+            List<Aggregator> aggregatorCollectors = new ArrayList<>(aggregators.length);
+            for (int i = 0; i < aggregators.length; i++) {
+                if (!(aggregators[i] instanceof GlobalAggregator)) {
+                    Aggregator aggregator = aggregators[i];
+                    aggregatorCollectors.add(aggregator);
+                }
+            }
+            context.aggregations().aggregators(aggregators);
+            aggregatorCollector = BucketCollector.wrap(aggregatorCollectors);
+            aggregatorCollector.preCollection();
+        }
+        PercolatorQueriesRegistry queriesRegistry = indexShard.percolateRegistry();
+        return doPercolate(context, queriesRegistry, aggregationPhase, aggregatorCollector, highlightPhase);
     } finally {
         SearchContext.removeCurrent();
         context.close();
@@ -256,566 +236,101 @@
         }
     }

-    private ParsedDocument parseRequest(IndexShard shard, PercolateShardRequest request, PercolateContext context, String index) {
-        BytesReference source = request.source();
-        if (source == null || source.length() == 0) {
-            return null;
-        }
-
-        // TODO: combine all feature parse elements into one map
-        Map<String, ? extends SearchParseElement> hlElements = highlightPhase.parseElements();
-        Map<String, ? extends SearchParseElement> aggregationElements = aggregationPhase.parseElements();
-
-        ParsedDocument doc = null;
-        XContentParser parser = null;
-
-        // Some queries (function_score query when for decay functions) rely on a SearchContext being set:
-        // We switch types because this context needs to be in the context of the percolate queries in the shard and
-        // not the in memory percolate doc
-        String[] previousTypes = context.types();
-        context.types(new String[]{TYPE_NAME});
-        QueryShardContext queryShardContext = shard.getQueryShardContext();
-        try {
-            parser = XContentFactory.xContent(source).createParser(source);
-            String currentFieldName = null;
-            XContentParser.Token token;
-            while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
-                if (token == XContentParser.Token.FIELD_NAME) {
-                    currentFieldName = parser.currentName();
-                    // we need to check the "doc" here, so the next token will be START_OBJECT which is
-                    // the actual document starting
-                    if ("doc".equals(currentFieldName)) {
-                        if (doc != null) {
-                            throw new ElasticsearchParseException("Either specify doc or get, not both");
-                        }
-                        MapperService mapperService = shard.mapperService();
-                        DocumentMapperForType docMapper = mapperService.documentMapperWithAutoCreate(request.documentType());
-                        doc = docMapper.getDocumentMapper().parse(source(parser).index(index).type(request.documentType()).flyweight(true));
-                        if (docMapper.getMapping() != null) {
-                            doc.addDynamicMappingsUpdate(docMapper.getMapping());
-                        }
-                        if (doc.dynamicMappingsUpdate() != null) {
-                            mappingUpdatedAction.updateMappingOnMasterSynchronously(request.shardId().getIndex(), request.documentType(), doc.dynamicMappingsUpdate());
-                        }
-                        // the document parsing exists the "doc" object, so we need to set the new current field.
-                        currentFieldName = parser.currentName();
-                    }
-                } else if (token == XContentParser.Token.START_OBJECT) {
-                    SearchParseElement element = hlElements.get(currentFieldName);
-                    if (element == null) {
-                        element = aggregationElements.get(currentFieldName);
-                    }
-                    if ("query".equals(currentFieldName)) {
-                        if (context.percolateQuery() != null) {
-                            throw new ElasticsearchParseException("Either specify query or filter, not both");
-                        }
-                        context.percolateQuery(queryShardContext.parse(parser).query());
-                    } else if ("filter".equals(currentFieldName)) {
-                        if (context.percolateQuery() != null) {
-                            throw new ElasticsearchParseException("Either specify query or filter, not both");
-                        }
-                        Query filter = queryShardContext.parseInnerFilter(parser).query();
-                        context.percolateQuery(new ConstantScoreQuery(filter));
-                    } else if ("sort".equals(currentFieldName)) {
-                        parseSort(parser, context);
-                    } else if (element != null) {
-                        element.parse(parser, context);
-                    }
-                } else if (token == XContentParser.Token.START_ARRAY) {
-                    if ("sort".equals(currentFieldName)) {
-                        parseSort(parser, context);
-                    }
-                } else if (token == null) {
-                    break;
-                } else if (token.isValue()) {
-                    if ("size".equals(currentFieldName)) {
-                        context.size(parser.intValue());
-                        if (context.size() < 0) {
-                            throw new ElasticsearchParseException("size is set to [{}] and is expected to be higher or equal to 0", context.size());
-                        }
-                    } else if ("sort".equals(currentFieldName)) {
-                        parseSort(parser, context);
-                    } else if ("track_scores".equals(currentFieldName) || "trackScores".equals(currentFieldName)) {
-                        context.trackScores(parser.booleanValue());
-                    }
-                }
-            }
-
-            // We need to get the actual source from the request body for highlighting, so parse the request body again
-            // and only get the doc source.
-            if (context.highlight() != null) {
-                parser.close();
-                currentFieldName = null;
-                parser = XContentFactory.xContent(source).createParser(source);
-                token = parser.nextToken();
-                assert token == XContentParser.Token.START_OBJECT;
-                while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
-                    if (token == XContentParser.Token.FIELD_NAME) {
-                        currentFieldName = parser.currentName();
-                    } else if (token == XContentParser.Token.START_OBJECT) {
-                        if ("doc".equals(currentFieldName)) {
-                            BytesStreamOutput bStream = new BytesStreamOutput();
-                            XContentBuilder builder = XContentFactory.contentBuilder(XContentType.SMILE, bStream);
-                            builder.copyCurrentStructure(parser);
-                            builder.close();
-                            doc.setSource(bStream.bytes());
-                            break;
-                        } else {
-                            parser.skipChildren();
-                        }
-                    } else if (token == null) {
-                        break;
-                    }
-                }
-            }
-        } catch (Throwable e) {
-            throw new ElasticsearchParseException("failed to parse request", e);
-        } finally {
-            context.types(previousTypes);
-            if (parser != null) {
-                parser.close();
-            }
-        }
-
-        return doc;
-    }
-
-    private void parseSort(XContentParser parser, PercolateContext context) throws Exception {
-        sortParseElement.parse(parser, context);
-        // null, means default sorting by relevancy
-        if (context.sort() == null) {
-            context.doSort = true;
-        } else {
-            throw new ElasticsearchParseException("Only _score desc is supported");
-        }
-    }
-
-    private ParsedDocument parseFetchedDoc(PercolateContext context, BytesReference fetchedDoc, IndexService documentIndexService, String index, String type) {
-        ParsedDocument doc = null;
-        XContentParser parser = null;
-        try {
-            parser = XContentFactory.xContent(fetchedDoc).createParser(fetchedDoc);
-            MapperService mapperService = documentIndexService.mapperService();
-            DocumentMapperForType docMapper = mapperService.documentMapperWithAutoCreate(type);
-            doc = docMapper.getDocumentMapper().parse(source(parser).index(index).type(type).flyweight(true));
-            if (context.highlight() != null) {
-                doc.setSource(fetchedDoc);
-            }
-        } catch (Throwable e) {
-            throw new ElasticsearchParseException("failed to parse request", e);
-        } finally {
-            if (parser != null) {
-                parser.close();
-            }
-        }
-
-        if (doc == null) {
-            throw new ElasticsearchParseException("No doc to percolate in the request");
-        }
-
-        return doc;
-    }
+    // moved the core percolation logic to a pck protected method to make testing easier:
+    static PercolateShardResponse doPercolate(PercolateContext context, PercolatorQueriesRegistry queriesRegistry, AggregationPhase aggregationPhase, @Nullable BucketCollector aggregatorCollector, HighlightPhase highlightPhase) throws IOException {
+        PercolatorQuery.Builder builder = new PercolatorQuery.Builder(context.docSearcher(), queriesRegistry.getPercolateQueries(), context.percolatorTypeFilter());
+        if (queriesRegistry.indexSettings().getSettings().getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null).onOrAfter(Version.V_3_0_0)) {
+            builder.extractQueryTermsQuery(PercolatorFieldMapper.EXTRACTED_TERMS_FULL_FIELD_NAME, PercolatorFieldMapper.UNKNOWN_QUERY_FULL_FIELD_NAME);
+        }
+        if (context.percolateQuery() != null || context.aliasFilter() != null) {
+            BooleanQuery.Builder bq = new BooleanQuery.Builder();
+            if (context.percolateQuery() != null) {
+                bq.add(context.percolateQuery(), MUST);
+            }
+            if (context.aliasFilter() != null) {
+                bq.add(context.aliasFilter(), FILTER);
+            }
+            builder.setPercolateQuery(bq.build());
+        }
+        PercolatorQuery percolatorQuery = builder.build();
+
+        if (context.isOnlyCount() || context.size() == 0) {
+            TotalHitCountCollector collector = new TotalHitCountCollector();
+            context.searcher().search(percolatorQuery, MultiCollector.wrap(collector, aggregatorCollector));
+            if (aggregatorCollector != null) {
+                aggregatorCollector.postCollection();
+                aggregationPhase.execute(context);
+            }
+            return new PercolateShardResponse(new TopDocs(collector.getTotalHits(), Lucene.EMPTY_SCORE_DOCS, 0f), Collections.emptyMap(), Collections.emptyMap(), context);
+        } else {
+            int size = context.size();
+            if (size > context.searcher().getIndexReader().maxDoc()) {
+                // prevent easy OOM if more than the total number of docs that exist is requested...
+                size = context.searcher().getIndexReader().maxDoc();
+            }
+            TopScoreDocCollector collector = TopScoreDocCollector.create(size);
+            context.searcher().search(percolatorQuery, MultiCollector.wrap(collector, aggregatorCollector));
+            if (aggregatorCollector != null) {
+                aggregatorCollector.postCollection();
+                aggregationPhase.execute(context);
+            }
+
+            TopDocs topDocs = collector.topDocs();
+            Map<Integer, String> ids = new HashMap<>(topDocs.scoreDocs.length);
+            Map<Integer, Map<String, HighlightField>> hls = new HashMap<>(topDocs.scoreDocs.length);
+            for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
+                if (context.trackScores() == false) {
+                    // No sort or tracking scores was provided, so use special value to indicate to not show the scores:
+                    scoreDoc.score = NO_SCORE;
+                }
+                int segmentIdx = ReaderUtil.subIndex(scoreDoc.doc, context.searcher().getIndexReader().leaves());
+                LeafReaderContext atomicReaderContext = context.searcher().getIndexReader().leaves().get(segmentIdx);
+                final int segmentDocId = scoreDoc.doc - atomicReaderContext.docBase;
+                SingleFieldsVisitor fieldsVisitor = new SingleFieldsVisitor(UidFieldMapper.NAME);
+                atomicReaderContext.reader().document(segmentDocId, fieldsVisitor);
+                String id = fieldsVisitor.uid().id();
+                ids.put(scoreDoc.doc, id);
+                if (context.highlight() != null) {
+                    Query query = queriesRegistry.getPercolateQueries().get(new BytesRef(id));
+                    context.parsedQuery(new ParsedQuery(query));
+                    context.hitContext().cache().clear();
+                    highlightPhase.hitExecute(context, context.hitContext());
+                    hls.put(scoreDoc.doc, context.hitContext().hit().getHighlightFields());
+                }
+            }
+            return new PercolateShardResponse(topDocs, ids, hls, context);
+        }
+    }

     public void close() {
         cache.close();
     }

-    interface PercolatorType {
-
-        // 0x00 is reserved for empty type.
-        byte id();
-
-        ReduceResult reduce(List<PercolateShardResponse> shardResults, HasContextAndHeaders headersContext);
-
-        PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context, boolean isNested);
-
-    }
-
-    private final PercolatorType countPercolator = new PercolatorType() {
-
-        @Override
-        public byte id() {
-            return 0x01;
-        }
-
-        @Override
-        public ReduceResult reduce(List<PercolateShardResponse> shardResults, HasContextAndHeaders headersContext) {
-            long finalCount = 0;
-            for (PercolateShardResponse shardResponse : shardResults) {
-                finalCount += shardResponse.count();
-            }
-
-            assert !shardResults.isEmpty();
-            InternalAggregations reducedAggregations = reduceAggregations(shardResults, headersContext);
-            return new ReduceResult(finalCount, reducedAggregations);
-        }
-
-        @Override
-        public PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context, boolean isNested) {
-            long count = 0;
-            for (Map.Entry<BytesRef, Query> entry : context.percolateQueries().entrySet()) {
-                try {
-                    Query existsQuery = entry.getValue();
-                    if (isNested) {
-                        existsQuery = new BooleanQuery.Builder()
-                                .add(existsQuery, Occur.MUST)
-                                .add(Queries.newNonNestedFilter(), Occur.FILTER)
-                                .build();
-                    }
-                    if (Lucene.exists(context.docSearcher(), existsQuery)) {
-                        count ++;
-                    }
-                } catch (Throwable e) {
-                    logger.debug("[" + entry.getKey() + "] failed to execute query", e);
-                    throw new PercolateException(context.indexShard().shardId(), "failed to execute", e);
-                }
-            }
-            return new PercolateShardResponse(count, context, request.shardId());
-        }
-
-    };
-
-    private final PercolatorType queryCountPercolator = new PercolatorType() {
-
-        @Override
-        public byte id() {
-            return 0x02;
-        }
-
-        @Override
-        public ReduceResult reduce(List<PercolateShardResponse> shardResults, HasContextAndHeaders headersContext) {
-            return countPercolator.reduce(shardResults, headersContext);
-        }
-
-        @Override
-        public PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context, boolean isNested) {
-            long count = 0;
-            Engine.Searcher percolatorSearcher = context.indexShard().acquireSearcher("percolate");
-            try {
-                Count countCollector = count(logger, context, isNested);
-                queryBasedPercolating(percolatorSearcher, context, countCollector);
-                count = countCollector.counter();
-            } catch (Throwable e) {
-                logger.warn("failed to execute", e);
-            } finally {
-                percolatorSearcher.close();
-            }
-            return new PercolateShardResponse(count, context, request.shardId());
-        }
-
-    };
-
-    private final PercolatorType matchPercolator = new PercolatorType() {
-
-        @Override
-        public byte id() {
-            return 0x03;
-        }
-
-        @Override
-        public ReduceResult reduce(List<PercolateShardResponse> shardResults, HasContextAndHeaders headersContext) {
-            long foundMatches = 0;
-            int numMatches = 0;
-            for (PercolateShardResponse response : shardResults) {
-                foundMatches += response.count();
-                numMatches += response.matches().length;
-            }
-            int requestedSize = shardResults.get(0).requestedSize();
-
-            // Use a custom impl of AbstractBigArray for Object[]?
-            List<PercolateResponse.Match> finalMatches = new ArrayList<>(requestedSize == 0 ? numMatches : requestedSize);
-            outer:
-            for (PercolateShardResponse response : shardResults) {
-                Text index = new Text(response.getIndex());
-                for (int i = 0; i < response.matches().length; i++) {
-                    float score = response.scores().length == 0 ? NO_SCORE : response.scores()[i];
-                    Text match = new Text(new BytesArray(response.matches()[i]));
-                    Map<String, HighlightField> hl = response.hls().isEmpty() ? null : response.hls().get(i);
-                    finalMatches.add(new PercolateResponse.Match(index, match, score, hl));
-                    if (requestedSize != 0 && finalMatches.size() == requestedSize) {
-                        break outer;
-                    }
-                }
-            }
-
-            assert !shardResults.isEmpty();
-            InternalAggregations reducedAggregations = reduceAggregations(shardResults, headersContext);
-            return new ReduceResult(foundMatches, finalMatches.toArray(new PercolateResponse.Match[finalMatches.size()]), reducedAggregations);
-        }
-
-        @Override
-        public PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context, boolean isNested) {
-            long count = 0;
-            List<BytesRef> matches = new ArrayList<>();
-            List<Map<String, HighlightField>> hls = new ArrayList<>();
-
-            for (Map.Entry<BytesRef, Query> entry : context.percolateQueries().entrySet()) {
-                if (context.highlight() != null) {
-                    context.parsedQuery(new ParsedQuery(entry.getValue()));
-                    context.hitContext().cache().clear();
-                }
-                try {
-                    Query existsQuery = entry.getValue();
-                    if (isNested) {
-                        existsQuery = new BooleanQuery.Builder()
-                                .add(existsQuery, Occur.MUST)
-                                .add(Queries.newNonNestedFilter(), Occur.FILTER)
-                                .build();
-                    }
-                    if (Lucene.exists(context.docSearcher(), existsQuery)) {
-                        if (!context.limit || count < context.size()) {
-                            matches.add(entry.getKey());
-                            if (context.highlight() != null) {
-                                highlightPhase.hitExecute(context, context.hitContext());
-                                hls.add(context.hitContext().hit().getHighlightFields());
-                            }
-                        }
-                        count++;
-                    }
-                } catch (Throwable e) {
-                    logger.debug("[" + entry.getKey() + "] failed to execute query", e);
-                    throw new PercolateException(context.indexShard().shardId(), "failed to execute", e);
-                }
-            }
-
-            BytesRef[] finalMatches = matches.toArray(new BytesRef[matches.size()]);
-            return new PercolateShardResponse(finalMatches, hls, count, context, request.shardId());
-        }
-
-    };
-
-    private final PercolatorType queryPercolator = new PercolatorType() {
-
-        @Override
-        public byte id() {
-            return 0x04;
-        }
-
-        @Override
-        public ReduceResult reduce(List<PercolateShardResponse> shardResults, HasContextAndHeaders headersContext) {
-            return matchPercolator.reduce(shardResults, headersContext);
-        }
-
-        @Override
-        public PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context, boolean isNested) {
-            Engine.Searcher percolatorSearcher = context.indexShard().acquireSearcher("percolate");
-            try {
-                Match match = match(logger, context, highlightPhase, isNested);
-                queryBasedPercolating(percolatorSearcher, context, match);
-                List<BytesRef> matches = match.matches();
-                List<Map<String, HighlightField>> hls = match.hls();
-                long count = match.counter();
-
-                BytesRef[] finalMatches = matches.toArray(new BytesRef[matches.size()]);
-                return new PercolateShardResponse(finalMatches, hls, count, context, request.shardId());
-            } catch (Throwable e) {
-                logger.debug("failed to execute", e);
-                throw new PercolateException(context.indexShard().shardId(), "failed to execute", e);
-            } finally {
-                percolatorSearcher.close();
-            }
-        }
-
-    };
-
-    private final PercolatorType scoringPercolator = new PercolatorType() {
-
-        @Override
-        public byte id() {
-            return 0x05;
-        }
-
-        @Override
-        public ReduceResult reduce(List<PercolateShardResponse> shardResults, HasContextAndHeaders headersContext) {
-            return matchPercolator.reduce(shardResults, headersContext);
-        }
-
-        @Override
-        public PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context, boolean isNested) {
-            Engine.Searcher percolatorSearcher = context.indexShard().acquireSearcher("percolate");
-            try {
-                MatchAndScore matchAndScore = matchAndScore(logger, context, highlightPhase, isNested);
-                queryBasedPercolating(percolatorSearcher, context, matchAndScore);
-                List<BytesRef> matches = matchAndScore.matches();
-                List<Map<String, HighlightField>> hls = matchAndScore.hls();
-                float[] scores = matchAndScore.scores().toArray();
-                long count = matchAndScore.counter();
-
-                BytesRef[] finalMatches = matches.toArray(new BytesRef[matches.size()]);
-                return new PercolateShardResponse(finalMatches, hls, count, scores, context, request.shardId());
-            } catch (Throwable e) {
-                logger.debug("failed to execute", e);
-                throw new PercolateException(context.indexShard().shardId(), "failed to execute", e);
-            } finally {
-                percolatorSearcher.close();
-            }
-        }
-
-    };
-
-    private final PercolatorType topMatchingPercolator = new PercolatorType() {
-
-        @Override
-        public byte id() {
-            return 0x06;
-        }
-
-        @Override
-        public ReduceResult reduce(List<PercolateShardResponse> shardResults, HasContextAndHeaders headersContext) {
-            long foundMatches = 0;
-            int nonEmptyResponses = 0;
-            int firstNonEmptyIndex = 0;
-            for (int i = 0; i < shardResults.size(); i++) {
-                PercolateShardResponse response = shardResults.get(i);
-                foundMatches += response.count();
-                if (response.matches().length != 0) {
-                    if (firstNonEmptyIndex == 0) {
-                        firstNonEmptyIndex = i;
-                    }
-                    nonEmptyResponses++;
-                }
-            }
-
-            int requestedSize = shardResults.get(0).requestedSize();
-
-            // Use a custom impl of AbstractBigArray for Object[]?
-            List<PercolateResponse.Match> finalMatches = new ArrayList<>(requestedSize);
-            if (nonEmptyResponses == 1) {
-                PercolateShardResponse response = shardResults.get(firstNonEmptyIndex);
-                Text index = new Text(response.getIndex());
-                for (int i = 0; i < response.matches().length; i++) {
-                    float score = response.scores().length == 0 ? Float.NaN : response.scores()[i];
-                    Text match = new Text(new BytesArray(response.matches()[i]));
-                    if (!response.hls().isEmpty()) {
-                        Map<String, HighlightField> hl = response.hls().get(i);
-                        finalMatches.add(new PercolateResponse.Match(index, match, score, hl));
-                    } else {
-                        finalMatches.add(new PercolateResponse.Match(index, match, score));
-                    }
-                }
-            } else {
-                int[] slots = new int[shardResults.size()];
-                while (true) {
-                    float lowestScore = Float.NEGATIVE_INFINITY;
-                    int requestIndex = -1;
-                    int itemIndex = -1;
-                    for (int i = 0; i < shardResults.size(); i++) {
-                        int scoreIndex = slots[i];
-                        float[] scores = shardResults.get(i).scores();
-                        if (scoreIndex >= scores.length) {
-                            continue;
-                        }
-
-                        float score = scores[scoreIndex];
-                        int cmp = Float.compare(lowestScore, score);
-                        // TODO: Maybe add a tie?
-                        if (cmp < 0) {
-                            requestIndex = i;
-                            itemIndex = scoreIndex;
-                            lowestScore = score;
-                        }
-                    }
-
-                    // This means the shard matches have been exhausted and we should bail
-                    if (requestIndex == -1) {
-                        break;
-                    }
-
-                    slots[requestIndex]++;
-
-                    PercolateShardResponse shardResponse = shardResults.get(requestIndex);
-                    Text index = new Text(shardResponse.getIndex());
-                    Text match = new Text(new BytesArray(shardResponse.matches()[itemIndex]));
-                    float score = shardResponse.scores()[itemIndex];
-                    if (!shardResponse.hls().isEmpty()) {
-                        Map<String, HighlightField> hl = shardResponse.hls().get(itemIndex);
-                        finalMatches.add(new PercolateResponse.Match(index, match, score, hl));
-                    } else {
-                        finalMatches.add(new PercolateResponse.Match(index, match, score));
-                    }
-
-                    if (finalMatches.size() == requestedSize) {
-                        break;
-                    }
-                }
-            }
-
-            assert !shardResults.isEmpty();
-            InternalAggregations reducedAggregations = reduceAggregations(shardResults, headersContext);
-            return new ReduceResult(foundMatches, finalMatches.toArray(new PercolateResponse.Match[finalMatches.size()]), reducedAggregations);
-        }
-
-        @Override
-        public PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context, boolean isNested) {
-            Engine.Searcher percolatorSearcher = context.indexShard().acquireSearcher("percolate");
-            try {
-                MatchAndSort matchAndSort = QueryCollector.matchAndSort(logger, context, isNested);
-                queryBasedPercolating(percolatorSearcher, context, matchAndSort);
-                TopDocs topDocs = matchAndSort.topDocs();
-                long count = topDocs.totalHits;
-                List<BytesRef> matches = new ArrayList<>(topDocs.scoreDocs.length);
-                float[] scores = new float[topDocs.scoreDocs.length];
-                List<Map<String, HighlightField>> hls = null;
-                if (context.highlight() != null) {
-                    hls = new ArrayList<>(topDocs.scoreDocs.length);
-                }
-
-                final MappedFieldType uidMapper = context.mapperService().fullName(UidFieldMapper.NAME);
-                final IndexFieldData<?> uidFieldData = context.fieldData().getForField(uidMapper);
-                int i = 0;
-                for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
-                    int segmentIdx = ReaderUtil.subIndex(scoreDoc.doc, percolatorSearcher.reader().leaves());
-                    LeafReaderContext atomicReaderContext = percolatorSearcher.reader().leaves().get(segmentIdx);
-                    SortedBinaryDocValues values = uidFieldData.load(atomicReaderContext).getBytesValues();
-                    final int localDocId = scoreDoc.doc - atomicReaderContext.docBase;
-                    values.setDocument(localDocId);
-                    final int numValues = values.count();
-                    assert numValues == 1;
-                    BytesRef bytes = Uid.splitUidIntoTypeAndId(values.valueAt(0))[1];
-                    matches.add(BytesRef.deepCopyOf(bytes));
-                    if (hls != null) {
-                        Query query = context.percolateQueries().get(bytes);
-                        context.parsedQuery(new ParsedQuery(query));
-                        context.hitContext().cache().clear();
-                        highlightPhase.hitExecute(context, context.hitContext());
-                        hls.add(i, context.hitContext().hit().getHighlightFields());
-                    }
-                    scores[i++] = scoreDoc.score;
-                }
-                if (hls != null) {
-                    return new PercolateShardResponse(matches.toArray(new BytesRef[matches.size()]), hls, count, scores, context, request.shardId());
-                } else {
-                    return new PercolateShardResponse(matches.toArray(new BytesRef[matches.size()]), count, scores, context, request.shardId());
-                }
-            } catch (Throwable e) {
-                logger.debug("failed to execute", e);
-                throw new PercolateException(context.indexShard().shardId(), "failed to execute", e);
-            } finally {
-                percolatorSearcher.close();
-            }
-        }
-
-    };
-
-    private void queryBasedPercolating(Engine.Searcher percolatorSearcher, PercolateContext context, QueryCollector percolateCollector) throws IOException {
-        Query percolatorTypeFilter = context.indexService().mapperService().documentMapper(TYPE_NAME).typeFilter();
-
-        final Query filter;
-        if (context.aliasFilter() != null) {
-            BooleanQuery.Builder booleanFilter = new BooleanQuery.Builder();
-            booleanFilter.add(context.aliasFilter(), BooleanClause.Occur.MUST);
-            booleanFilter.add(percolatorTypeFilter, BooleanClause.Occur.MUST);
-            filter = booleanFilter.build();
-        } else {
-            filter = percolatorTypeFilter;
-        }
-
-        Query query = Queries.filtered(context.percolateQuery(), filter);
-        percolatorSearcher.searcher().search(query, percolateCollector);
-        percolateCollector.aggregatorCollector.postCollection();
-        if (context.aggregations() != null) {
-            aggregationPhase.execute(context);
-        }
-    }
+    private InternalAggregations reduceAggregations(List<PercolateShardResponse> shardResults, HasContextAndHeaders headersContext) {
+        if (shardResults.get(0).aggregations() == null) {
+            return null;
+        }
+
+        List<InternalAggregations> aggregationsList = new ArrayList<>(shardResults.size());
+        for (PercolateShardResponse shardResult : shardResults) {
+            aggregationsList.add(shardResult.aggregations());
+        }
+        InternalAggregations aggregations = InternalAggregations.reduce(aggregationsList, new InternalAggregation.ReduceContext(bigArrays, scriptService, headersContext));
+        if (aggregations != null) {
+            List<SiblingPipelineAggregator> pipelineAggregators = shardResults.get(0).pipelineAggregators();
+            if (pipelineAggregators != null) {
+                List<InternalAggregation> newAggs = StreamSupport.stream(aggregations.spliterator(), false).map((p) -> {
+                    return (InternalAggregation) p;
+                }).collect(Collectors.toList());
+                for (SiblingPipelineAggregator pipelineAggregator : pipelineAggregators) {
+                    InternalAggregation newAgg = pipelineAggregator.doReduce(new InternalAggregations(newAggs), new InternalAggregation.ReduceContext(bigArrays, scriptService, headersContext));
+                    newAggs.add(newAgg);
+                }
+                aggregations = new InternalAggregations(newAggs);
+            }
+        }
+        return aggregations;
+    }

     public final static class ReduceResult {
@@ -849,32 +364,5 @@ public class PercolatorService extends AbstractComponent {
         }
     }

-    private InternalAggregations reduceAggregations(List<PercolateShardResponse> shardResults, HasContextAndHeaders headersContext) {
-        if (shardResults.get(0).aggregations() == null) {
-            return null;
-        }
-
-        List<InternalAggregations> aggregationsList = new ArrayList<>(shardResults.size());
-        for (PercolateShardResponse shardResult : shardResults) {
-            aggregationsList.add(shardResult.aggregations());
-        }
-        InternalAggregations aggregations = InternalAggregations.reduce(aggregationsList, new ReduceContext(bigArrays, scriptService,
-                headersContext));
-        if (aggregations != null) {
-            List<SiblingPipelineAggregator> pipelineAggregators = shardResults.get(0).pipelineAggregators();
-            if (pipelineAggregators != null) {
-                List<InternalAggregation> newAggs = StreamSupport.stream(aggregations.spliterator(), false).map((p) -> {
-                    return (InternalAggregation) p;
-                }).collect(Collectors.toList());
-                for (SiblingPipelineAggregator pipelineAggregator : pipelineAggregators) {
-                    InternalAggregation newAgg = pipelineAggregator.doReduce(new InternalAggregations(newAggs), new ReduceContext(
-                            bigArrays, scriptService, headersContext));
-                    newAggs.add(newAgg);
-                }
-                aggregations = new InternalAggregations(newAggs);
-            }
-        }
-        return aggregations;
-    }
 }
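
The new doPercolate above runs a single search and feeds it through MultiCollector.wrap, so hit counting (or top-docs collection) and aggregation collection happen in one pass over the matching percolator queries. A runnable sketch of that one-pass pattern against a throwaway index (index contents and field names are made up for illustration):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.MultiCollector;
import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.search.TotalHitCountCollector;
import org.apache.lucene.store.RAMDirectory;

public class OnePassSketch {
    public static void main(String[] args) throws Exception {
        RAMDirectory dir = new RAMDirectory();
        try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
            Document doc = new Document();
            doc.add(new TextField("query", "user interested in fox", Field.Store.NO));
            writer.addDocument(doc);
        }
        try (DirectoryReader reader = DirectoryReader.open(dir)) {
            IndexSearcher searcher = new IndexSearcher(reader);
            TotalHitCountCollector count = new TotalHitCountCollector();
            TopScoreDocCollector topDocs = TopScoreDocCollector.create(10);
            // both collectors see every hit of the single search
            searcher.search(new MatchAllDocsQuery(), MultiCollector.wrap(count, topDocs));
            System.out.println(count.getTotalHits() + " hits, top=" + topDocs.topDocs().scoreDocs.length);
        }
    }
}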

View File

@@ -1,403 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.percolator;
import com.carrotsearch.hppc.FloatArrayList;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.SimpleCollector;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.BucketCollector;
import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregator;
import org.elasticsearch.search.aggregations.support.AggregationContext;
import org.elasticsearch.search.highlight.HighlightField;
import org.elasticsearch.search.highlight.HighlightPhase;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentMap;
/**
*/
abstract class QueryCollector extends SimpleCollector {
final IndexFieldData<?> uidFieldData;
final IndexSearcher searcher;
final ConcurrentMap<BytesRef, Query> queries;
final ESLogger logger;
boolean isNestedDoc = false;
BytesRef current;
SortedBinaryDocValues values;
final BucketCollector aggregatorCollector;
LeafCollector aggregatorLeafCollector;
QueryCollector(ESLogger logger, PercolateContext context, boolean isNestedDoc) throws IOException {
this.logger = logger;
this.queries = context.percolateQueries();
this.searcher = context.docSearcher();
final MappedFieldType uidMapper = context.mapperService().fullName(UidFieldMapper.NAME);
this.uidFieldData = context.fieldData().getForField(uidMapper);
this.isNestedDoc = isNestedDoc;
List<Aggregator> aggregatorCollectors = new ArrayList<>();
if (context.aggregations() != null) {
AggregationContext aggregationContext = new AggregationContext(context);
context.aggregations().aggregationContext(aggregationContext);
Aggregator[] aggregators = context.aggregations().factories().createTopLevelAggregators(aggregationContext);
for (int i = 0; i < aggregators.length; i++) {
if (!(aggregators[i] instanceof GlobalAggregator)) {
Aggregator aggregator = aggregators[i];
aggregatorCollectors.add(aggregator);
}
}
context.aggregations().aggregators(aggregators);
}
aggregatorCollector = BucketCollector.wrap(aggregatorCollectors);
aggregatorCollector.preCollection();
}
public void postMatch(int doc) throws IOException {
aggregatorLeafCollector.collect(doc);
}
@Override
public void setScorer(Scorer scorer) throws IOException {
aggregatorLeafCollector.setScorer(scorer);
}
@Override
public boolean needsScores() {
return aggregatorCollector.needsScores();
}
@Override
public void doSetNextReader(LeafReaderContext context) throws IOException {
// we use the UID because id might not be indexed
values = uidFieldData.load(context).getBytesValues();
aggregatorLeafCollector = aggregatorCollector.getLeafCollector(context);
}
static Match match(ESLogger logger, PercolateContext context, HighlightPhase highlightPhase, boolean isNestedDoc) throws IOException {
return new Match(logger, context, highlightPhase, isNestedDoc);
}
static Count count(ESLogger logger, PercolateContext context, boolean isNestedDoc) throws IOException {
return new Count(logger, context, isNestedDoc);
}
static MatchAndScore matchAndScore(ESLogger logger, PercolateContext context, HighlightPhase highlightPhase, boolean isNestedDoc) throws IOException {
return new MatchAndScore(logger, context, highlightPhase, isNestedDoc);
}
static MatchAndSort matchAndSort(ESLogger logger, PercolateContext context, boolean isNestedDoc) throws IOException {
return new MatchAndSort(logger, context, isNestedDoc);
}
protected final Query getQuery(int doc) {
values.setDocument(doc);
final int numValues = values.count();
if (numValues == 0) {
return null;
}
assert numValues == 1;
current = Uid.splitUidIntoTypeAndId(values.valueAt(0))[1];
return queries.get(current);
}
final static class Match extends QueryCollector {
final PercolateContext context;
final HighlightPhase highlightPhase;
final List<BytesRef> matches = new ArrayList<>();
final List<Map<String, HighlightField>> hls = new ArrayList<>();
final boolean limit;
final int size;
long counter = 0;
Match(ESLogger logger, PercolateContext context, HighlightPhase highlightPhase, boolean isNestedDoc) throws IOException {
super(logger, context, isNestedDoc);
this.limit = context.limit;
this.size = context.size();
this.context = context;
this.highlightPhase = highlightPhase;
}
@Override
public void collect(int doc) throws IOException {
final Query query = getQuery(doc);
if (query == null) {
// log???
return;
}
Query existsQuery = query;
if (isNestedDoc) {
existsQuery = new BooleanQuery.Builder()
.add(existsQuery, Occur.MUST)
.add(Queries.newNonNestedFilter(), Occur.FILTER)
.build();
}
// run the query
try {
if (context.highlight() != null) {
context.parsedQuery(new ParsedQuery(query));
context.hitContext().cache().clear();
}
if (Lucene.exists(searcher, existsQuery)) {
if (!limit || counter < size) {
matches.add(BytesRef.deepCopyOf(current));
if (context.highlight() != null) {
highlightPhase.hitExecute(context, context.hitContext());
hls.add(context.hitContext().hit().getHighlightFields());
}
}
counter++;
postMatch(doc);
}
} catch (IOException e) {
logger.warn("[" + current.utf8ToString() + "] failed to execute query", e);
}
}
long counter() {
return counter;
}
List<BytesRef> matches() {
return matches;
}
List<Map<String, HighlightField>> hls() {
return hls;
}
}
final static class MatchAndSort extends QueryCollector {
private final TopScoreDocCollector topDocsCollector;
private LeafCollector topDocsLeafCollector;
MatchAndSort(ESLogger logger, PercolateContext context, boolean isNestedDoc) throws IOException {
super(logger, context, isNestedDoc);
// TODO: Use TopFieldCollector.create(...) for ascending and descending scoring?
topDocsCollector = TopScoreDocCollector.create(context.size());
}
@Override
public boolean needsScores() {
return super.needsScores() || topDocsCollector.needsScores();
}
@Override
public void collect(int doc) throws IOException {
final Query query = getQuery(doc);
if (query == null) {
// log???
return;
}
Query existsQuery = query;
if (isNestedDoc) {
existsQuery = new BooleanQuery.Builder()
.add(existsQuery, Occur.MUST)
.add(Queries.newNonNestedFilter(), Occur.FILTER)
.build();
}
// run the query
try {
if (Lucene.exists(searcher, existsQuery)) {
topDocsLeafCollector.collect(doc);
postMatch(doc);
}
} catch (IOException e) {
logger.warn("[" + current.utf8ToString() + "] failed to execute query", e);
}
}
@Override
public void doSetNextReader(LeafReaderContext context) throws IOException {
super.doSetNextReader(context);
topDocsLeafCollector = topDocsCollector.getLeafCollector(context);
}
@Override
public void setScorer(Scorer scorer) throws IOException {
topDocsLeafCollector.setScorer(scorer);
}
TopDocs topDocs() {
return topDocsCollector.topDocs();
}
}
final static class MatchAndScore extends QueryCollector {
final PercolateContext context;
final HighlightPhase highlightPhase;
final List<BytesRef> matches = new ArrayList<>();
final List<Map<String, HighlightField>> hls = new ArrayList<>();
// TODO: Use thread local in order to cache the scores lists?
final FloatArrayList scores = new FloatArrayList();
final boolean limit;
final int size;
long counter = 0;
private Scorer scorer;
MatchAndScore(ESLogger logger, PercolateContext context, HighlightPhase highlightPhase, boolean isNestedDoc) throws IOException {
super(logger, context, isNestedDoc);
this.limit = context.limit;
this.size = context.size();
this.context = context;
this.highlightPhase = highlightPhase;
}
@Override
public boolean needsScores() {
return true;
}
@Override
public void collect(int doc) throws IOException {
final Query query = getQuery(doc);
if (query == null) {
// log???
return;
}
Query existsQuery = query;
if (isNestedDoc) {
existsQuery = new BooleanQuery.Builder()
.add(existsQuery, Occur.MUST)
.add(Queries.newNonNestedFilter(), Occur.FILTER)
.build();
}
// run the query
try {
if (context.highlight() != null) {
context.parsedQuery(new ParsedQuery(query));
context.hitContext().cache().clear();
}
if (Lucene.exists(searcher, existsQuery)) {
if (!limit || counter < size) {
matches.add(BytesRef.deepCopyOf(current));
scores.add(scorer.score());
if (context.highlight() != null) {
highlightPhase.hitExecute(context, context.hitContext());
hls.add(context.hitContext().hit().getHighlightFields());
}
}
counter++;
postMatch(doc);
}
} catch (IOException e) {
logger.warn("[" + current.utf8ToString() + "] failed to execute query", e);
}
}
@Override
public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer;
}
long counter() {
return counter;
}
List<BytesRef> matches() {
return matches;
}
FloatArrayList scores() {
return scores;
}
List<Map<String, HighlightField>> hls() {
return hls;
}
}
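    // Note: matches and scores above are parallel lists, and hls lines up with them
    // whenever highlighting is enabled; the i-th score belongs to the i-th matched
    // percolator query id. A hypothetical caller would read them back as:
    //
    //     for (int i = 0; i < matchAndScore.matches().size(); i++) {
    //         BytesRef queryId = matchAndScore.matches().get(i);
    //         float score = matchAndScore.scores().get(i);
    //     }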
    static final class Count extends QueryCollector {
private long counter = 0;
Count(ESLogger logger, PercolateContext context, boolean isNestedDoc) throws IOException {
super(logger, context, isNestedDoc);
}
@Override
public void collect(int doc) throws IOException {
final Query query = getQuery(doc);
if (query == null) {
                // the percolator query for this document could not be resolved; nothing to match
return;
}
Query existsQuery = query;
if (isNestedDoc) {
existsQuery = new BooleanQuery.Builder()
.add(existsQuery, Occur.MUST)
.add(Queries.newNonNestedFilter(), Occur.FILTER)
.build();
}
// run the query
try {
if (Lucene.exists(searcher, existsQuery)) {
counter++;
postMatch(doc);
}
} catch (IOException e) {
logger.warn("[" + current.utf8ToString() + "] failed to execute query", e);
}
}
long counter() {
return counter;
}
}
}
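
A minimal sketch of how these collectors are driven, assuming 'searcher' is the percolator's IndexSearcher over the candidate documents and 'percolateQuery' selects them (both hypothetical names; logger, context and isNestedDoc are as in the constructors above):

    Count count = new Count(logger, context, isNestedDoc);
    searcher.search(percolateQuery, count);           // collect(doc) fires once per candidate
    long total = count.counter();

    MatchAndSort matchAndSort = new MatchAndSort(logger, context, isNestedDoc);
    searcher.search(percolateQuery, matchAndSort);
    TopDocs best = matchAndSort.topDocs();            // top context.size() matches by score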

View File

@@ -40,7 +40,6 @@ import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestResponse;
 import org.elasticsearch.rest.action.support.RestBuilderListener;
-import org.elasticsearch.search.warmer.IndexWarmersMetaData;
 
 import java.io.IOException;
 import java.util.List;
@@ -100,9 +99,6 @@ public class RestGetIndicesAction extends BaseRestHandler {
                     case SETTINGS:
                         writeSettings(response.settings().get(index), builder, request);
                         break;
-                    case WARMERS:
-                        writeWarmers(response.warmers().get(index), builder, request);
-                        break;
                     default:
                         throw new IllegalStateException("feature [" + feature + "] is not valid");
                 }
@@ -142,15 +138,6 @@ public class RestGetIndicesAction extends BaseRestHandler {
         builder.endObject();
     }
 
-    private void writeWarmers(List<IndexWarmersMetaData.Entry> warmers, XContentBuilder builder, Params params) throws IOException {
-        builder.startObject(Fields.WARMERS);
-        if (warmers != null) {
-            for (IndexWarmersMetaData.Entry warmer : warmers) {
-                IndexWarmersMetaData.toXContent(warmer, builder, params);
-            }
-        }
-        builder.endObject();
-    }
 
         });
     }

View File

@@ -1,58 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.rest.action.admin.indices.warmer.delete;
import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerRequest;
import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.support.AcknowledgedRestListener;
import static org.elasticsearch.rest.RestRequest.Method.DELETE;
/**
*/
public class RestDeleteWarmerAction extends BaseRestHandler {
@Inject
public RestDeleteWarmerAction(Settings settings, RestController controller, Client client) {
super(settings, controller, client);
controller.registerHandler(DELETE, "/{index}/_warmer", this);
controller.registerHandler(DELETE, "/{index}/_warmer/{name}", this);
controller.registerHandler(DELETE, "/{index}/_warmers", this);
controller.registerHandler(DELETE, "/{index}/_warmers/{name}", this);
}
@Override
public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) {
DeleteWarmerRequest deleteWarmerRequest = new DeleteWarmerRequest(Strings.splitStringByCommaToArray(request.param("name")))
.indices(Strings.splitStringByCommaToArray(request.param("index")));
deleteWarmerRequest.timeout(request.paramAsTime("timeout", deleteWarmerRequest.timeout()));
deleteWarmerRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteWarmerRequest.masterNodeTimeout()));
deleteWarmerRequest.indicesOptions(IndicesOptions.fromRequest(request, deleteWarmerRequest.indicesOptions()));
client.admin().indices().deleteWarmer(deleteWarmerRequest, new AcknowledgedRestListener<DeleteWarmerResponse>(channel));
}
}

View File

@@ -1,92 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.rest.action.admin.indices.warmer.get;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequest;
import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestResponse;
import org.elasticsearch.rest.action.support.RestBuilderListener;
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
import java.util.List;
import static org.elasticsearch.rest.RestRequest.Method.GET;
import static org.elasticsearch.rest.RestStatus.OK;
/**
*
*/
public class RestGetWarmerAction extends BaseRestHandler {
@Inject
public RestGetWarmerAction(Settings settings, RestController controller, Client client) {
super(settings, controller, client);
controller.registerHandler(GET, "/_warmer/{name}", this);
controller.registerHandler(GET, "/{index}/_warmer/{name}", this);
controller.registerHandler(GET, "/{index}/_warmers/{name}", this);
controller.registerHandler(GET, "/{index}/{type}/_warmer/{name}", this);
}
@Override
public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) {
final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
final String[] types = Strings.splitStringByCommaToArray(request.param("type"));
final String[] names = request.paramAsStringArray("name", Strings.EMPTY_ARRAY);
GetWarmersRequest getWarmersRequest = new GetWarmersRequest();
getWarmersRequest.indices(indices).types(types).warmers(names);
getWarmersRequest.local(request.paramAsBoolean("local", getWarmersRequest.local()));
getWarmersRequest.indicesOptions(IndicesOptions.fromRequest(request, getWarmersRequest.indicesOptions()));
client.admin().indices().getWarmers(getWarmersRequest, new RestBuilderListener<GetWarmersResponse>(channel) {
@Override
public RestResponse buildResponse(GetWarmersResponse response, XContentBuilder builder) throws Exception {
if (indices.length > 0 && response.warmers().isEmpty()) {
return new BytesRestResponse(OK, builder.startObject().endObject());
}
builder.startObject();
for (ObjectObjectCursor<String, List<IndexWarmersMetaData.Entry>> entry : response.warmers()) {
builder.startObject(entry.key, XContentBuilder.FieldCaseConversion.NONE);
builder.startObject(IndexWarmersMetaData.TYPE, XContentBuilder.FieldCaseConversion.NONE);
for (IndexWarmersMetaData.Entry warmerEntry : entry.value) {
IndexWarmersMetaData.toXContent(warmerEntry, builder, request);
}
builder.endObject();
builder.endObject();
}
builder.endObject();
return new BytesRestResponse(OK, builder);
}
});
}
}

View File

@@ -1,85 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.rest.action.admin.indices.warmer.put;
import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.support.AcknowledgedRestListener;
import org.elasticsearch.rest.action.support.RestActions;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import java.io.IOException;
import static org.elasticsearch.rest.RestRequest.Method.POST;
import static org.elasticsearch.rest.RestRequest.Method.PUT;
/**
*/
public class RestPutWarmerAction extends BaseRestHandler {
private final IndicesQueriesRegistry queryRegistry;
@Inject
public RestPutWarmerAction(Settings settings, RestController controller, Client client, IndicesQueriesRegistry queryRegistry) {
super(settings, controller, client);
this.queryRegistry = queryRegistry;
controller.registerHandler(PUT, "/_warmer/{name}", this);
controller.registerHandler(PUT, "/{index}/_warmer/{name}", this);
controller.registerHandler(PUT, "/{index}/{type}/_warmer/{name}", this);
controller.registerHandler(PUT, "/_warmers/{name}", this);
controller.registerHandler(PUT, "/{index}/_warmers/{name}", this);
controller.registerHandler(PUT, "/{index}/{type}/_warmers/{name}", this);
controller.registerHandler(POST, "/_warmer/{name}", this);
controller.registerHandler(POST, "/{index}/_warmer/{name}", this);
controller.registerHandler(POST, "/{index}/{type}/_warmer/{name}", this);
controller.registerHandler(POST, "/_warmers/{name}", this);
controller.registerHandler(POST, "/{index}/_warmers/{name}", this);
controller.registerHandler(POST, "/{index}/{type}/_warmers/{name}", this);
}
@Override
public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws IOException {
PutWarmerRequest putWarmerRequest = new PutWarmerRequest(request.param("name"));
BytesReference sourceBytes = RestActions.getRestContent(request);
SearchSourceBuilder source = RestActions.getRestSearchSource(sourceBytes, queryRegistry, parseFieldMatcher);
SearchRequest searchRequest = new SearchRequest(Strings.splitStringByCommaToArray(request.param("index")))
.types(Strings.splitStringByCommaToArray(request.param("type")))
.requestCache(request.paramAsBoolean("request_cache", null)).source(source);
searchRequest.indicesOptions(IndicesOptions.fromRequest(request, searchRequest.indicesOptions()));
putWarmerRequest.searchRequest(searchRequest);
putWarmerRequest.timeout(request.paramAsTime("timeout", putWarmerRequest.timeout()));
putWarmerRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putWarmerRequest.masterNodeTimeout()));
client.admin().indices().putWarmer(putWarmerRequest, new AcknowledgedRestListener<>(channel));
}
}

View File

@@ -41,7 +41,7 @@ import org.elasticsearch.index.engine.SegmentsStats;
 import org.elasticsearch.index.fielddata.FieldDataStats;
 import org.elasticsearch.index.flush.FlushStats;
 import org.elasticsearch.index.get.GetStats;
-import org.elasticsearch.index.indexing.IndexingStats;
+import org.elasticsearch.index.shard.IndexingStats;
 import org.elasticsearch.index.merge.MergeStats;
 import org.elasticsearch.index.percolator.PercolateStats;
 import org.elasticsearch.index.refresh.RefreshStats;

Some files were not shown because too many files have changed in this diff.