Merge branch 'master' into feature/aggs-refactoring

# Conflicts:
#	core/src/main/java/org/elasticsearch/percolator/PercolatorService.java
#	core/src/main/java/org/elasticsearch/percolator/QueryCollector.java
Colin Goodheart-Smithe 2016-01-07 12:42:02 +00:00
commit 3b7d1b47f7
160 changed files with 3381 additions and 6121 deletions

View File

@@ -566,7 +566,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
REFRESH_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.RefreshFailedEngineException.class, org.elasticsearch.index.engine.RefreshFailedEngineException::new, 90),
AGGREGATION_INITIALIZATION_EXCEPTION(org.elasticsearch.search.aggregations.AggregationInitializationException.class, org.elasticsearch.search.aggregations.AggregationInitializationException::new, 91),
DELAY_RECOVERY_EXCEPTION(org.elasticsearch.indices.recovery.DelayRecoveryException.class, org.elasticsearch.indices.recovery.DelayRecoveryException::new, 92),
INDEX_WARMER_MISSING_EXCEPTION(org.elasticsearch.search.warmer.IndexWarmerMissingException.class, org.elasticsearch.search.warmer.IndexWarmerMissingException::new, 93),
// 93 used to be for IndexWarmerMissingException
NO_NODE_AVAILABLE_EXCEPTION(org.elasticsearch.client.transport.NoNodeAvailableException.class, org.elasticsearch.client.transport.NoNodeAvailableException::new, 94),
INVALID_SNAPSHOT_NAME_EXCEPTION(org.elasticsearch.snapshots.InvalidSnapshotNameException.class, org.elasticsearch.snapshots.InvalidSnapshotNameException::new, 96),
ILLEGAL_INDEX_SHARD_STATE_EXCEPTION(org.elasticsearch.index.shard.IllegalIndexShardStateException.class, org.elasticsearch.index.shard.IllegalIndexShardStateException::new, 97),
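A note on the ElasticsearchException hunk above: the numeric ids in this table are part of the transport protocol, so when IndexWarmerMissingException is removed its id 93 is retired with a comment rather than reassigned, which keeps mixed-version clusters from decoding the slot as an unrelated exception. A minimal, self-contained sketch of that id-stability rule, using a hypothetical registry class rather than Elasticsearch's:

```java
import java.util.HashMap;
import java.util.Map;

// Hypothetical sketch of an id-stable wire registry: ids of removed entries
// are retired, never reused, so an old node that still sends id 93 cannot be
// silently decoded as some newer, unrelated exception.
public final class WireIdRegistry {
    private final Map<Integer, String> idToName = new HashMap<>();

    public void register(int id, String name) {
        if (idToName.putIfAbsent(id, name) != null) {
            throw new IllegalArgumentException("wire id " + id + " already in use");
        }
    }

    public String lookup(int id) {
        String name = idToName.get(id);
        if (name == null) {
            throw new IllegalArgumentException("unknown wire id " + id);
        }
        return name;
    }

    public static void main(String[] args) {
        WireIdRegistry registry = new WireIdRegistry();
        registry.register(92, "DelayRecoveryException");
        // id 93 used to be IndexWarmerMissingException; the slot stays empty
        registry.register(94, "NoNodeAvailableException");
        System.out.println(registry.lookup(94));
    }
}
```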

View File

@@ -25,7 +25,6 @@ import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.monitor.jvm.JvmInfo;
@@ -286,7 +285,8 @@ public class Version {
public static final Version CURRENT = V_3_0_0;
static {
assert CURRENT.luceneVersion.equals(Lucene.VERSION) : "Version must be upgraded to [" + Lucene.VERSION + "] is still set to [" + CURRENT.luceneVersion + "]";
assert CURRENT.luceneVersion.equals(org.apache.lucene.util.Version.LATEST) : "Version must be upgraded to ["
+ org.apache.lucene.util.Version.LATEST + "] is still set to [" + CURRENT.luceneVersion + "]";
}
public static Version readVersion(StreamInput in) throws IOException {
@@ -457,7 +457,6 @@ public class Version {
return V_0_90_0_RC1;
case V_0_90_0_Beta1_ID:
return V_0_90_0_Beta1;
case V_0_20_7_ID:
return V_0_20_7;
case V_0_20_6_ID:
@@ -476,7 +475,6 @@ public class Version {
return V_0_20_0;
case V_0_20_0_RC1_ID:
return V_0_20_0_RC1;
case V_0_19_0_RC1_ID:
return V_0_19_0_RC1;
case V_0_19_0_RC2_ID:
@@ -511,7 +509,6 @@ public class Version {
return V_0_19_12;
case V_0_19_13_ID:
return V_0_19_13;
case V_0_18_0_ID:
return V_0_18_0;
case V_0_18_1_ID:
@@ -530,9 +527,8 @@ public class Version {
return V_0_18_7;
case V_0_18_8_ID:
return V_0_18_8;
default:
return new Version(id, false, Lucene.VERSION);
return new Version(id, false, org.apache.lucene.util.Version.LATEST);
}
}
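The Version hunks above swap the internal Lucene.VERSION constant for org.apache.lucene.util.Version.LATEST, which is why the org.elasticsearch.common.lucene.Lucene import disappears. A self-contained sketch of the load-time guard the static block implements, with plain string constants standing in for the real version fields:

```java
// Hypothetical constants; the real code compares CURRENT.luceneVersion with
// org.apache.lucene.util.Version.LATEST. The static initializer runs at class
// load, so version skew fails fast when assertions are enabled (java -ea).
public final class VersionGuard {
    static final String PINNED = "5.5.0";       // version baked into this build
    static final String ON_CLASSPATH = "5.5.0"; // stand-in for Version.LATEST

    static {
        assert PINNED.equals(ON_CLASSPATH)
                : "Version must be upgraded to [" + ON_CLASSPATH + "]; is still set to [" + PINNED + "]";
    }

    public static void main(String[] args) {
        System.out.println("versions agree: " + PINNED);
    }
}
```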

View File

@@ -127,12 +127,6 @@ import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeAction;
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeSettingsAction;
import org.elasticsearch.action.admin.indices.validate.query.TransportValidateQueryAction;
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction;
import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerAction;
import org.elasticsearch.action.admin.indices.warmer.delete.TransportDeleteWarmerAction;
import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersAction;
import org.elasticsearch.action.admin.indices.warmer.get.TransportGetWarmersAction;
import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerAction;
import org.elasticsearch.action.admin.indices.warmer.put.TransportPutWarmerAction;
import org.elasticsearch.action.bulk.BulkAction;
import org.elasticsearch.action.bulk.TransportBulkAction;
import org.elasticsearch.action.bulk.TransportShardBulkAction;
@@ -304,9 +298,6 @@ public class ActionModule extends AbstractModule {
registerAction(UpgradeStatusAction.INSTANCE, TransportUpgradeStatusAction.class);
registerAction(UpgradeSettingsAction.INSTANCE, TransportUpgradeSettingsAction.class);
registerAction(ClearIndicesCacheAction.INSTANCE, TransportClearIndicesCacheAction.class);
registerAction(PutWarmerAction.INSTANCE, TransportPutWarmerAction.class);
registerAction(DeleteWarmerAction.INSTANCE, TransportDeleteWarmerAction.class);
registerAction(GetWarmersAction.INSTANCE, TransportGetWarmersAction.class);
registerAction(GetAliasesAction.INSTANCE, TransportGetAliasesAction.class);
registerAction(AliasesExistAction.INSTANCE, TransportAliasesExistAction.class);
registerAction(GetSettingsAction.INSTANCE, TransportGetSettingsAction.class);

View File

@@ -35,7 +35,7 @@ public interface AliasesRequest extends IndicesRequest.Replaceable {
/**
* Sets the array of aliases that the action relates to
*/
AliasesRequest aliases(String[] aliases);
AliasesRequest aliases(String... aliases);
/**
* Returns true if wildcard expressions among aliases should be resolved, false otherwise
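This String[]-to-String... change repeats across the request classes below (ClusterHealthRequest, IndicesExistsRequest, TypesExistsRequest, PutMappingRequest). The varargs form stays compatible with existing array callers while letting new callers pass names inline; a hypothetical demo class:

```java
// Hypothetical builder-style class demonstrating the signature change.
public class AliasesDemo {
    private String[] aliases = new String[0];

    public AliasesDemo aliases(String... aliases) { // was: aliases(String[] aliases)
        this.aliases = aliases;
        return this;
    }

    public static void main(String[] args) {
        AliasesDemo demo = new AliasesDemo();
        demo.aliases("logs-read", "logs-write");    // inline arguments, new style
        demo.aliases(new String[] { "logs-read" }); // explicit arrays still compile
    }
}
```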

View File

@@ -41,9 +41,9 @@ public interface IndicesRequest {
IndicesOptions indicesOptions();
static interface Replaceable extends IndicesRequest {
/*
* Sets the array of indices that the action relates to
/**
* Sets the indices that the action relates to.
*/
IndicesRequest indices(String[] indices);
IndicesRequest indices(String... indices);
}
}

View File

@@ -61,7 +61,7 @@ public class ClusterHealthRequest extends MasterNodeReadRequest<ClusterHealthReq
}
@Override
public ClusterHealthRequest indices(String[] indices) {
public ClusterHealthRequest indices(String... indices) {
this.indices = indices;
return this;
}

View File

@@ -51,7 +51,7 @@ public class IndicesExistsRequest extends MasterNodeReadRequest<IndicesExistsReq
}
@Override
public IndicesExistsRequest indices(String[] indices) {
public IndicesExistsRequest indices(String... indices) {
this.indices = indices;
return this;
}

View File

@@ -52,7 +52,7 @@ public class TypesExistsRequest extends MasterNodeReadRequest<TypesExistsRequest
}
@Override
public TypesExistsRequest indices(String[] indices) {
public TypesExistsRequest indices(String... indices) {
this.indices = indices;
return this;
}

View File

@@ -37,8 +37,7 @@ public class GetIndexRequest extends ClusterInfoRequest<GetIndexRequest> {
public static enum Feature {
ALIASES((byte) 0, "_aliases", "_alias"),
MAPPINGS((byte) 1, "_mappings", "_mapping"),
SETTINGS((byte) 2, "_settings"),
WARMERS((byte) 3, "_warmers", "_warmer");
SETTINGS((byte) 2, "_settings");
private static final Feature[] FEATURES = new Feature[Feature.values().length];
@@ -97,7 +96,7 @@ public class GetIndexRequest extends ClusterInfoRequest<GetIndexRequest> {
}
}
private static final Feature[] DEFAULT_FEATURES = new Feature[] { Feature.ALIASES, Feature.MAPPINGS, Feature.SETTINGS, Feature.WARMERS };
private static final Feature[] DEFAULT_FEATURES = new Feature[] { Feature.ALIASES, Feature.MAPPINGS, Feature.SETTINGS };
private Feature[] features = DEFAULT_FEATURES;
private boolean humanReadable = false;

View File

@@ -27,7 +27,6 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
import java.io.IOException;
import java.util.ArrayList;
@@ -39,19 +38,15 @@ import java.util.List;
*/
public class GetIndexResponse extends ActionResponse {
private ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> warmers = ImmutableOpenMap.of();
private ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings = ImmutableOpenMap.of();
private ImmutableOpenMap<String, List<AliasMetaData>> aliases = ImmutableOpenMap.of();
private ImmutableOpenMap<String, Settings> settings = ImmutableOpenMap.of();
private String[] indices;
GetIndexResponse(String[] indices, ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> warmers,
GetIndexResponse(String[] indices,
ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings,
ImmutableOpenMap<String, List<AliasMetaData>> aliases, ImmutableOpenMap<String, Settings> settings) {
this.indices = indices;
if (warmers != null) {
this.warmers = warmers;
}
if (mappings != null) {
this.mappings = mappings;
}
@@ -74,14 +69,6 @@ public class GetIndexResponse extends ActionResponse {
return indices();
}
public ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> warmers() {
return warmers;
}
public ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> getWarmers() {
return warmers();
}
public ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings() {
return mappings;
}
@@ -110,23 +97,6 @@ public class GetIndexResponse extends ActionResponse {
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
this.indices = in.readStringArray();
int warmersSize = in.readVInt();
ImmutableOpenMap.Builder<String, List<IndexWarmersMetaData.Entry>> warmersMapBuilder = ImmutableOpenMap.builder();
for (int i = 0; i < warmersSize; i++) {
String key = in.readString();
int valueSize = in.readVInt();
List<IndexWarmersMetaData.Entry> warmerEntryBuilder = new ArrayList<>();
for (int j = 0; j < valueSize; j++) {
warmerEntryBuilder.add(new IndexWarmersMetaData.Entry(
in.readString(),
in.readStringArray(),
in.readOptionalBoolean(),
in.readBoolean() ? new IndexWarmersMetaData.SearchSource(in) : null)
);
}
warmersMapBuilder.put(key, Collections.unmodifiableList(warmerEntryBuilder));
}
warmers = warmersMapBuilder.build();
int mappingsSize = in.readVInt();
ImmutableOpenMap.Builder<String, ImmutableOpenMap<String, MappingMetaData>> mappingsMapBuilder = ImmutableOpenMap.builder();
for (int i = 0; i < mappingsSize; i++) {
@@ -164,21 +134,6 @@ public class GetIndexResponse extends ActionResponse {
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeStringArray(indices);
out.writeVInt(warmers.size());
for (ObjectObjectCursor<String, List<IndexWarmersMetaData.Entry>> indexEntry : warmers) {
out.writeString(indexEntry.key);
out.writeVInt(indexEntry.value.size());
for (IndexWarmersMetaData.Entry warmerEntry : indexEntry.value) {
out.writeString(warmerEntry.name());
out.writeStringArray(warmerEntry.types());
out.writeOptionalBoolean(warmerEntry.requestCache());
boolean hasSource = warmerEntry.source() != null;
out.writeBoolean(hasSource);
if (hasSource) {
warmerEntry.source().writeTo(out);
}
}
}
out.writeVInt(mappings.size());
for (ObjectObjectCursor<String, ImmutableOpenMap<String, MappingMetaData>> indexEntry : mappings) {
out.writeString(indexEntry.key);
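The deleted readFrom/writeTo blocks are the two halves of a length-prefixed map encoding: write the entry count, then each key and value; the reader mirrors the writes exactly, which is why both sides must be removed together. A minimal sketch of the same pattern over plain java.io streams (Elasticsearch's StreamOutput uses writeVInt for a more compact count):

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

// Length-prefixed map round-trip: the read side mirrors the write side.
public final class MapCodecDemo {
    static void write(DataOutputStream out, Map<String, String> map) throws IOException {
        out.writeInt(map.size()); // entry count first
        for (Map.Entry<String, String> entry : map.entrySet()) {
            out.writeUTF(entry.getKey());
            out.writeUTF(entry.getValue());
        }
    }

    static Map<String, String> read(DataInputStream in) throws IOException {
        int size = in.readInt();
        Map<String, String> map = new HashMap<>();
        for (int i = 0; i < size; i++) {
            map.put(in.readUTF(), in.readUTF());
        }
        return map;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        Map<String, String> warmers = new HashMap<>();
        warmers.put("index-1", "warmer-source");
        write(new DataOutputStream(bytes), warmers);
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        System.out.println(read(in)); // {index-1=warmer-source}
    }
}
```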

View File

@@ -36,7 +36,6 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.search.warmer.IndexWarmersMetaData.Entry;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@@ -72,7 +71,6 @@ public class TransportGetIndexAction extends TransportClusterInfoAction<GetIndex
@Override
protected void doMasterOperation(final GetIndexRequest request, String[] concreteIndices, final ClusterState state,
final ActionListener<GetIndexResponse> listener) {
ImmutableOpenMap<String, List<Entry>> warmersResult = ImmutableOpenMap.of();
ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappingsResult = ImmutableOpenMap.of();
ImmutableOpenMap<String, List<AliasMetaData>> aliasesResult = ImmutableOpenMap.of();
ImmutableOpenMap<String, Settings> settings = ImmutableOpenMap.of();
@@ -80,15 +78,8 @@ public class TransportGetIndexAction extends TransportClusterInfoAction<GetIndex
boolean doneAliases = false;
boolean doneMappings = false;
boolean doneSettings = false;
boolean doneWarmers = false;
for (Feature feature : features) {
switch (feature) {
case WARMERS:
if (!doneWarmers) {
warmersResult = state.metaData().findWarmers(concreteIndices, request.types(), Strings.EMPTY_ARRAY);
doneWarmers = true;
}
break;
case MAPPINGS:
if (!doneMappings) {
mappingsResult = state.metaData().findMappings(concreteIndices, request.types());
@@ -120,6 +111,6 @@ public class TransportGetIndexAction extends TransportClusterInfoAction<GetIndex
throw new IllegalStateException("feature [" + feature + "] is not valid");
}
}
listener.onResponse(new GetIndexResponse(concreteIndices, warmersResult, mappingsResult, aliasesResult, settings));
listener.onResponse(new GetIndexResponse(concreteIndices, mappingsResult, aliasesResult, settings));
}
}

View File

@@ -20,6 +20,7 @@
package org.elasticsearch.action.admin.indices.mapping.put;
import com.carrotsearch.hppc.ObjectHashSet;
import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
@@ -96,7 +97,7 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
* Sets the indices this put mapping operation will execute on.
*/
@Override
public PutMappingRequest indices(String[] indices) {
public PutMappingRequest indices(String... indices) {
this.indices = indices;
return this;
}

View File

@@ -1,46 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.warmer.delete;
import org.elasticsearch.action.Action;
import org.elasticsearch.client.ElasticsearchClient;
/**
* Action for the admin/warmers/delete API.
*/
public class DeleteWarmerAction extends Action<DeleteWarmerRequest, DeleteWarmerResponse, DeleteWarmerRequestBuilder> {
public static final DeleteWarmerAction INSTANCE = new DeleteWarmerAction();
public static final String NAME = "indices:admin/warmers/delete";
private DeleteWarmerAction() {
super(NAME);
}
@Override
public DeleteWarmerResponse newResponse() {
return new DeleteWarmerResponse();
}
@Override
public DeleteWarmerRequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new DeleteWarmerRequestBuilder(client, this);
}
}

View File

@@ -1,147 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.warmer.delete;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.util.CollectionUtils;
import java.io.IOException;
import static org.elasticsearch.action.ValidateActions.addValidationError;
/**
* A request that deletes an index warmer (name, {@link org.elasticsearch.action.search.SearchRequest})
* tuple from the cluster's metadata.
*/
public class DeleteWarmerRequest extends AcknowledgedRequest<DeleteWarmerRequest> implements IndicesRequest.Replaceable {
private String[] names = Strings.EMPTY_ARRAY;
private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, false);
private String[] indices = Strings.EMPTY_ARRAY;
public DeleteWarmerRequest() {
}
/**
* Constructs a new delete warmer request for the specified name.
*
* @param names the name (or wildcard expression) of the warmer to match, null to delete all.
*/
public DeleteWarmerRequest(String... names) {
names(names);
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (CollectionUtils.isEmpty(names)) {
validationException = addValidationError("warmer names are missing", validationException);
} else {
validationException = checkForEmptyString(validationException, names);
}
if (CollectionUtils.isEmpty(indices)) {
validationException = addValidationError("indices are missing", validationException);
} else {
validationException = checkForEmptyString(validationException, indices);
}
return validationException;
}
private ActionRequestValidationException checkForEmptyString(ActionRequestValidationException validationException, String[] strings) {
boolean containsEmptyString = false;
for (String string : strings) {
if (!Strings.hasText(string)) {
containsEmptyString = true;
}
}
if (containsEmptyString) {
validationException = addValidationError("types must not contain empty strings", validationException);
}
return validationException;
}
/**
* The name to delete.
*/
@Nullable
public String[] names() {
return names;
}
/**
* The name (or wildcard expression) of the index warmer to delete, or null
* to delete all warmers.
*/
public DeleteWarmerRequest names(@Nullable String... names) {
this.names = names;
return this;
}
/**
* Sets the indices this delete warmer operation will execute on.
*/
@Override
public DeleteWarmerRequest indices(String... indices) {
this.indices = indices;
return this;
}
/**
* The indices the operation will execute on.
*/
@Override
public String[] indices() {
return indices;
}
@Override
public IndicesOptions indicesOptions() {
return indicesOptions;
}
public DeleteWarmerRequest indicesOptions(IndicesOptions indicesOptions) {
this.indicesOptions = indicesOptions;
return this;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
names = in.readStringArray();
indices = in.readStringArray();
indicesOptions = IndicesOptions.readIndicesOptions(in);
readTimeout(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeStringArrayNullable(names);
out.writeStringArrayNullable(indices);
indicesOptions.writeIndicesOptions(out);
writeTimeout(out);
}
}
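The validate() method above shows the addValidationError chaining idiom: every check appends to a lazily created exception so the caller sees all problems at once, and a null return means the request is valid. A self-contained sketch with simplified stand-in types:

```java
import java.util.ArrayList;
import java.util.List;

// Simplified stand-ins for ActionRequestValidationException and
// ValidateActions.addValidationError.
public final class ValidationDemo {
    static final class ValidationException extends RuntimeException {
        final List<String> errors = new ArrayList<>();

        @Override
        public String getMessage() {
            return "validation failed: " + errors;
        }
    }

    static ValidationException addValidationError(String error, ValidationException existing) {
        ValidationException e = existing == null ? new ValidationException() : existing;
        e.errors.add(error);
        return e;
    }

    static ValidationException validate(String[] names, String[] indices) {
        ValidationException e = null;
        if (names == null || names.length == 0) {
            e = addValidationError("warmer names are missing", e);
        }
        if (indices == null || indices.length == 0) {
            e = addValidationError("indices are missing", e);
        }
        return e; // null means the request is valid
    }

    public static void main(String[] args) {
        System.out.println(validate(new String[0], new String[0]).getMessage());
    }
}
```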

View File

@@ -1,60 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.warmer.delete;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
/**
* A builder for the {@link DeleteWarmerRequest}
*
* @see DeleteWarmerRequest for details
*/
public class DeleteWarmerRequestBuilder extends AcknowledgedRequestBuilder<DeleteWarmerRequest, DeleteWarmerResponse, DeleteWarmerRequestBuilder> {
public DeleteWarmerRequestBuilder(ElasticsearchClient client, DeleteWarmerAction action) {
super(client, action, new DeleteWarmerRequest());
}
public DeleteWarmerRequestBuilder setIndices(String... indices) {
request.indices(indices);
return this;
}
/**
* The name (or wildcard expression) of the index warmer to delete, or null
* to delete all warmers.
*/
public DeleteWarmerRequestBuilder setNames(String... names) {
request.names(names);
return this;
}
/**
* Specifies which types of requested indices to ignore and how wildcard expressions are expanded.
* <p>
* For example, indices that don't exist.
*/
public DeleteWarmerRequestBuilder setIndicesOptions(IndicesOptions options) {
request.indicesOptions(options);
return this;
}
}

View File

@@ -1,51 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.warmer.delete;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import java.io.IOException;
/**
* An acknowledged response of delete warmer operation.
*/
public class DeleteWarmerResponse extends AcknowledgedResponse {
DeleteWarmerResponse() {
super();
}
DeleteWarmerResponse(boolean acknowledged) {
super(acknowledged);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
readAcknowledged(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
writeAcknowledged(out);
}
}

View File

@@ -1,163 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.warmer.delete;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.search.warmer.IndexWarmerMissingException;
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
* Internal action executed on the master node that deletes the warmer from the cluster state metadata.
*
* Note: this is an internal API and should not be used / called by any client code.
*/
public class TransportDeleteWarmerAction extends TransportMasterNodeAction<DeleteWarmerRequest, DeleteWarmerResponse> {
@Inject
public TransportDeleteWarmerAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, DeleteWarmerAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, DeleteWarmerRequest::new);
}
@Override
protected String executor() {
// we go async right away
return ThreadPool.Names.SAME;
}
@Override
protected DeleteWarmerResponse newResponse() {
return new DeleteWarmerResponse();
}
@Override
protected ClusterBlockException checkBlock(DeleteWarmerRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndices(state, request));
}
@Override
protected void masterOperation(final DeleteWarmerRequest request, final ClusterState state, final ActionListener<DeleteWarmerResponse> listener) {
final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
clusterService.submitStateUpdateTask("delete_warmer [" + Arrays.toString(request.names()) + "]", new AckedClusterStateUpdateTask<DeleteWarmerResponse>(request, listener) {
@Override
protected DeleteWarmerResponse newResponse(boolean acknowledged) {
return new DeleteWarmerResponse(acknowledged);
}
@Override
public void onFailure(String source, Throwable t) {
logger.debug("failed to delete warmer [{}] on indices [{}]", t, Arrays.toString(request.names()), concreteIndices);
super.onFailure(source, t);
}
@Override
public ClusterState execute(ClusterState currentState) {
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
boolean globalFoundAtLeastOne = false;
boolean deleteAll = false;
for (int i=0; i<request.names().length; i++){
if (request.names()[i].equals(MetaData.ALL)) {
deleteAll = true;
break;
}
}
for (String index : concreteIndices) {
IndexMetaData indexMetaData = currentState.metaData().index(index);
if (indexMetaData == null) {
throw new IndexNotFoundException(index);
}
IndexWarmersMetaData warmers = indexMetaData.custom(IndexWarmersMetaData.TYPE);
if (warmers != null) {
List<IndexWarmersMetaData.Entry> entries = new ArrayList<>();
for (IndexWarmersMetaData.Entry entry : warmers.entries()) {
boolean keepWarmer = true;
for (String warmer : request.names()) {
if (Regex.simpleMatch(warmer, entry.name()) || warmer.equals(MetaData.ALL)) {
globalFoundAtLeastOne = true;
keepWarmer = false;
// don't add it...
break;
}
}
if (keepWarmer) {
entries.add(entry);
}
}
// a change, update it...
if (entries.size() != warmers.entries().size()) {
warmers = new IndexWarmersMetaData(entries.toArray(new IndexWarmersMetaData.Entry[entries.size()]));
IndexMetaData.Builder indexBuilder = IndexMetaData.builder(indexMetaData).putCustom(IndexWarmersMetaData.TYPE, warmers);
mdBuilder.put(indexBuilder);
}
}
}
if (globalFoundAtLeastOne == false && deleteAll == false) {
throw new IndexWarmerMissingException(request.names());
}
if (logger.isInfoEnabled()) {
for (String index : concreteIndices) {
IndexMetaData indexMetaData = currentState.metaData().index(index);
if (indexMetaData == null) {
throw new IndexNotFoundException(index);
}
IndexWarmersMetaData warmers = indexMetaData.custom(IndexWarmersMetaData.TYPE);
if (warmers != null) {
for (IndexWarmersMetaData.Entry entry : warmers.entries()) {
for (String warmer : request.names()) {
if (Regex.simpleMatch(warmer, entry.name()) || warmer.equals(MetaData.ALL)) {
logger.info("[{}] delete warmer [{}]", index, entry.name());
}
}
}
} else if(deleteAll){
logger.debug("no warmers to delete on index [{}]", index);
}
}
}
return ClusterState.builder(currentState).metaData(mdBuilder).build();
}
});
}
}
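Two things are worth noting in the execute() method above: warmer names may be wildcards matched with Regex.simpleMatch, and because cluster state is immutable the "delete" builds a new entry list holding only the survivors and publishes a rebuilt state. A hedged, self-contained sketch of both pieces (the single-'*' matcher below is simpler than the real Regex.simpleMatch, which also handles multiple wildcards):

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public final class DeleteWarmerSketch {
    // Single-wildcard approximation of Regex.simpleMatch.
    static boolean simpleMatch(String pattern, String value) {
        int star = pattern.indexOf('*');
        if (star == -1) {
            return pattern.equals(value);
        }
        String prefix = pattern.substring(0, star);
        String suffix = pattern.substring(star + 1);
        return value.length() >= prefix.length() + suffix.length()
                && value.startsWith(prefix) && value.endsWith(suffix);
    }

    // "Delete" by building a new list of the entries that do NOT match.
    static List<String> keepNonMatching(List<String> warmerNames, String[] patterns) {
        List<String> kept = new ArrayList<>();
        for (String name : warmerNames) {
            boolean matched = false;
            for (String pattern : patterns) {
                if (simpleMatch(pattern, name)) {
                    matched = true;
                    break;
                }
            }
            if (!matched) {
                kept.add(name);
            }
        }
        return kept;
    }

    public static void main(String[] args) {
        List<String> warmers = Arrays.asList("warm_users", "warm_orders", "other");
        System.out.println(keepNonMatching(warmers, new String[] { "warm_*" })); // [other]
    }
}
```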

View File

@@ -1,46 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.warmer.get;
import org.elasticsearch.action.Action;
import org.elasticsearch.client.ElasticsearchClient;
/**
* Action for the admin/warmers/get API.
*/
public class GetWarmersAction extends Action<GetWarmersRequest, GetWarmersResponse, GetWarmersRequestBuilder> {
public static final GetWarmersAction INSTANCE = new GetWarmersAction();
public static final String NAME = "indices:admin/warmers/get";
private GetWarmersAction() {
super(NAME);
}
@Override
public GetWarmersRequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new GetWarmersRequestBuilder(client, this);
}
@Override
public GetWarmersResponse newResponse() {
return new GetWarmersResponse();
}
}

View File

@@ -1,64 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.warmer.get;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.master.info.ClusterInfoRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import java.io.IOException;
/**
* A {@link ClusterInfoRequest} that fetches {@link org.elasticsearch.search.warmer.IndexWarmersMetaData} for
* a list of, or all, existing index warmers in the cluster state
*/
public class GetWarmersRequest extends ClusterInfoRequest<GetWarmersRequest> {
private String[] warmers = Strings.EMPTY_ARRAY;
public GetWarmersRequest warmers(String[] warmers) {
this.warmers = warmers;
return this;
}
public String[] warmers() {
return warmers;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
warmers = in.readStringArray();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeStringArray(warmers);
}
}

View File

@@ -1,46 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.warmer.get;
import org.elasticsearch.action.support.master.info.ClusterInfoRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.util.ArrayUtils;
/**
* Builder for {@link GetWarmersRequest}
*
* @see GetWarmersRequest for details
*/
public class GetWarmersRequestBuilder extends ClusterInfoRequestBuilder<GetWarmersRequest, GetWarmersResponse, GetWarmersRequestBuilder> {
public GetWarmersRequestBuilder(ElasticsearchClient client, GetWarmersAction action, String... indices) {
super(client, action, new GetWarmersRequest().indices(indices));
}
public GetWarmersRequestBuilder setWarmers(String... warmers) {
request.warmers(warmers);
return this;
}
public GetWarmersRequestBuilder addWarmers(String... warmers) {
request.warmers(ArrayUtils.concat(request.warmers(), warmers));
return this;
}
}

View File

@@ -1,107 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.warmer.get;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
* Holds an index-name to list of {@link IndexWarmersMetaData.Entry} mapping for each warmer specified
* in the {@link GetWarmersRequest}. This information is fetched from the current master since the metadata
* is contained inside the cluster state
*/
public class GetWarmersResponse extends ActionResponse {
private ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> warmers = ImmutableOpenMap.of();
GetWarmersResponse(ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> warmers) {
this.warmers = warmers;
}
GetWarmersResponse() {
}
public ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> warmers() {
return warmers;
}
public ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> getWarmers() {
return warmers();
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
int size = in.readVInt();
ImmutableOpenMap.Builder<String, List<IndexWarmersMetaData.Entry>> indexMapBuilder = ImmutableOpenMap.builder();
for (int i = 0; i < size; i++) {
String key = in.readString();
int valueSize = in.readVInt();
List<IndexWarmersMetaData.Entry> warmerEntryBuilder = new ArrayList<>();
for (int j = 0; j < valueSize; j++) {
String name = in.readString();
String[] types = in.readStringArray();
IndexWarmersMetaData.SearchSource source = null;
if (in.readBoolean()) {
source = new IndexWarmersMetaData.SearchSource(in);
}
Boolean queryCache = in.readOptionalBoolean();
warmerEntryBuilder.add(new IndexWarmersMetaData.Entry(
name,
types,
queryCache,
source)
);
}
indexMapBuilder.put(key, Collections.unmodifiableList(warmerEntryBuilder));
}
warmers = indexMapBuilder.build();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(warmers.size());
for (ObjectObjectCursor<String, List<IndexWarmersMetaData.Entry>> indexEntry : warmers) {
out.writeString(indexEntry.key);
out.writeVInt(indexEntry.value.size());
for (IndexWarmersMetaData.Entry warmerEntry : indexEntry.value) {
out.writeString(warmerEntry.name());
out.writeStringArray(warmerEntry.types());
boolean hasWarmerSource = warmerEntry.source() != null;
out.writeBoolean(hasWarmerSource);
if (hasWarmerSource) {
warmerEntry.source().writeTo(out);
}
out.writeOptionalBoolean(warmerEntry.requestCache());
}
}
}
}
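The source field here uses a presence-flag encoding: a boolean announces whether a payload follows, and the reader branches on it, which is why the write side must test warmerEntry.source() != null to mirror the readBoolean() branch above. A minimal sketch of the pattern over plain java.io streams:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Presence-flag encoding: absent values cost one boolean on the wire.
public final class OptionalFieldDemo {
    static void writeOptionalString(DataOutputStream out, String value) throws IOException {
        boolean present = value != null;
        out.writeBoolean(present);
        if (present) {
            out.writeUTF(value);
        }
    }

    static String readOptionalString(DataInputStream in) throws IOException {
        return in.readBoolean() ? in.readUTF() : null;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        writeOptionalString(out, null);
        writeOptionalString(out, "warmer source");
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        System.out.println(readOptionalString(in)); // null
        System.out.println(readOptionalString(in)); // warmer source
    }
}
```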

View File

@@ -1,75 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.warmer.get;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.info.TransportClusterInfoAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.List;
/**
* Internal action executed on the master node that fetches the warmers from the cluster state metadata.
*
* Note: this is an internal API and should not be used / called by any client code.
*/
public class TransportGetWarmersAction extends TransportClusterInfoAction<GetWarmersRequest, GetWarmersResponse> {
@Inject
public TransportGetWarmersAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, GetWarmersAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, GetWarmersRequest::new);
}
@Override
protected String executor() {
// very lightweight operation, no need to fork
return ThreadPool.Names.SAME;
}
@Override
protected ClusterBlockException checkBlock(GetWarmersRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, request));
}
@Override
protected GetWarmersResponse newResponse() {
return new GetWarmersResponse();
}
@Override
protected void doMasterOperation(final GetWarmersRequest request, String[] concreteIndices, final ClusterState state, final ActionListener<GetWarmersResponse> listener) {
ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> result = state.metaData().findWarmers(
concreteIndices, request.types(), request.warmers()
);
listener.onResponse(new GetWarmersResponse(result));
}
}

View File

@@ -1,30 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Index / Search Warmer Administrative Actions
* <p>
* Index warming allows running registered search requests to warm up the index before it is available for search.
* With the near real time aspect of search, cold data (segments) will be warmed up before they become available for
* search. This includes things such as the query cache, filesystem cache, and loading field data for fields.
* </p>
*
* See the reference guide for more detailed information about the Indices / Search Warmer
*/
package org.elasticsearch.action.admin.indices.warmer;

View File

@@ -1,153 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.warmer.put;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import java.io.IOException;
import static org.elasticsearch.action.ValidateActions.addValidationError;
/**
* A request that associates a {@link SearchRequest} with a name in the cluster that is
* in turn used to warm up indices before they are available for search.
*
* Note: neither the search request nor the name may be <code>null</code>
*/
public class PutWarmerRequest extends AcknowledgedRequest<PutWarmerRequest> implements IndicesRequest.Replaceable {
private String name;
private SearchRequest searchRequest;
public PutWarmerRequest() {
}
/**
* Constructs a new warmer.
*
* @param name The name of the warmer.
*/
public PutWarmerRequest(String name) {
this.name = name;
}
/**
* Sets the name of the warmer.
*/
public PutWarmerRequest name(String name) {
this.name = name;
return this;
}
public String name() {
return this.name;
}
/**
* Sets the search request to warm.
*/
public PutWarmerRequest searchRequest(SearchRequest searchRequest) {
this.searchRequest = searchRequest;
return this;
}
/**
* Sets the search request to warm.
*/
public PutWarmerRequest searchRequest(SearchRequestBuilder searchRequest) {
this.searchRequest = searchRequest.request();
return this;
}
public SearchRequest searchRequest() {
return this.searchRequest;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (searchRequest == null) {
validationException = addValidationError("search request is missing", validationException);
} else {
validationException = searchRequest.validate();
}
if (name == null) {
validationException = addValidationError("name is missing", validationException);
}
return validationException;
}
@Override
public String[] indices() {
if (searchRequest == null) {
throw new IllegalStateException("unable to retrieve indices, search request is null");
}
return searchRequest.indices();
}
@Override
public IndicesRequest indices(String[] indices) {
if (searchRequest == null) {
throw new IllegalStateException("unable to set indices, search request is null");
}
searchRequest.indices(indices);
return this;
}
@Override
public IndicesOptions indicesOptions() {
if (searchRequest == null) {
throw new IllegalStateException("unable to retrieve indices options, search request is null");
}
return searchRequest.indicesOptions();
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
name = in.readString();
if (in.readBoolean()) {
searchRequest = new SearchRequest();
searchRequest.readFrom(in);
}
readTimeout(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(name);
if (searchRequest == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
searchRequest.writeTo(out);
}
writeTimeout(out);
}
}

View File

@@ -1,72 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.warmer.put;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
/**
* Builder for {@link PutWarmerRequest}
*
* @see PutWarmerRequest for details
*/
public class PutWarmerRequestBuilder extends AcknowledgedRequestBuilder<PutWarmerRequest, PutWarmerResponse, PutWarmerRequestBuilder> {
/**
* Creates a new {@link PutWarmerRequestBuilder} with a given name.
*/
public PutWarmerRequestBuilder(ElasticsearchClient client, PutWarmerAction action, String name) {
super(client, action, new PutWarmerRequest().name(name));
}
/**
* Creates a new {@link PutWarmerRequestBuilder}
* Note: {@link #setName(String)} must be called with a non-null value before this request is executed.
*/
public PutWarmerRequestBuilder(ElasticsearchClient client, PutWarmerAction action) {
super(client, action, new PutWarmerRequest());
}
/**
* Sets the name of the warmer.
*/
public PutWarmerRequestBuilder setName(String name) {
request.name(name);
return this;
}
/**
* Sets the search request to use to warm the index when applicable.
*/
public PutWarmerRequestBuilder setSearchRequest(SearchRequest searchRequest) {
request.searchRequest(searchRequest);
return this;
}
/**
* Sets the search request to use to warm the index when applicable.
*/
public PutWarmerRequestBuilder setSearchRequest(SearchRequestBuilder searchRequest) {
request.searchRequest(searchRequest);
return this;
}
}

View File

@@ -1,52 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.warmer.put;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import java.io.IOException;
/**
* An acknowledged response of put warmer operation.
*/
public class PutWarmerResponse extends AcknowledgedResponse {
PutWarmerResponse() {
super();
}
PutWarmerResponse(boolean acknowledged) {
super(acknowledged);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
readAcknowledged(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
writeAcknowledged(out);
}
}

View File

@@ -1,167 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.warmer.put;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.TransportSearchAction;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
* Internal action executed on the master node that associates a warmer with a name in the cluster state metadata.
*
* Note: this is an internal API and should not be used / called by any client code.
*/
public class TransportPutWarmerAction extends TransportMasterNodeAction<PutWarmerRequest, PutWarmerResponse> {
private final TransportSearchAction searchAction;
@Inject
public TransportPutWarmerAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
TransportSearchAction searchAction, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, PutWarmerAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, PutWarmerRequest::new);
this.searchAction = searchAction;
}
@Override
protected String executor() {
return ThreadPool.Names.SAME;
}
@Override
protected PutWarmerResponse newResponse() {
return new PutWarmerResponse();
}
@Override
protected ClusterBlockException checkBlock(PutWarmerRequest request, ClusterState state) {
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
ClusterBlockException status = state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, concreteIndices);
if (status != null) {
return status;
}
// PutWarmer executes a SearchQuery before adding the new warmer to the cluster state,
// so we need to check the same block as TransportSearchTypeAction here
return state.blocks().indicesBlockedException(ClusterBlockLevel.READ, concreteIndices);
}
@Override
protected void masterOperation(final PutWarmerRequest request, final ClusterState state, final ActionListener<PutWarmerResponse> listener) {
// first execute the search request, see that its ok...
SearchRequest searchRequest = new SearchRequest(request.searchRequest(), request);
searchAction.execute(searchRequest, new ActionListener<SearchResponse>() {
@Override
public void onResponse(SearchResponse searchResponse) {
if (searchResponse.getFailedShards() > 0) {
listener.onFailure(new ElasticsearchException("search failed with failed shards: " + Arrays.toString(searchResponse.getShardFailures())));
return;
}
clusterService.submitStateUpdateTask("put_warmer [" + request.name() + "]", new AckedClusterStateUpdateTask<PutWarmerResponse>(request, listener) {
@Override
protected PutWarmerResponse newResponse(boolean acknowledged) {
return new PutWarmerResponse(acknowledged);
}
@Override
public void onFailure(String source, Throwable t) {
logger.debug("failed to put warmer [{}] on indices [{}]", t, request.name(), request.searchRequest().indices());
super.onFailure(source, t);
}
@Override
public ClusterState execute(ClusterState currentState) {
MetaData metaData = currentState.metaData();
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(currentState, request.searchRequest().indicesOptions(), request.searchRequest().indices());
IndexWarmersMetaData.SearchSource source = null;
if (request.searchRequest().source() != null) {
source = new IndexWarmersMetaData.SearchSource(request.searchRequest().source());
}
// now replace it on the metadata
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
for (String index : concreteIndices) {
IndexMetaData indexMetaData = metaData.index(index);
if (indexMetaData == null) {
throw new IndexNotFoundException(index);
}
IndexWarmersMetaData warmers = indexMetaData.custom(IndexWarmersMetaData.TYPE);
if (warmers == null) {
logger.info("[{}] putting warmer [{}]", index, request.name());
warmers = new IndexWarmersMetaData(new IndexWarmersMetaData.Entry(request.name(), request.searchRequest().types(), request.searchRequest().requestCache(), source));
} else {
boolean found = false;
List<IndexWarmersMetaData.Entry> entries = new ArrayList<>(warmers.entries().size() + 1);
for (IndexWarmersMetaData.Entry entry : warmers.entries()) {
if (entry.name().equals(request.name())) {
found = true;
entries.add(new IndexWarmersMetaData.Entry(request.name(), request.searchRequest().types(), request.searchRequest().requestCache(), source));
} else {
entries.add(entry);
}
}
if (!found) {
logger.info("[{}] put warmer [{}]", index, request.name());
entries.add(new IndexWarmersMetaData.Entry(request.name(), request.searchRequest().types(), request.searchRequest().requestCache(), source));
} else {
logger.info("[{}] update warmer [{}]", index, request.name());
}
warmers = new IndexWarmersMetaData(entries.toArray(new IndexWarmersMetaData.Entry[entries.size()]));
}
IndexMetaData.Builder indexBuilder = IndexMetaData.builder(indexMetaData).putCustom(IndexWarmersMetaData.TYPE, warmers);
mdBuilder.put(indexBuilder);
}
return ClusterState.builder(currentState).metaData(mdBuilder).build();
}
});
}
@Override
public void onFailure(Throwable e) {
listener.onFailure(e);
}
});
}
}
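For context, the removed masterOperation follows a validate-then-commit pattern: the warmer's search request is executed once, and only on success is the cluster state update submitted. A minimal sketch of the commit half, where applyWarmerToMetaData is a hypothetical stand-in for the per-index metadata rewrite shown in execute(ClusterState):

// Hedged sketch of the commit step; applyWarmerToMetaData is hypothetical.
clusterService.submitStateUpdateTask("put_warmer [" + request.name() + "]",
        new AckedClusterStateUpdateTask<PutWarmerResponse>(request, listener) {
            @Override
            protected PutWarmerResponse newResponse(boolean acknowledged) {
                // acknowledged is true only if all nodes applied the update in time
                return new PutWarmerResponse(acknowledged);
            }
            @Override
            public ClusterState execute(ClusterState currentState) {
                return applyWarmerToMetaData(currentState); // hypothetical helper
            }
        });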

View File

@ -52,7 +52,7 @@ public class PercolateShardRequest extends BroadcastShardRequest {
this.startTime = request.startTime;
}
PercolateShardRequest(ShardId shardId, OriginalIndices originalIndices) {
public PercolateShardRequest(ShardId shardId, OriginalIndices originalIndices) {
super(shardId, originalIndices);
}
@ -81,15 +81,15 @@ public class PercolateShardRequest extends BroadcastShardRequest {
return onlyCount;
}
void documentType(String documentType) {
public void documentType(String documentType) {
this.documentType = documentType;
}
void source(BytesReference source) {
public void source(BytesReference source) {
this.source = source;
}
void docSource(BytesReference docSource) {
public void docSource(BytesReference docSource) {
this.docSource = docSource;
}

View File

@ -18,11 +18,12 @@
*/
package org.elasticsearch.action.percolate;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.search.TopDocs;
import org.elasticsearch.action.support.broadcast.BroadcastShardResponse;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.percolator.PercolateContext;
import org.elasticsearch.search.aggregations.InternalAggregations;
@ -43,31 +44,24 @@ import java.util.Map;
*/
public class PercolateShardResponse extends BroadcastShardResponse {
private static final BytesRef[] EMPTY_MATCHES = new BytesRef[0];
private static final float[] EMPTY_SCORES = new float[0];
private static final List<Map<String, HighlightField>> EMPTY_HL = Collections.emptyList();
private long count;
private float[] scores;
private BytesRef[] matches;
private List<Map<String, HighlightField>> hls;
private byte percolatorTypeId;
private TopDocs topDocs;
private Map<Integer, String> ids;
private Map<Integer, Map<String, HighlightField>> hls;
private boolean onlyCount;
private int requestedSize;
private InternalAggregations aggregations;
private List<SiblingPipelineAggregator> pipelineAggregators;
PercolateShardResponse() {
hls = new ArrayList<>();
}
public PercolateShardResponse(BytesRef[] matches, List<Map<String, HighlightField>> hls, long count, float[] scores, PercolateContext context, ShardId shardId) {
super(shardId);
this.matches = matches;
public PercolateShardResponse(TopDocs topDocs, Map<Integer, String> ids, Map<Integer, Map<String, HighlightField>> hls, PercolateContext context) {
super(new ShardId(context.shardTarget().getIndex(), context.shardTarget().getShardId()));
this.topDocs = topDocs;
this.ids = ids;
this.hls = hls;
this.count = count;
this.scores = scores;
this.percolatorTypeId = context.percolatorTypeId;
this.onlyCount = context.isOnlyCount();
this.requestedSize = context.size();
QuerySearchResult result = context.queryResult();
if (result != null) {
@ -78,39 +72,25 @@ public class PercolateShardResponse extends BroadcastShardResponse {
}
}
public PercolateShardResponse(BytesRef[] matches, long count, float[] scores, PercolateContext context, ShardId shardId) {
this(matches, EMPTY_HL, count, scores, context, shardId);
public TopDocs topDocs() {
return topDocs;
}
public PercolateShardResponse(BytesRef[] matches, List<Map<String, HighlightField>> hls, long count, PercolateContext context, ShardId shardId) {
this(matches, hls, count, EMPTY_SCORES, context, shardId);
}
public PercolateShardResponse(long count, PercolateContext context, ShardId shardId) {
this(EMPTY_MATCHES, EMPTY_HL, count, EMPTY_SCORES, context, shardId);
}
public PercolateShardResponse(PercolateContext context, ShardId shardId) {
this(EMPTY_MATCHES, EMPTY_HL, 0, EMPTY_SCORES, context, shardId);
}
public BytesRef[] matches() {
return matches;
}
public float[] scores() {
return scores;
}
public long count() {
return count;
/**
* Returns, for each match, the percolator query id. The key is the Lucene docId of the matching percolator query.
*/
public Map<Integer, String> ids() {
return ids;
}
public int requestedSize() {
return requestedSize;
}
public List<Map<String, HighlightField>> hls() {
/**
* Returns, for each match, the highlight snippets. The key is the Lucene docId of the matching percolator query.
*/
public Map<Integer, Map<String, HighlightField>> hls() {
return hls;
}
@ -122,36 +102,35 @@ public class PercolateShardResponse extends BroadcastShardResponse {
return pipelineAggregators;
}
public byte percolatorTypeId() {
return percolatorTypeId;
public boolean onlyCount() {
return onlyCount;
}
public boolean isEmpty() {
return percolatorTypeId == 0x00;
return topDocs.totalHits == 0;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
percolatorTypeId = in.readByte();
onlyCount = in.readBoolean();
requestedSize = in.readVInt();
count = in.readVLong();
matches = new BytesRef[in.readVInt()];
for (int i = 0; i < matches.length; i++) {
matches[i] = in.readBytesRef();
}
scores = new float[in.readVInt()];
for (int i = 0; i < scores.length; i++) {
scores[i] = in.readFloat();
}
topDocs = Lucene.readTopDocs(in);
int size = in.readVInt();
ids = new HashMap<>(size);
for (int i = 0; i < size; i++) {
ids.put(in.readVInt(), in.readString());
}
size = in.readVInt();
hls = new HashMap<>(size);
for (int i = 0; i < size; i++) {
int docId = in.readVInt();
int mSize = in.readVInt();
Map<String, HighlightField> fields = new HashMap<>();
for (int j = 0; j < mSize; j++) {
fields.put(in.readString(), HighlightField.readHighlightField(in));
}
hls.add(fields);
hls.put(docId, fields);
}
aggregations = InternalAggregations.readOptionalAggregations(in);
if (in.readBoolean()) {
@ -169,23 +148,21 @@ public class PercolateShardResponse extends BroadcastShardResponse {
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeByte(percolatorTypeId);
out.writeBoolean(onlyCount);
out.writeVInt(requestedSize); // must mirror readVInt(requestedSize) in readFrom
out.writeVLong(count);
out.writeVInt(matches.length);
for (BytesRef match : matches) {
out.writeBytesRef(match);
}
out.writeVLong(scores.length);
for (float score : scores) {
out.writeFloat(score);
Lucene.writeTopDocs(out, topDocs);
out.writeVInt(ids.size());
for (Map.Entry<Integer, String> entry : ids.entrySet()) {
out.writeVInt(entry.getKey());
out.writeString(entry.getValue());
}
out.writeVInt(hls.size());
for (Map<String, HighlightField> hl : hls) {
out.writeVInt(hl.size());
for (Map.Entry<String, HighlightField> entry : hl.entrySet()) {
out.writeString(entry.getKey());
entry.getValue().writeTo(out);
for (Map.Entry<Integer, Map<String, HighlightField>> entry1 : hls.entrySet()) {
out.writeVInt(entry1.getKey());
out.writeVInt(entry1.getValue().size());
for (Map.Entry<String, HighlightField> entry2 : entry1.getValue().entrySet()) {
out.writeString(entry2.getKey());
entry2.getValue().writeTo(out);
}
}
out.writeOptionalStreamable(aggregations);
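The read and write sides of this wire format must mirror each other exactly: the entry count as a vint first, then each key/value pair in the same order. A minimal standalone sketch of the pattern for the ids map, using only the StreamInput/StreamOutput calls from the code above:

// Sketch only: symmetric read/write of a Map<Integer, String> (docId -> query id).
void writeIds(StreamOutput out, Map<Integer, String> ids) throws IOException {
    out.writeVInt(ids.size());                   // entry count first
    for (Map.Entry<Integer, String> entry : ids.entrySet()) {
        out.writeVInt(entry.getKey());           // Lucene docId
        out.writeString(entry.getValue());       // percolator query id
    }
}
Map<Integer, String> readIds(StreamInput in) throws IOException {
    int size = in.readVInt();
    Map<Integer, String> ids = new HashMap<>(size);
    for (int i = 0; i < size; i++) {
        ids.put(in.readVInt(), in.readString()); // must mirror the write order exactly
    }
    return ids;
}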

View File

@ -18,6 +18,7 @@
*/
package org.elasticsearch.action.percolate;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.get.GetRequest;
@ -43,6 +44,7 @@ import org.elasticsearch.percolator.PercolatorService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
@ -117,7 +119,7 @@ public class TransportPercolateAction extends TransportBroadcastAction<Percolate
List<PercolateShardResponse> shardResults = null;
List<ShardOperationFailedException> shardFailures = null;
byte percolatorTypeId = 0x00;
boolean onlyCount = false;
for (int i = 0; i < shardsResponses.length(); i++) {
Object shardResponse = shardsResponses.get(i);
if (shardResponse == null) {
@ -133,7 +135,7 @@ public class TransportPercolateAction extends TransportBroadcastAction<Percolate
successfulShards++;
if (!percolateShardResponse.isEmpty()) {
if (shardResults == null) {
percolatorTypeId = percolateShardResponse.percolatorTypeId();
onlyCount = percolateShardResponse.onlyCount();
shardResults = new ArrayList<>();
}
shardResults.add(percolateShardResponse);
@ -146,7 +148,12 @@ public class TransportPercolateAction extends TransportBroadcastAction<Percolate
PercolateResponse.Match[] matches = request.onlyCount() ? null : PercolateResponse.EMPTY;
return new PercolateResponse(shardsResponses.length(), successfulShards, failedShards, shardFailures, tookInMillis, matches);
} else {
PercolatorService.ReduceResult result = percolatorService.reduce(percolatorTypeId, shardResults, request);
PercolatorService.ReduceResult result = null;
try {
result = percolatorService.reduce(onlyCount, shardResults, request);
} catch (IOException e) {
throw new ElasticsearchException("error during reduce phase", e);
}
long tookInMillis = Math.max(1, System.currentTimeMillis() - request.startTime);
return new PercolateResponse(
shardsResponses.length(), successfulShards, failedShards, shardFailures,

View File

@ -26,10 +26,10 @@ import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.action.support.ThreadedActionListener;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateObserver;
import org.elasticsearch.cluster.MasterNodeChangePredicate;
import org.elasticsearch.cluster.NotMasterException;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
@ -51,20 +51,6 @@ import java.util.function.Supplier;
* A base class for operations that needs to be performed on the master node.
*/
public abstract class TransportMasterNodeAction<Request extends MasterNodeRequest, Response extends ActionResponse> extends HandledTransportAction<Request, Response> {
private static final ClusterStateObserver.ChangePredicate masterNodeChangedPredicate = new ClusterStateObserver.ChangePredicate() {
@Override
public boolean apply(ClusterState previousState, ClusterState.ClusterStateStatus previousStatus,
ClusterState newState, ClusterState.ClusterStateStatus newStatus) {
// The condition !newState.nodes().masterNodeId().equals(previousState.nodes().masterNodeId()) is not sufficient as the same master node might get reelected after a disruption.
return newState.nodes().masterNodeId() != null && newState != previousState;
}
@Override
public boolean apply(ClusterChangedEvent event) {
return event.nodesDelta().masterNodeChanged();
}
};
protected final TransportService transportService;
protected final ClusterService clusterService;
@ -164,7 +150,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
if (t instanceof Discovery.FailedToCommitClusterStateException
|| (t instanceof NotMasterException)) {
logger.debug("master could not publish cluster state or stepped down before publishing action [{}], scheduling a retry", t, actionName);
retry(t, masterNodeChangedPredicate);
retry(t, MasterNodeChangePredicate.INSTANCE);
} else {
listener.onFailure(t);
}
@ -180,7 +166,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
} else {
if (nodes.masterNode() == null) {
logger.debug("no known master node, scheduling a retry");
retry(null, masterNodeChangedPredicate);
retry(null, MasterNodeChangePredicate.INSTANCE);
} else {
transportService.sendRequest(nodes.masterNode(), actionName, request, new ActionListenerResponseHandler<Response>(listener) {
@Override
@ -195,7 +181,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
// we want to retry here a bit to see if a new master is elected
logger.debug("connection exception while trying to forward request with action name [{}] to master node [{}], scheduling a retry. Error: [{}]",
actionName, nodes.masterNode(), exp.getDetailedMessage());
retry(cause, masterNodeChangedPredicate);
retry(cause, MasterNodeChangePredicate.INSTANCE);
} else {
listener.onFailure(exp);
}

View File

@ -844,11 +844,11 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
// we never execute replication operation locally as primary operation has already completed locally
// hence, we ignore any local shard for replication
if (nodes.localNodeId().equals(shard.currentNodeId()) == false) {
performOnReplica(shard, shard.currentNodeId());
performOnReplica(shard);
}
// send operation to relocating shard
if (shard.relocating()) {
performOnReplica(shard, shard.relocatingNodeId());
performOnReplica(shard.buildTargetRelocatingShard());
}
}
}
@ -856,9 +856,10 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
/**
* send replica operation to target node
*/
void performOnReplica(final ShardRouting shard, final String nodeId) {
void performOnReplica(final ShardRouting shard) {
// if we don't have that node, it means that it might have failed and will be created again;
// in this case, we don't have to do the operation, and can just let it fail over
String nodeId = shard.currentNodeId();
if (!nodes.nodeExists(nodeId)) {
logger.trace("failed to send action [{}] on replica [{}] for request [{}] due to unknown node [{}]", transportReplicaAction, shard.shardId(), replicaRequest, nodeId);
onReplicaFailure(nodeId, null);
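The relocation branch above now resolves the target node from the routing entry itself; a hedged sketch of what buildTargetRelocatingShard provides:

// Hedged sketch: the target routing of a relocating shard points at the relocation
// target node, so performOnReplica can read the node id from the routing directly.
if (shard.relocating()) {
    ShardRouting target = shard.buildTargetRelocatingShard();
    assert target.currentNodeId().equals(shard.relocatingNodeId());
    performOnReplica(target);
}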

View File

@ -113,15 +113,6 @@ import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeResponse;
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest;
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder;
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse;
import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerRequest;
import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerRequestBuilder;
import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse;
import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequest;
import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequestBuilder;
import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse;
import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerRequest;
import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerRequestBuilder;
import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerResponse;
import org.elasticsearch.common.Nullable;
/**
@ -771,51 +762,6 @@ public interface IndicesAdminClient extends ElasticsearchClient {
*/
ValidateQueryRequestBuilder prepareValidateQuery(String... indices);
/**
* Puts an index search warmer to be applied when applicable.
*/
ActionFuture<PutWarmerResponse> putWarmer(PutWarmerRequest request);
/**
* Puts an index search warmer to be applied when applicable.
*/
void putWarmer(PutWarmerRequest request, ActionListener<PutWarmerResponse> listener);
/**
* Puts an index search warmer to be applied when applicable.
*/
PutWarmerRequestBuilder preparePutWarmer(String name);
/**
* Deletes an index warmer.
*/
ActionFuture<DeleteWarmerResponse> deleteWarmer(DeleteWarmerRequest request);
/**
* Deletes an index warmer.
*/
void deleteWarmer(DeleteWarmerRequest request, ActionListener<DeleteWarmerResponse> listener);
/**
* Deletes an index warmer.
*/
DeleteWarmerRequestBuilder prepareDeleteWarmer();
/**
* Returns a map of index warmers for the given get request.
*/
void getWarmers(GetWarmersRequest request, ActionListener<GetWarmersResponse> listener);
/**
* Returns a map of index warmers for the given get request.
*/
ActionFuture<GetWarmersResponse> getWarmers(GetWarmersRequest request);
/**
* Returns a new builder to fetch index warmer metadata for the given indices.
*/
GetWarmersRequestBuilder prepareGetWarmers(String... indices);
/**
* Executes a per index settings get request and returns the settings for the indices specified.
* Note: this is a per index request and will not include settings that are set on the cluster
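For reference, client code written against the removed warmer methods looked roughly like the following (a hypothetical pre-removal usage, not a current API):

// Hypothetical pre-removal usage of the warmer client API, for illustration only.
client.admin().indices().preparePutWarmer("warmer_1")
        .setSearchRequest(client.prepareSearch("index1")
                .setQuery(QueryBuilders.matchAllQuery()))
        .get();
client.admin().indices().prepareDeleteWarmer()
        .setIndices("index1").setNames("warmer_1")
        .get();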

View File

@ -232,18 +232,6 @@ import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest;
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder;
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse;
import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerAction;
import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerRequest;
import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerRequestBuilder;
import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse;
import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersAction;
import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequest;
import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequestBuilder;
import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse;
import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerAction;
import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerRequest;
import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerRequestBuilder;
import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerResponse;
import org.elasticsearch.action.bulk.BulkAction;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
@ -1669,51 +1657,6 @@ public abstract class AbstractClient extends AbstractComponent implements Client
return new ValidateQueryRequestBuilder(this, ValidateQueryAction.INSTANCE).setIndices(indices);
}
@Override
public ActionFuture<PutWarmerResponse> putWarmer(PutWarmerRequest request) {
return execute(PutWarmerAction.INSTANCE, request);
}
@Override
public void putWarmer(PutWarmerRequest request, ActionListener<PutWarmerResponse> listener) {
execute(PutWarmerAction.INSTANCE, request, listener);
}
@Override
public PutWarmerRequestBuilder preparePutWarmer(String name) {
return new PutWarmerRequestBuilder(this, PutWarmerAction.INSTANCE, name);
}
@Override
public ActionFuture<DeleteWarmerResponse> deleteWarmer(DeleteWarmerRequest request) {
return execute(DeleteWarmerAction.INSTANCE, request);
}
@Override
public void deleteWarmer(DeleteWarmerRequest request, ActionListener<DeleteWarmerResponse> listener) {
execute(DeleteWarmerAction.INSTANCE, request, listener);
}
@Override
public DeleteWarmerRequestBuilder prepareDeleteWarmer() {
return new DeleteWarmerRequestBuilder(this, DeleteWarmerAction.INSTANCE);
}
@Override
public GetWarmersRequestBuilder prepareGetWarmers(String... indices) {
return new GetWarmersRequestBuilder(this, GetWarmersAction.INSTANCE, indices);
}
@Override
public ActionFuture<GetWarmersResponse> getWarmers(GetWarmersRequest request) {
return execute(GetWarmersAction.INSTANCE, request);
}
@Override
public void getWarmers(GetWarmersRequest request, ActionListener<GetWarmersResponse> listener) {
execute(GetWarmersAction.INSTANCE, request, listener);
}
@Override
public GetSettingsRequestBuilder prepareGetSettings(String... indices) {
return new GetSettingsRequestBuilder(this, GetSettingsAction.INSTANCE, indices);

View File

@ -17,30 +17,24 @@
* under the License.
*/
package org.elasticsearch.action.admin.indices.warmer.put;
package org.elasticsearch.cluster;
import org.elasticsearch.action.Action;
import org.elasticsearch.client.ElasticsearchClient;
public enum MasterNodeChangePredicate implements ClusterStateObserver.ChangePredicate {
INSTANCE;
/**
* Action for the admin/warmers/put API.
*/
public class PutWarmerAction extends Action<PutWarmerRequest, PutWarmerResponse, PutWarmerRequestBuilder> {
public static final PutWarmerAction INSTANCE = new PutWarmerAction();
public static final String NAME = "indices:admin/warmers/put";
private PutWarmerAction() {
super(NAME);
@Override
public boolean apply(
ClusterState previousState,
ClusterState.ClusterStateStatus previousStatus,
ClusterState newState,
ClusterState.ClusterStateStatus newStatus) {
// checking if the masterNodeId changed is insufficient as the
// same master node might get re-elected after a disruption
return newState.nodes().masterNodeId() != null && newState != previousState;
}
@Override
public PutWarmerResponse newResponse() {
return new PutWarmerResponse();
}
@Override
public PutWarmerRequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new PutWarmerRequestBuilder(client, this);
public boolean apply(ClusterChangedEvent changedEvent) {
return changedEvent.nodesDelta().masterNodeChanged();
}
}
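A hedged sketch of how the retry path in TransportMasterNodeAction uses this predicate with a ClusterStateObserver (the observer, listener, and doStart wiring are assumed from that class):

// Hedged sketch: wait for a master change before retrying the failed action.
observer.waitForNextChange(new ClusterStateObserver.Listener() {
    @Override
    public void onNewClusterState(ClusterState state) {
        doStart(); // re-resolve the (possibly new) master and retry
    }
    @Override
    public void onClusterServiceClose() {
        listener.onFailure(new NodeClosedException(clusterService.localNode()));
    }
    @Override
    public void onTimeout(TimeValue timeout) {
        listener.onFailure(failure); // give up after the configured retry timeout
    }
}, MasterNodeChangePredicate.INSTANCE);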

View File

@ -302,6 +302,10 @@ public class ShardStateAction extends AbstractComponent {
this.failure = failure;
}
public ShardRouting getShardRouting() {
return shardRouting;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);

View File

@ -46,7 +46,6 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
@ -92,11 +91,6 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
public static Map<String, Custom> customPrototypes = new HashMap<>();
static {
// register non plugin custom metadata
registerPrototype(IndexWarmersMetaData.TYPE, IndexWarmersMetaData.PROTO);
}
/**
* Register a custom index meta data factory. Make sure to call it from a static block.
*/
@ -904,6 +898,12 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
builder.putActiveAllocationIds(Integer.valueOf(shardId), allocationIds);
}
}
} else if ("warmers".equals(currentFieldName)) {
// TODO: do this in 4.0:
// throw new IllegalArgumentException("Warmers are not supported anymore - are you upgrading from 1.x?");
// ignore: warmers have been removed in 3.0 and are
// simply ignored when upgrading from 2.x
assert Version.CURRENT.major <= 3;
} else {
// check if its a custom index metadata
Custom proto = lookupPrototype(currentFieldName);

View File

@ -55,12 +55,10 @@ import org.elasticsearch.index.store.IndexStoreConfig;
import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.indices.ttl.IndicesTTLService;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.EnumSet;
@ -71,7 +69,6 @@ import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.stream.Collectors;
import static java.util.Collections.unmodifiableSet;
import static org.elasticsearch.common.settings.Settings.readSettingsFromStream;
@ -365,49 +362,6 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
return indexMapBuilder.build();
}
public ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> findWarmers(String[] concreteIndices, final String[] types, final String[] uncheckedWarmers) {
assert uncheckedWarmers != null;
assert concreteIndices != null;
if (concreteIndices.length == 0) {
return ImmutableOpenMap.of();
}
// special _all check to behave the same as not specifying anything for the warmers (not for the indices)
final String[] warmers = Strings.isAllOrWildcard(uncheckedWarmers) ? Strings.EMPTY_ARRAY : uncheckedWarmers;
ImmutableOpenMap.Builder<String, List<IndexWarmersMetaData.Entry>> mapBuilder = ImmutableOpenMap.builder();
Iterable<String> intersection = HppcMaps.intersection(ObjectHashSet.from(concreteIndices), indices.keys());
for (String index : intersection) {
IndexMetaData indexMetaData = indices.get(index);
IndexWarmersMetaData indexWarmersMetaData = indexMetaData.custom(IndexWarmersMetaData.TYPE);
if (indexWarmersMetaData == null || indexWarmersMetaData.entries().isEmpty()) {
continue;
}
// TODO: make this a List so we don't have to copy below
Collection<IndexWarmersMetaData.Entry> filteredWarmers =
indexWarmersMetaData
.entries()
.stream()
.filter(warmer -> {
if (warmers.length != 0 && types.length != 0) {
return Regex.simpleMatch(warmers, warmer.name()) && Regex.simpleMatch(types, warmer.types());
} else if (warmers.length != 0) {
return Regex.simpleMatch(warmers, warmer.name());
} else if (types.length != 0) {
return Regex.simpleMatch(types, warmer.types());
} else {
return true;
}
})
.collect(Collectors.toCollection(ArrayList::new));
if (!filteredWarmers.isEmpty()) {
mapBuilder.put(index, Collections.unmodifiableList(new ArrayList<>(filteredWarmers)));
}
}
return mapBuilder.build();
}
/**
* Returns all the concrete indices.
*/

View File

@ -221,7 +221,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
try (AnalysisService analysisService = new FakeAnalysisService(indexSettings)) {
try (MapperService mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry)) {
try (MapperService mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry, () -> null)) {
for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
MappingMetaData mappingMetaData = cursor.value;
mapperService.merge(mappingMetaData.type(), mappingMetaData.source(), false, false);
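The extra constructor argument is a Supplier<QueryShardContext> that mapper parsing can use (the new percolator field type needs it to parse queries). Upgrade-time validation has no live shard, so it passes () -> null as above; a live index wires in a real supplier, as sketched here:

// Hedged sketch: a live index supplies a real query shard context lazily.
MapperService mapperService = new MapperService(indexSettings, analysisService,
        similarityService, mapperRegistry, indexService::getQueryShardContext);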

View File

@ -86,11 +86,6 @@ import java.util.Objects;
*
*/
public class Lucene {
// TODO: remove VERSION, and have users use Version.LATEST.
public static final Version VERSION = Version.LATEST;
public static final Version ANALYZER_VERSION = VERSION;
public static final Version QUERYPARSER_VERSION = VERSION;
public static final String LATEST_DOC_VALUES_FORMAT = "Lucene54";
public static final String LATEST_POSTINGS_FORMAT = "Lucene50";
public static final String LATEST_CODEC = "Lucene54";
@ -109,7 +104,6 @@ public class Lucene {
public static final TopDocs EMPTY_TOP_DOCS = new TopDocs(0, EMPTY_SCORE_DOCS, 0.0f);
@SuppressWarnings("deprecation")
public static Version parseVersion(@Nullable String version, Version defaultVersion, ESLogger logger) {
if (version == null) {
return defaultVersion;

View File

@ -88,9 +88,6 @@ import org.elasticsearch.rest.action.admin.indices.template.put.RestPutIndexTemp
import org.elasticsearch.rest.action.admin.indices.upgrade.RestUpgradeAction;
import org.elasticsearch.rest.action.admin.indices.validate.query.RestValidateQueryAction;
import org.elasticsearch.rest.action.admin.indices.validate.template.RestRenderSearchTemplateAction;
import org.elasticsearch.rest.action.admin.indices.warmer.delete.RestDeleteWarmerAction;
import org.elasticsearch.rest.action.admin.indices.warmer.get.RestGetWarmerAction;
import org.elasticsearch.rest.action.admin.indices.warmer.put.RestPutWarmerAction;
import org.elasticsearch.rest.action.bulk.RestBulkAction;
import org.elasticsearch.rest.action.cat.AbstractCatAction;
import org.elasticsearch.rest.action.cat.RestAliasAction;
@ -205,10 +202,6 @@ public class NetworkModule extends AbstractModule {
RestDeleteIndexTemplateAction.class,
RestHeadIndexTemplateAction.class,
RestPutWarmerAction.class,
RestDeleteWarmerAction.class,
RestGetWarmerAction.class,
RestPutMappingAction.class,
RestGetMappingAction.class,
RestGetFieldMappingAction.class,

View File

@ -117,7 +117,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
this.indexSettings = indexSettings;
this.analysisService = registry.build(indexSettings);
this.similarityService = similarityService;
this.mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry);
this.mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry, IndexService.this::getQueryShardContext);
this.indexFieldData = new IndexFieldDataService(indexSettings, nodeServicesProvider.getIndicesFieldDataCache(), nodeServicesProvider.getCircuitBreakerService(), mapperService);
this.shardStoreDeleter = shardStoreDeleter;
this.eventListener = eventListener;

View File

@ -89,12 +89,12 @@ public class Analysis {
// check for explicit version on the specific analyzer component
String sVersion = settings.get("version");
if (sVersion != null) {
return Lucene.parseVersion(sVersion, Lucene.ANALYZER_VERSION, logger);
return Lucene.parseVersion(sVersion, Version.LATEST, logger);
}
// check for explicit version on the index itself as default for all analysis components
sVersion = indexSettings.get("index.analysis.version");
if (sVersion != null) {
return Lucene.parseVersion(sVersion, Lucene.ANALYZER_VERSION, logger);
return Lucene.parseVersion(sVersion, Version.LATEST, logger);
}
// resolve the analysis version based on the version the index was created with
return org.elasticsearch.Version.indexCreated(indexSettings).luceneVersion;

View File

@ -181,6 +181,7 @@ public final class AnalysisRegistry implements Closeable {
tokenizers.put("standard", StandardTokenizerFactory::new);
tokenizers.put("uax_url_email", UAX29URLEmailTokenizerFactory::new);
tokenizers.put("path_hierarchy", PathHierarchyTokenizerFactory::new);
tokenizers.put("PathHierarchy", PathHierarchyTokenizerFactory::new);
tokenizers.put("keyword", KeywordTokenizerFactory::new);
tokenizers.put("letter", LetterTokenizerFactory::new);
tokenizers.put("lowercase", LowerCaseTokenizerFactory::new);
@ -409,6 +410,7 @@ public final class AnalysisRegistry implements Closeable {
// Tokenizer aliases
tokenizerFactories.put("nGram", new PreBuiltTokenizerFactoryFactory(PreBuiltTokenizers.NGRAM.getTokenizerFactory(Version.CURRENT)));
tokenizerFactories.put("edgeNGram", new PreBuiltTokenizerFactoryFactory(PreBuiltTokenizers.EDGE_NGRAM.getTokenizerFactory(Version.CURRENT)));
tokenizerFactories.put("PathHierarchy", new PreBuiltTokenizerFactoryFactory(PreBuiltTokenizers.PATH_HIERARCHY.getTokenizerFactory(Version.CURRENT)));
// Token filters
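The camel-case alias keeps pre-existing indices that referenced the tokenizer by its old name working. A hedged sketch of analysis settings that now resolve again under either spelling (analyzer names and setting keys are assumptions):

// Hedged sketch: both spellings resolve to the same tokenizer factory.
Settings settings = Settings.builder()
        .put("index.analysis.analyzer.paths.tokenizer", "PathHierarchy")    // legacy spelling
        .put("index.analysis.analyzer.paths2.tokenizer", "path_hierarchy")  // canonical spelling
        .build();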

View File

@ -50,6 +50,8 @@ import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.metrics.CounterMetric;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ReleasableLock;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.mapper.ParseContext.Document;
@ -176,10 +178,10 @@ public abstract class Engine implements Closeable {
* is enabled
*/
protected static final class IndexThrottle {
private final CounterMetric throttleTimeMillisMetric = new CounterMetric();
private volatile long startOfThrottleNS;
private static final ReleasableLock NOOP_LOCK = new ReleasableLock(new NoOpLock());
private final ReleasableLock lockReference = new ReleasableLock(new ReentrantLock());
private volatile ReleasableLock lock = NOOP_LOCK;
public Releasable acquireThrottle() {
@ -189,6 +191,7 @@ public abstract class Engine implements Closeable {
/** Activate throttling, which switches the lock to be a real lock */
public void activate() {
assert lock == NOOP_LOCK : "throttling activated while already active";
startOfThrottleNS = System.nanoTime();
lock = lockReference;
}
@ -196,7 +199,45 @@ public abstract class Engine implements Closeable {
public void deactivate() {
assert lock != NOOP_LOCK : "throttling deactivated but not active";
lock = NOOP_LOCK;
assert startOfThrottleNS > 0 : "Bad state of startOfThrottleNS";
long throttleTimeNS = System.nanoTime() - startOfThrottleNS;
if (throttleTimeNS >= 0) {
// Paranoia (System.nanoTime() is supposed to be monotonic): time slip may have occurred, but we never want to add a negative number
throttleTimeMillisMetric.inc(TimeValue.nsecToMSec(throttleTimeNS));
}
}
long getThrottleTimeInMillis() {
long currentThrottleNS = 0;
if (isThrottled() && startOfThrottleNS != 0) {
currentThrottleNS += System.nanoTime() - startOfThrottleNS;
if (currentThrottleNS < 0) {
// Paranoia (System.nanoTime() is supposed to be monotonic): time slip must have happened, so we have to ignore this value
currentThrottleNS = 0;
}
}
return throttleTimeMillisMetric.count() + TimeValue.nsecToMSec(currentThrottleNS);
}
boolean isThrottled() {
return lock != NOOP_LOCK;
}
}
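A hedged sketch of the throttle life-cycle, using only the names defined above:

// Merges fell behind: switch to the real lock and start the clock.
throttle.activate();
try (Releasable ignored = throttle.acquireThrottle()) {
    // indexing proceeds here, serialized against other writers while throttled
}
// Merges caught up: back to the no-op lock, accumulated time is recorded.
throttle.deactivate();
long totalMillis = throttle.getThrottleTimeInMillis(); // accumulated + in-flight time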
/**
* Returns the number of milliseconds this engine was under index throttling.
*/
public long getIndexThrottleTimeInMillis() {
return 0;
}
/**
* Returns <code>true</code> iff this engine is currently under index throttling.
* @see #getIndexThrottleTimeInMillis()
*/
public boolean isThrottled() {
return false;
}
/** A Lock implementation that always allows the lock to be acquired */
@ -916,7 +957,7 @@ public abstract class Engine implements Closeable {
}
}
public static class GetResult {
public static class GetResult implements Releasable {
private final boolean exists;
private final long version;
private final Translog.Source source;
@ -962,6 +1003,11 @@ public abstract class Engine implements Closeable {
return docIdAndVersion;
}
@Override
public void close() {
release();
}
public void release() {
if (searcher != null) {
searcher.close();
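Since GetResult now implements Releasable, callers can scope the underlying searcher with try-with-resources instead of calling release() explicitly; a hedged sketch (the Engine.Get construction is an assumption):

// Hedged sketch: close() releases the searcher when the block exits.
try (Engine.GetResult result = engine.get(new Engine.Get(true, uid))) {
    if (result.exists()) {
        // read result.docIdAndVersion() while the searcher is still open
    }
}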

View File

@ -58,7 +58,6 @@ public final class EngineConfig {
private final TimeValue flushMergesAfter;
private final String codecName;
private final ThreadPool threadPool;
private final ShardIndexingService indexingService;
private final Engine.Warmer warmer;
private final Store store;
private final SnapshotDeletionPolicy deletionPolicy;
@ -107,7 +106,7 @@ public final class EngineConfig {
/**
* Creates a new {@link org.elasticsearch.index.engine.EngineConfig}
*/
public EngineConfig(ShardId shardId, ThreadPool threadPool, ShardIndexingService indexingService,
public EngineConfig(ShardId shardId, ThreadPool threadPool,
IndexSettings indexSettings, Engine.Warmer warmer, Store store, SnapshotDeletionPolicy deletionPolicy,
MergePolicy mergePolicy, MergeSchedulerConfig mergeSchedulerConfig, Analyzer analyzer,
Similarity similarity, CodecService codecService, Engine.EventListener eventListener,
@ -116,7 +115,6 @@ public final class EngineConfig {
final Settings settings = indexSettings.getSettings();
this.indexSettings = indexSettings;
this.threadPool = threadPool;
this.indexingService = indexingService;
this.warmer = warmer == null ? (a,b) -> {} : warmer;
this.store = store;
this.deletionPolicy = deletionPolicy;
@ -239,18 +237,6 @@ public final class EngineConfig {
return threadPool;
}
/**
* Returns a {@link org.elasticsearch.index.indexing.ShardIndexingService} used inside the engine to inform about
* pre and post index. The operations are used for statistic purposes etc.
*
* @see org.elasticsearch.index.indexing.ShardIndexingService#postIndex(Engine.Index)
* @see org.elasticsearch.index.indexing.ShardIndexingService#preIndex(Engine.Index)
*
*/
public ShardIndexingService getIndexingService() {
return indexingService;
}
/**
* Returns an {@link org.elasticsearch.index.engine.Engine.Warmer} used to warm new searchers before they are used for searching.
*/

View File

@ -55,6 +55,7 @@ import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
import org.elasticsearch.common.lucene.index.ElasticsearchLeafReader;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.math.MathUtils;
import org.elasticsearch.common.metrics.CounterMetric;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.common.util.concurrent.ReleasableLock;
@ -95,7 +96,6 @@ public class InternalEngine extends Engine {
*/
private volatile long lastDeleteVersionPruneTimeMSec;
private final ShardIndexingService indexingService;
private final Engine.Warmer warmer;
private final Translog translog;
private final ElasticsearchConcurrentMergeScheduler mergeScheduler;
@ -131,7 +131,6 @@ public class InternalEngine extends Engine {
boolean success = false;
try {
this.lastDeleteVersionPruneTimeMSec = engineConfig.getThreadPool().estimatedTimeInMillis();
this.indexingService = engineConfig.getIndexingService();
this.warmer = engineConfig.getWarmer();
mergeScheduler = scheduler = new EngineMergeScheduler(engineConfig.getShardId(), engineConfig.getIndexSettings(), engineConfig.getMergeSchedulerConfig());
this.dirtyLocks = new Object[Runtime.getRuntime().availableProcessors() * 10]; // we multiply it to have enough...
@ -422,8 +421,6 @@ public class InternalEngine extends Engine {
versionMap.putUnderLock(index.uid().bytes(), new VersionValue(updatedVersion, translogLocation));
index.setTranslogLocation(translogLocation);
indexingService.postIndexUnderLock(index);
return created;
}
}
@ -524,7 +521,6 @@ public class InternalEngine extends Engine {
Translog.Location translogLocation = translog.add(new Translog.Delete(delete));
versionMap.putUnderLock(delete.uid().bytes(), new DeleteVersionValue(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis(), translogLocation));
delete.setTranslogLocation(translogLocation);
indexingService.postDeleteUnderLock(delete);
}
}
@ -1059,6 +1055,10 @@ public class InternalEngine extends Engine {
throttle.deactivate();
}
public long getIndexThrottleTimeInMillis() {
return throttle.getThrottleTimeInMillis();
}
long getGcDeletesInMillis() {
return engineConfig.getGcDeletesInMillis();
}
@ -1081,7 +1081,6 @@ public class InternalEngine extends Engine {
if (numMergesInFlight.incrementAndGet() > maxNumMerges) {
if (isThrottling.getAndSet(true) == false) {
logger.info("now throttling indexing: numMergesInFlight={}, maxNumMerges={}", numMergesInFlight, maxNumMerges);
indexingService.throttlingActivated();
activateThrottling();
}
}
@ -1093,7 +1092,6 @@ public class InternalEngine extends Engine {
if (numMergesInFlight.decrementAndGet() < maxNumMerges) {
if (isThrottling.getAndSet(false)) {
logger.info("stop throttling indexing: numMergesInFlight={}, maxNumMerges={}", numMergesInFlight, maxNumMerges);
indexingService.throttlingDeactivated();
deactivateThrottling();
}
}

View File

@ -32,16 +32,6 @@ public abstract class IndexingOperationListener {
return operation;
}
/**
* Called after the indexing occurs, under a locking scheme to maintain
* concurrent updates to the same doc.
* <p>
* Note, long operations should not occur under this callback.
*/
public void postIndexUnderLock(Engine.Index index) {
}
/**
* Called after the indexing operation occurred.
*/
@ -63,15 +53,6 @@ public abstract class IndexingOperationListener {
return delete;
}
/**
* Called after the delete occurs, under a locking scheme to maintain
* concurrent updates to the same doc.
* <p>
* Note, long operations should not occur under this callback.
*/
public void postDeleteUnderLock(Engine.Delete delete) {
}
/**
* Called after the delete operation occurred.

View File

@ -59,19 +59,19 @@ public class ShardIndexingService extends AbstractIndexShardComponent {
* is returned for them. If they are set, then only types provided will be returned, or
* <tt>_all</tt> for all types.
*/
public IndexingStats stats(String... types) {
IndexingStats.Stats total = totalStats.stats();
public IndexingStats stats(boolean isThrottled, long currentThrottleInMillis, String... types) {
IndexingStats.Stats total = totalStats.stats(isThrottled, currentThrottleInMillis);
Map<String, IndexingStats.Stats> typesSt = null;
if (types != null && types.length > 0) {
typesSt = new HashMap<>(typesStats.size());
if (types.length == 1 && types[0].equals("_all")) {
for (Map.Entry<String, StatsHolder> entry : typesStats.entrySet()) {
typesSt.put(entry.getKey(), entry.getValue().stats());
typesSt.put(entry.getKey(), entry.getValue().stats(isThrottled, currentThrottleInMillis));
}
} else {
for (Map.Entry<String, StatsHolder> entry : typesStats.entrySet()) {
if (Regex.simpleMatch(types, entry.getKey())) {
typesSt.put(entry.getKey(), entry.getValue().stats());
typesSt.put(entry.getKey(), entry.getValue().stats(isThrottled, currentThrottleInMillis));
}
}
}
@ -87,14 +87,6 @@ public class ShardIndexingService extends AbstractIndexShardComponent {
listeners.remove(listener);
}
public void throttlingActivated() {
totalStats.setThrottled(true);
}
public void throttlingDeactivated() {
totalStats.setThrottled(false);
}
public Engine.Index preIndex(Engine.Index operation) {
totalStats.indexCurrent.inc();
typeStats(operation.type()).indexCurrent.inc();
@ -104,16 +96,6 @@ public class ShardIndexingService extends AbstractIndexShardComponent {
return operation;
}
public void postIndexUnderLock(Engine.Index index) {
for (IndexingOperationListener listener : listeners) {
try {
listener.postIndexUnderLock(index);
} catch (Exception e) {
logger.warn("postIndexUnderLock listener [{}] failed", e, listener);
}
}
}
public void postIndex(Engine.Index index) {
long took = index.endTime() - index.startTime();
totalStats.indexMetric.inc(took);
@ -154,15 +136,6 @@ public class ShardIndexingService extends AbstractIndexShardComponent {
return delete;
}
public void postDeleteUnderLock(Engine.Delete delete) {
for (IndexingOperationListener listener : listeners) {
try {
listener.postDeleteUnderLock(delete);
} catch (Exception e) {
logger.warn("postDeleteUnderLock listener [{}] failed", e, listener);
}
}
}
public void postDelete(Engine.Delete delete) {
long took = delete.endTime() - delete.startTime();
@ -238,38 +211,12 @@ public class ShardIndexingService extends AbstractIndexShardComponent {
public final CounterMetric indexFailed = new CounterMetric();
public final CounterMetric deleteCurrent = new CounterMetric();
public final CounterMetric noopUpdates = new CounterMetric();
public final CounterMetric throttleTimeMillisMetric = new CounterMetric();
volatile boolean isThrottled = false;
volatile long startOfThrottleNS;
public IndexingStats.Stats stats() {
long currentThrottleNS = 0;
if (isThrottled && startOfThrottleNS != 0) {
currentThrottleNS += System.nanoTime() - startOfThrottleNS;
if (currentThrottleNS < 0) {
// Paranoia (System.nanoTime() is supposed to be monotonic): time slip must have happened, have to ignore this value
currentThrottleNS = 0;
}
}
public IndexingStats.Stats stats(boolean isThrottled, long currentThrottleMillis) {
return new IndexingStats.Stats(
indexMetric.count(), TimeUnit.NANOSECONDS.toMillis(indexMetric.sum()), indexCurrent.count(), indexFailed.count(),
deleteMetric.count(), TimeUnit.NANOSECONDS.toMillis(deleteMetric.sum()), deleteCurrent.count(),
noopUpdates.count(), isThrottled, TimeUnit.MILLISECONDS.toMillis(throttleTimeMillisMetric.count() + TimeValue.nsecToMSec(currentThrottleNS)));
}
void setThrottled(boolean isThrottled) {
if (!this.isThrottled && isThrottled) {
startOfThrottleNS = System.nanoTime();
} else if (this.isThrottled && !isThrottled) {
assert startOfThrottleNS > 0 : "Bad state of startOfThrottleNS";
long throttleTimeNS = System.nanoTime() - startOfThrottleNS;
if (throttleTimeNS >= 0) {
// Paranoia (System.nanoTime() is supposed to be monotonic): time slip may have occurred but never want to add a negative number
throttleTimeMillisMetric.inc(TimeValue.nsecToMSec(throttleTimeNS));
}
}
this.isThrottled = isThrottled;
noopUpdates.count(), isThrottled, TimeUnit.MILLISECONDS.toMillis(currentThrottleMillis));
}
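Throttle state now lives in the engine (see the Engine and InternalEngine changes above), so stats callers are expected to pass it in; a hedged sketch of a call site, assuming access to the shard's engine:

// Hedged sketch: pull throttle state from the engine, not the indexing service.
IndexingStats stats = indexingService.stats(
        engine.isThrottled(),                  // currently throttled?
        engine.getIndexThrottleTimeInMillis(), // accumulated throttle time
        types);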
public long totalCurrent() {

View File

@ -33,12 +33,14 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.mapper.object.RootObjectMapper;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.indices.mapper.MapperRegistry;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.function.Supplier;
import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.index.mapper.MapperBuilders.doc;
@ -49,6 +51,7 @@ public class DocumentMapperParser {
final AnalysisService analysisService;
private static final ESLogger logger = Loggers.getLogger(DocumentMapperParser.class);
private final SimilarityService similarityService;
private final Supplier<QueryShardContext> queryShardContextSupplier;
private final RootObjectMapper.TypeParser rootObjectTypeParser = new RootObjectMapper.TypeParser();
@ -59,18 +62,20 @@ public class DocumentMapperParser {
private final Map<String, MetadataFieldMapper.TypeParser> rootTypeParsers;
public DocumentMapperParser(IndexSettings indexSettings, MapperService mapperService, AnalysisService analysisService,
SimilarityService similarityService, MapperRegistry mapperRegistry) {
SimilarityService similarityService, MapperRegistry mapperRegistry,
Supplier<QueryShardContext> queryShardContextSupplier) {
this.parseFieldMatcher = new ParseFieldMatcher(indexSettings.getSettings());
this.mapperService = mapperService;
this.analysisService = analysisService;
this.similarityService = similarityService;
this.queryShardContextSupplier = queryShardContextSupplier;
this.typeParsers = mapperRegistry.getMapperParsers();
this.rootTypeParsers = mapperRegistry.getMetadataMapperParsers();
indexVersionCreated = indexSettings.getIndexVersionCreated();
}
public Mapper.TypeParser.ParserContext parserContext(String type) {
return new Mapper.TypeParser.ParserContext(type, analysisService, similarityService::getSimilarity, mapperService, typeParsers::get, indexVersionCreated, parseFieldMatcher);
return new Mapper.TypeParser.ParserContext(type, analysisService, similarityService::getSimilarity, mapperService, typeParsers::get, indexVersionCreated, parseFieldMatcher, queryShardContextSupplier.get());
}
public DocumentMapper parse(@Nullable String type, CompressedXContent source) throws MapperParsingException {

View File

@ -26,6 +26,8 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.similarity.SimilarityProvider;
import java.util.Map;
@ -95,9 +97,11 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {
private final ParseFieldMatcher parseFieldMatcher;
public ParserContext(String type, AnalysisService analysisService, Function<String, SimilarityProvider> similarityLookupService,
private final QueryShardContext queryShardContext;
public ParserContext(String type, AnalysisService analysisService, Function<String, SimilarityProvider> similarityLookupService,
MapperService mapperService, Function<String, TypeParser> typeParsers,
Version indexVersionCreated, ParseFieldMatcher parseFieldMatcher) {
Version indexVersionCreated, ParseFieldMatcher parseFieldMatcher, QueryShardContext queryShardContext) {
this.type = type;
this.analysisService = analysisService;
this.similarityLookupService = similarityLookupService;
@ -105,6 +109,7 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {
this.typeParsers = typeParsers;
this.indexVersionCreated = indexVersionCreated;
this.parseFieldMatcher = parseFieldMatcher;
this.queryShardContext = queryShardContext;
}
public String type() {
@ -135,6 +140,10 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {
return parseFieldMatcher;
}
public QueryShardContext queryShardContext() {
return queryShardContext;
}
public boolean isWithinMultiField() { return false; }
protected Function<String, TypeParser> typeParsers() { return typeParsers; }
@ -150,7 +159,7 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {
static class MultiFieldParserContext extends ParserContext {
MultiFieldParserContext(ParserContext in) {
super(in.type(), in.analysisService, in.similarityLookupService(), in.mapperService(), in.typeParsers(), in.indexVersionCreated(), in.parseFieldMatcher());
super(in.type(), in.analysisService, in.similarityLookupService(), in.mapperService(), in.typeParsers(), in.indexVersionCreated(), in.parseFieldMatcher(), in.queryShardContext());
}
}

View File

@ -44,6 +44,7 @@ import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.mapper.Mapper.BuilderContext;
import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
import org.elasticsearch.index.mapper.object.ObjectMapper;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.indices.InvalidTypeNameException;
import org.elasticsearch.indices.TypeMissingException;
@ -64,12 +65,12 @@ import java.util.Map;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;
import static java.util.Collections.unmodifiableMap;
import static java.util.Collections.unmodifiableSet;
import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder;
/**
@ -116,11 +117,12 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
final MapperRegistry mapperRegistry;
public MapperService(IndexSettings indexSettings, AnalysisService analysisService,
SimilarityService similarityService, MapperRegistry mapperRegistry) {
SimilarityService similarityService, MapperRegistry mapperRegistry,
Supplier<QueryShardContext> queryShardContextSupplier) {
super(indexSettings);
this.analysisService = analysisService;
this.fieldTypes = new FieldTypeLookup();
this.documentParser = new DocumentMapperParser(indexSettings, this, analysisService, similarityService, mapperRegistry);
this.documentParser = new DocumentMapperParser(indexSettings, this, analysisService, similarityService, mapperRegistry, queryShardContextSupplier);
this.indexAnalyzer = new MapperAnalyzerWrapper(analysisService.defaultIndexAnalyzer(), p -> p.indexAnalyzer());
this.searchAnalyzer = new MapperAnalyzerWrapper(analysisService.defaultSearchAnalyzer(), p -> p.searchAnalyzer());
this.searchQuoteAnalyzer = new MapperAnalyzerWrapper(analysisService.defaultSearchQuoteAnalyzer(), p -> p.searchQuoteAnalyzer());
@ -131,8 +133,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
"\"_default_\":{\n" +
"\"properties\" : {\n" +
"\"query\" : {\n" +
"\"type\" : \"object\",\n" +
"\"enabled\" : false\n" +
"\"type\" : \"percolator\"\n" +
"}\n" +
"}\n" +
"}\n" +

View File

@ -0,0 +1,233 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.percolator;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.queries.TermsQuery;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.elasticsearch.common.logging.support.LoggerMessageFormat;
import org.elasticsearch.index.mapper.ParseContext;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
/**
* Utility to extract query terms from queries and create queries from documents.
*/
public final class ExtractQueryTermsService {
private static final byte FIELD_VALUE_SEPARATOR = 0; // nul code point
private ExtractQueryTermsService() {
}
/**
* Extracts all terms from the specified query and adds them to the specified document.
* @param query The query to extract terms from
* @param document The document to add the extracted terms to
* @param queryTermsFieldField The field in the document holding the extracted terms
* @param unknownQueryField The field used to mark a document when not all query terms could be extracted,
* for example because the query contained an unsupported query (e.g. WildcardQuery).
* @param fieldType The field type for the query metadata field
*/
public static void extractQueryTerms(Query query, ParseContext.Document document, String queryTermsFieldField, String unknownQueryField, FieldType fieldType) {
Set<Term> queryTerms;
try {
queryTerms = extractQueryTerms(query);
} catch (UnsupportedQueryException e) {
document.add(new Field(unknownQueryField, new BytesRef(), fieldType));
return;
}
for (Term term : queryTerms) {
BytesRefBuilder builder = new BytesRefBuilder();
builder.append(new BytesRef(term.field()));
builder.append(FIELD_VALUE_SEPARATOR);
builder.append(term.bytes());
document.add(new Field(queryTermsFieldField, builder.toBytesRef(), fieldType));
}
}
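Each extracted term is thus indexed as one byte sequence: the field name, a NUL separator, then the term bytes. A hedged illustration for the term body:quick:

// Hedged illustration of the encoding above.
Term term = new Term("body", "quick");
BytesRefBuilder builder = new BytesRefBuilder();
builder.append(new BytesRef(term.field())); // "body"
builder.append((byte) 0);                   // FIELD_VALUE_SEPARATOR (nul byte)
builder.append(term.bytes());               // "quick"
BytesRef encoded = builder.toBytesRef();    // bytes of "body\0quick"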
/**
* Extracts all query terms from the provided query and returns them as a set.
*
* From a boolean query with required clauses, or from a phrase query, only the longest terms are selected,
* since those terms are likely to be the rarest. A boolean query's must_not clauses are always ignored.
*
* If no query terms can be extracted from some part of the query, term extraction is stopped and
* an UnsupportedQueryException is thrown.
*/
static Set<Term> extractQueryTerms(Query query) {
// TODO: add support for the TermsQuery when it has methods to access the actual terms it encapsulates
// TODO: add support for span queries
if (query instanceof TermQuery) {
return Collections.singleton(((TermQuery) query).getTerm());
} else if (query instanceof PhraseQuery) {
Term[] terms = ((PhraseQuery) query).getTerms();
if (terms.length == 0) {
return Collections.emptySet();
}
// the longest term is likely to be the rarest,
// so from a performance perspective it makes sense to extract that
Term longestTerm = terms[0];
for (Term term : terms) {
if (longestTerm.bytes().length < term.bytes().length) {
longestTerm = term;
}
}
return Collections.singleton(longestTerm);
} else if (query instanceof BooleanQuery) {
List<BooleanClause> clauses = ((BooleanQuery) query).clauses();
boolean hasRequiredClauses = false;
for (BooleanClause clause : clauses) {
if (clause.isRequired()) {
hasRequiredClauses = true;
break;
}
}
if (hasRequiredClauses) {
Set<Term> bestClause = null;
for (BooleanClause clause : clauses) {
if (clause.isRequired() == false) {
// skip must_not clauses, we don't need to remember the things that do *not* match...
// skip should clauses, this bq has must clauses, so we don't need to remember should clauses, since they are completely optional.
continue;
}
Set<Term> temp = extractQueryTerms(clause.getQuery());
bestClause = selectTermListWithTheLongestShortestTerm(temp, bestClause);
}
if (bestClause != null) {
return bestClause;
} else {
return Collections.emptySet();
}
} else {
Set<Term> terms = new HashSet<>();
for (BooleanClause clause : clauses) {
if (clause.isProhibited()) {
// we don't need to remember the things that do *not* match...
continue;
}
terms.addAll(extractQueryTerms(clause.getQuery()));
}
return terms;
}
} else if (query instanceof ConstantScoreQuery) {
Query wrappedQuery = ((ConstantScoreQuery) query).getQuery();
return extractQueryTerms(wrappedQuery);
} else if (query instanceof BoostQuery) {
Query wrappedQuery = ((BoostQuery) query).getQuery();
return extractQueryTerms(wrappedQuery);
} else {
throw new UnsupportedQueryException(query);
}
}
static Set<Term> selectTermListWithTheLongestShortestTerm(Set<Term> terms1, Set<Term> terms2) {
if (terms1 == null) {
return terms2;
} else if (terms2 == null) {
return terms1;
} else {
int terms1ShortestTerm = minTermLength(terms1);
int terms2ShortestTerm = minTermLength(terms2);
// keep the clause with the longest terms, as it is likely to be the rarest.
if (terms1ShortestTerm >= terms2ShortestTerm) {
return terms1;
} else {
return terms2;
}
}
}
private static int minTermLength(Set<Term> terms) {
int min = Integer.MAX_VALUE;
for (Term term : terms) {
min = Math.min(min, term.bytes().length);
}
return min;
}
/**
* Creates a terms query with a clause for each term in all fields of the specified index reader, plus a clause
* that matches documents marked as unknown queries (i.e. queries whose terms couldn't all be extracted).
*/
public static Query createQueryTermsQuery(IndexReader indexReader, String queryMetadataField, String unknownQueryField) throws IOException {
List<Term> extractedTerms = new ArrayList<>();
extractedTerms.add(new Term(unknownQueryField));
Fields fields = MultiFields.getFields(indexReader);
for (String field : fields) {
Terms terms = fields.terms(field);
if (terms == null) {
continue;
}
BytesRef fieldBr = new BytesRef(field);
TermsEnum tenum = terms.iterator();
for (BytesRef term = tenum.next(); term != null ; term = tenum.next()) {
BytesRefBuilder builder = new BytesRefBuilder();
builder.append(fieldBr);
builder.append(FIELD_VALUE_SEPARATOR);
builder.append(term);
extractedTerms.add(new Term(queryMetadataField, builder.toBytesRef()));
}
}
return new TermsQuery(extractedTerms);
}
/**
* Exception indicating that some or all query terms couldn't be extracted from a percolator query.
*/
public static class UnsupportedQueryException extends RuntimeException {
private final Query unsupportedQuery;
public UnsupportedQueryException(Query unsupportedQuery) {
super(LoggerMessageFormat.format("no query terms can be extracted from query [{}]", unsupportedQuery));
this.unsupportedQuery = unsupportedQuery;
}
/**
* The actual Lucene query that was unsupported and caused this exception to be thrown.
*/
public Query getUnsupportedQuery() {
return unsupportedQuery;
}
}
}
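
A minimal sketch of how the selection logic above behaves; it assumes a class in org.elasticsearch.index.percolator (extractQueryTerms is package-private), and the field and term values are made up:

package org.elasticsearch.index.percolator;

import java.util.Set;

import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.TermQuery;

public class ExtractQueryTermsSketch {
    public static void main(String[] args) {
        BooleanQuery.Builder bq = new BooleanQuery.Builder();
        bq.add(new TermQuery(new Term("body", "nice")), Occur.MUST);
        PhraseQuery.Builder phrase = new PhraseQuery.Builder();
        phrase.add(new Term("body", "quick"));
        phrase.add(new Term("body", "brown"));
        bq.add(phrase.build(), Occur.MUST);
        // From the phrase only the longest term ("quick") is kept; between the two required
        // clauses the one whose shortest term is longest wins, so the result is {body:quick}.
        Set<Term> terms = ExtractQueryTermsService.extractQueryTerms(bq.build());
        System.out.println(terms); // [body:quick]
    }
}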

View File

@ -0,0 +1,150 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.percolator;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.search.Query;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperBuilders;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.core.StringFieldMapper;
import org.elasticsearch.index.query.QueryShardContext;
import java.io.IOException;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
public class PercolatorFieldMapper extends FieldMapper {
public static final String NAME = "query";
public static final String CONTENT_TYPE = "percolator";
public static final PercolatorFieldType FIELD_TYPE = new PercolatorFieldType();
private static final String EXTRACTED_TERMS_FIELD_NAME = "extracted_terms";
private static final String UNKNOWN_QUERY_FIELD_NAME = "unknown_query";
public static final String EXTRACTED_TERMS_FULL_FIELD_NAME = NAME + "." + EXTRACTED_TERMS_FIELD_NAME;
public static final String UNKNOWN_QUERY_FULL_FIELD_NAME = NAME + "." + UNKNOWN_QUERY_FIELD_NAME;
public static class Builder extends FieldMapper.Builder<Builder, PercolatorFieldMapper> {
private final QueryShardContext queryShardContext;
public Builder(QueryShardContext queryShardContext) {
super(NAME, FIELD_TYPE, FIELD_TYPE);
this.queryShardContext = queryShardContext;
}
@Override
public PercolatorFieldMapper build(BuilderContext context) {
context.path().add(name);
StringFieldMapper extractedTermsField = createStringFieldBuilder(EXTRACTED_TERMS_FIELD_NAME).build(context);
StringFieldMapper unknownQueryField = createStringFieldBuilder(UNKNOWN_QUERY_FIELD_NAME).build(context);
context.path().remove();
return new PercolatorFieldMapper(name(), fieldType, defaultFieldType, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo, queryShardContext, extractedTermsField, unknownQueryField);
}
static StringFieldMapper.Builder createStringFieldBuilder(String name) {
StringFieldMapper.Builder queryMetaDataFieldBuilder = MapperBuilders.stringField(name);
queryMetaDataFieldBuilder.docValues(false);
queryMetaDataFieldBuilder.store(false);
queryMetaDataFieldBuilder.tokenized(false);
queryMetaDataFieldBuilder.indexOptions(IndexOptions.DOCS);
return queryMetaDataFieldBuilder;
}
}
public static class TypeParser implements FieldMapper.TypeParser {
@Override
public Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
return new Builder(parserContext.queryShardContext());
}
}
public static final class PercolatorFieldType extends MappedFieldType {
public PercolatorFieldType() {
setName(NAME);
setIndexOptions(IndexOptions.NONE);
setDocValuesType(DocValuesType.NONE);
setStored(false);
}
public PercolatorFieldType(MappedFieldType ref) {
super(ref);
}
@Override
public MappedFieldType clone() {
return new PercolatorFieldType(this);
}
@Override
public String typeName() {
return CONTENT_TYPE;
}
}
private final boolean mapUnmappedFieldAsString;
private final QueryShardContext queryShardContext;
private final StringFieldMapper queryTermsField;
private final StringFieldMapper unknownQueryField;
public PercolatorFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings, MultiFields multiFields, CopyTo copyTo, QueryShardContext queryShardContext, StringFieldMapper queryTermsField, StringFieldMapper unknownQueryField) {
super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo);
this.queryShardContext = queryShardContext;
this.queryTermsField = queryTermsField;
this.unknownQueryField = unknownQueryField;
this.mapUnmappedFieldAsString = indexSettings.getAsBoolean(PercolatorQueriesRegistry.MAP_UNMAPPED_FIELDS_AS_STRING, false);
}
@Override
public Mapper parse(ParseContext context) throws IOException {
QueryShardContext queryShardContext = new QueryShardContext(this.queryShardContext);
Query query = PercolatorQueriesRegistry.parseQuery(queryShardContext, mapUnmappedFieldAsString, context.parser());
if (context.flyweight() == false) {
ExtractQueryTermsService.extractQueryTerms(query, context.doc(), queryTermsField.name(), unknownQueryField.name(), queryTermsField.fieldType());
}
return null;
}
@Override
public Iterator<Mapper> iterator() {
return Arrays.<Mapper>asList(queryTermsField, unknownQueryField).iterator();
}
@Override
protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
throw new UnsupportedOperationException("should not be invoked");
}
@Override
protected String contentType() {
return CONTENT_TYPE;
}
}
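
For orientation, a hedged sketch of the sub-field names the mapper ends up registering, derived directly from the constants above:

String extractedTerms = PercolatorFieldMapper.EXTRACTED_TERMS_FULL_FIELD_NAME; // "query.extracted_terms"
String unknownQuery = PercolatorFieldMapper.UNKNOWN_QUERY_FULL_FIELD_NAME;     // "query.unknown_query"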

View File

@ -31,19 +31,15 @@ import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.metrics.CounterMetric;
import org.elasticsearch.common.metrics.MeanMetric;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.indexing.IndexingOperationListener;
import org.elasticsearch.index.indexing.ShardIndexingService;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.DocumentTypeListener;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.shard.AbstractIndexShardComponent;
import org.elasticsearch.index.shard.ShardId;
@ -54,7 +50,6 @@ import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* Each shard will have a percolator registry even if there isn't a {@link PercolatorService#TYPE_NAME} document type in the index.
@ -65,45 +60,27 @@ import java.util.concurrent.atomic.AtomicBoolean;
*/
public final class PercolatorQueriesRegistry extends AbstractIndexShardComponent implements Closeable {
public final String MAP_UNMAPPED_FIELDS_AS_STRING = "index.percolator.map_unmapped_fields_as_string";
// This is a shard level service, but these below are index level service:
private final MapperService mapperService;
private final IndexFieldDataService indexFieldDataService;
private final ShardIndexingService indexingService;
public final static String MAP_UNMAPPED_FIELDS_AS_STRING = "index.percolator.map_unmapped_fields_as_string";
private final ConcurrentMap<BytesRef, Query> percolateQueries = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency();
private final RealTimePercolatorOperationListener realTimePercolatorOperationListener = new RealTimePercolatorOperationListener();
private final PercolateTypeListener percolateTypeListener = new PercolateTypeListener();
private final AtomicBoolean realTimePercolatorEnabled = new AtomicBoolean(false);
private final QueryShardContext queryShardContext;
private boolean mapUnmappedFieldsAsString;
private final MeanMetric percolateMetric = new MeanMetric();
private final CounterMetric currentMetric = new CounterMetric();
private final CounterMetric numberOfQueries = new CounterMetric();
public PercolatorQueriesRegistry(ShardId shardId, IndexSettings indexSettings,
ShardIndexingService indexingService, MapperService mapperService,
QueryShardContext queryShardContext,
IndexFieldDataService indexFieldDataService) {
public PercolatorQueriesRegistry(ShardId shardId, IndexSettings indexSettings, QueryShardContext queryShardContext) {
super(shardId, indexSettings);
this.mapperService = mapperService;
this.indexingService = indexingService;
this.queryShardContext = queryShardContext;
this.indexFieldDataService = indexFieldDataService;
this.mapUnmappedFieldsAsString = this.indexSettings.getSettings().getAsBoolean(MAP_UNMAPPED_FIELDS_AS_STRING, false);
mapperService.addTypeListener(percolateTypeListener);
}
public ConcurrentMap<BytesRef, Query> percolateQueries() {
public ConcurrentMap<BytesRef, Query> getPercolateQueries() {
return percolateQueries;
}
@Override
public void close() {
mapperService.removeTypeListener(percolateTypeListener);
indexingService.removeListener(realTimePercolatorOperationListener);
clear();
}
@ -111,11 +88,6 @@ public final class PercolatorQueriesRegistry extends AbstractIndexShardComponent
percolateQueries.clear();
}
public void enableRealTimePercolator() {
if (realTimePercolatorEnabled.compareAndSet(false, true)) {
indexingService.addListener(realTimePercolatorOperationListener);
}
}
public void addPercolateQuery(String idAsString, BytesReference source) {
Query newquery = parsePercolatorDocument(idAsString, source);
@ -133,9 +105,7 @@ public final class PercolatorQueriesRegistry extends AbstractIndexShardComponent
}
}
Query parsePercolatorDocument(String id, BytesReference source) {
String type = null;
BytesReference querySource = null;
public Query parsePercolatorDocument(String id, BytesReference source) {
try (XContentParser sourceParser = XContentHelper.createParser(source)) {
String currentFieldName = null;
XContentParser.Token token = sourceParser.nextToken(); // move the START_OBJECT
@ -147,38 +117,21 @@ public final class PercolatorQueriesRegistry extends AbstractIndexShardComponent
currentFieldName = sourceParser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if ("query".equals(currentFieldName)) {
if (type != null) {
return parseQuery(type, sourceParser);
} else {
XContentBuilder builder = XContentFactory.contentBuilder(sourceParser.contentType());
builder.copyCurrentStructure(sourceParser);
querySource = builder.bytes();
builder.close();
}
return parseQuery(queryShardContext, mapUnmappedFieldsAsString, sourceParser);
} else {
sourceParser.skipChildren();
}
} else if (token == XContentParser.Token.START_ARRAY) {
sourceParser.skipChildren();
} else if (token.isValue()) {
if ("type".equals(currentFieldName)) {
type = sourceParser.text();
}
}
}
try (XContentParser queryParser = XContentHelper.createParser(querySource)) {
return parseQuery(type, queryParser);
}
} catch (Exception e) {
throw new PercolatorException(shardId().index(), "failed to parse query [" + id + "]", e);
}
return null;
}
private Query parseQuery(String type, XContentParser parser) {
String[] previousTypes = null;
if (type != null) {
previousTypes = QueryShardContext.setTypesWithPrevious(type);
}
public static Query parseQuery(QueryShardContext queryShardContext, boolean mapUnmappedFieldsAsString, XContentParser parser) {
QueryShardContext context = new QueryShardContext(queryShardContext);
try {
context.reset(parser);
@ -200,29 +153,16 @@ public final class PercolatorQueriesRegistry extends AbstractIndexShardComponent
} catch (IOException e) {
throw new ParsingException(parser.getTokenLocation(), "Failed to parse", e);
} finally {
if (type != null) {
QueryShardContext.setTypes(previousTypes);
}
context.reset(null);
}
}
private class PercolateTypeListener implements DocumentTypeListener {
@Override
public void beforeCreate(DocumentMapper mapper) {
if (PercolatorService.TYPE_NAME.equals(mapper.type())) {
enableRealTimePercolator();
}
}
}
public void loadQueries(IndexReader reader) {
logger.trace("loading percolator queries...");
final int loadedQueries;
try {
Query query = new TermQuery(new Term(TypeFieldMapper.NAME, PercolatorService.TYPE_NAME));
QueriesLoaderCollector queryCollector = new QueriesLoaderCollector(PercolatorQueriesRegistry.this, logger, mapperService, indexFieldDataService);
QueriesLoaderCollector queryCollector = new QueriesLoaderCollector(PercolatorQueriesRegistry.this, logger);
IndexSearcher indexSearcher = new IndexSearcher(reader);
indexSearcher.setQueryCache(null);
indexSearcher.search(query, queryCollector);
@ -238,30 +178,26 @@ public final class PercolatorQueriesRegistry extends AbstractIndexShardComponent
logger.debug("done loading [{}] percolator queries", loadedQueries);
}
private class RealTimePercolatorOperationListener extends IndexingOperationListener {
@Override
public Engine.Index preIndex(Engine.Index operation) {
// validate the query here, before we index
if (PercolatorService.TYPE_NAME.equals(operation.type())) {
parsePercolatorDocument(operation.id(), operation.source());
}
return operation;
public boolean isPercolatorQuery(Engine.Index operation) {
if (PercolatorService.TYPE_NAME.equals(operation.type())) {
parsePercolatorDocument(operation.id(), operation.source());
return true;
}
return false;
}
@Override
public void postIndexUnderLock(Engine.Index index) {
// add the query under a doc lock
if (PercolatorService.TYPE_NAME.equals(index.type())) {
addPercolateQuery(index.id(), index.source());
}
}
public boolean isPercolatorQuery(Engine.Delete operation) {
return PercolatorService.TYPE_NAME.equals(operation.type());
}
@Override
public void postDeleteUnderLock(Engine.Delete delete) {
// remove the query under a lock
if (PercolatorService.TYPE_NAME.equals(delete.type())) {
removePercolateQuery(delete.id());
public synchronized void updatePercolateQuery(Engine engine, String id) {
// this can be called out of order as long as it is invoked for every change to a percolator document. It will always
// fetch the latest change, but might fetch the same change twice if updates / deletes happen concurrently.
try (Engine.GetResult getResult = engine.get(new Engine.Get(true, new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(PercolatorService.TYPE_NAME, id))))) {
if (getResult.exists()) {
addPercolateQuery(id, getResult.source().source);
} else {
removePercolateQuery(id);
}
}
}

View File

@ -45,17 +45,13 @@ final class QueriesLoaderCollector extends SimpleCollector {
private final Map<BytesRef, Query> queries = new HashMap<>();
private final FieldsVisitor fieldsVisitor = new FieldsVisitor(true);
private final PercolatorQueriesRegistry percolator;
private final IndexFieldData<?> uidFieldData;
private final ESLogger logger;
private SortedBinaryDocValues uidValues;
private LeafReader reader;
QueriesLoaderCollector(PercolatorQueriesRegistry percolator, ESLogger logger, MapperService mapperService, IndexFieldDataService indexFieldDataService) {
QueriesLoaderCollector(PercolatorQueriesRegistry percolator, ESLogger logger) {
this.percolator = percolator;
this.logger = logger;
final MappedFieldType uidMapper = mapperService.fullName(UidFieldMapper.NAME);
this.uidFieldData = indexFieldDataService.getForField(uidMapper);
}
public Map<BytesRef, Query> queries() {
@ -64,35 +60,27 @@ final class QueriesLoaderCollector extends SimpleCollector {
@Override
public void collect(int doc) throws IOException {
// the _source is the query
fieldsVisitor.reset();
reader.document(doc, fieldsVisitor);
final Uid uid = fieldsVisitor.uid();
uidValues.setDocument(doc);
if (uidValues.count() > 0) {
assert uidValues.count() == 1;
final BytesRef uid = uidValues.valueAt(0);
final BytesRef id = Uid.splitUidIntoTypeAndId(uid)[1];
fieldsVisitor.reset();
reader.document(doc, fieldsVisitor);
try {
// id is only used for logging, if we fail we log the id in the catch statement
final Query parseQuery = percolator.parsePercolatorDocument(null, fieldsVisitor.source());
if (parseQuery != null) {
queries.put(BytesRef.deepCopyOf(id), parseQuery);
} else {
logger.warn("failed to add query [{}] - parser returned null", id);
}
} catch (Exception e) {
logger.warn("failed to add query [{}]", e, id.utf8ToString());
try {
// id is only used for logging, if we fail we log the id in the catch statement
final Query parseQuery = percolator.parsePercolatorDocument(null, fieldsVisitor.source());
if (parseQuery != null) {
queries.put(new BytesRef(uid.id()), parseQuery);
} else {
logger.warn("failed to add query [{}] - parser returned null", uid);
}
} catch (Exception e) {
logger.warn("failed to add query [{}]", e, uid);
}
}
@Override
protected void doSetNextReader(LeafReaderContext context) throws IOException {
reader = context.reader();
uidValues = uidFieldData.load(context).getBytesValues();
}
@Override

View File

@ -267,12 +267,7 @@ public class IndexShard extends AbstractIndexShardComponent {
this.indexShardOperationCounter = new IndexShardOperationCounter(logger, shardId);
this.provider = provider;
this.searcherWrapper = indexSearcherWrapper;
this.percolatorQueriesRegistry = new PercolatorQueriesRegistry(shardId, indexSettings, indexingService, mapperService, newQueryShardContext(), indexFieldDataService);
if (mapperService.hasMapping(PercolatorService.TYPE_NAME)) {
percolatorQueriesRegistry.enableRealTimePercolator();
}
this.percolatorQueriesRegistry = new PercolatorQueriesRegistry(shardId, indexSettings, newQueryShardContext());
// We start up inactive
active.set(false);
}
@ -500,7 +495,12 @@ public class IndexShard extends AbstractIndexShardComponent {
if (logger.isTraceEnabled()) {
logger.trace("index [{}][{}]{}", index.type(), index.id(), index.docs());
}
created = getEngine().index(index);
// checking this before the engine write also validates the percolator query before we index it
final boolean isPercolatorQuery = percolatorQueriesRegistry.isPercolatorQuery(index);
Engine engine = getEngine();
created = engine.index(index);
if (isPercolatorQuery) {
percolatorQueriesRegistry.updatePercolateQuery(engine, index.id());
}
index.endTime(System.nanoTime());
} catch (Throwable ex) {
indexingService.postIndex(index, ex);
@ -537,7 +537,12 @@ public class IndexShard extends AbstractIndexShardComponent {
if (logger.isTraceEnabled()) {
logger.trace("delete [{}]", delete.uid().text());
}
getEngine().delete(delete);
final boolean isPercolatorQuery = percolatorQueriesRegistry.isPercolatorQuery(delete);
Engine engine = getEngine();
engine.delete(delete);
if (isPercolatorQuery) {
percolatorQueriesRegistry.updatePercolateQuery(engine, delete.id());
}
delete.endTime(System.nanoTime());
} catch (Throwable ex) {
indexingService.postDelete(delete, ex);
@ -585,7 +590,17 @@ public class IndexShard extends AbstractIndexShardComponent {
}
public IndexingStats indexingStats(String... types) {
return indexingService.stats(types);
Engine engine = getEngineOrNull();
final boolean throttled;
final long throttleTimeInMillis;
if (engine == null) {
throttled = false;
throttleTimeInMillis = 0;
} else {
throttled = engine.isThrottled();
throttleTimeInMillis = engine.getIndexThrottleTimeInMillis();
}
return indexingService.stats(throttled, throttleTimeInMillis, types);
}
public SearchStats searchStats(String... groups) {
@ -1470,7 +1485,7 @@ public class IndexShard extends AbstractIndexShardComponent {
};
final Engine.Warmer engineWarmer = (searcher, toLevel) -> warmer.warm(searcher, this, idxSettings, toLevel);
return new EngineConfig(shardId,
threadPool, indexingService, indexSettings, engineWarmer, store, deletionPolicy, mergePolicyConfig.getMergePolicy(), mergeSchedulerConfig,
threadPool, indexSettings, engineWarmer, store, deletionPolicy, mergePolicyConfig.getMergePolicy(), mergeSchedulerConfig,
mapperService.indexAnalyzer(), similarityService.similarity(mapperService), codecService, shardEventListener, translogRecoveryPerformer, indexCache.query(), cachingPolicy, translogConfig, inactiveTime);
}

View File

@ -51,6 +51,7 @@ import org.elasticsearch.index.shard.IndexShardComponent;
import java.io.Closeable;
import java.io.EOFException;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
@ -163,6 +164,21 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
try {
if (translogGeneration != null) {
final Checkpoint checkpoint = readCheckpoint();
final Path nextTranslogFile = location.resolve(getFilename(checkpoint.generation + 1));
final Path currentCheckpointFile = location.resolve(getCommitCheckpointFileName(checkpoint.generation));
// this is special handling for the error condition when we create a new writer but fail to bake
// the newly written file (generation+1) into the checkpoint. This is still a valid state;
// we just need to clean up before we continue.
// we hit this before and then blindly deleted the new generation even though we had managed to bake it in; see
// https://discuss.elastic.co/t/cannot-recover-index-because-of-missing-tanslog-files/38336 as an example
//
// For this to happen we must have already copied the translog.ckp file into translog-gen.ckp, so we first check
// if that file exists; if not, we don't even try to clean it up and wait until we fail creating it
assert Files.exists(nextTranslogFile) == false || Files.size(nextTranslogFile) <= TranslogWriter.getHeaderLength(translogUUID) : "unexpected translog file: [" + nextTranslogFile + "]";
if (Files.exists(currentCheckpointFile) // current checkpoint is already copied
&& Files.deleteIfExists(nextTranslogFile)) { // delete it and log a warning
logger.warn("deleted previously created, but not yet committed, next generation [{}]. This can happen due to a tragic exception when creating a new generation", nextTranslogFile.getFileName());
}
this.recoveredTranslogs = recoverFromFiles(translogGeneration, checkpoint);
if (recoveredTranslogs.isEmpty()) {
throw new IllegalStateException("at least one reader must be recovered");
@ -425,7 +441,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
if (config.isSyncOnEachOperation()) {
current.sync();
}
assert current.assertBytesAtLocation(location, bytes);
assert assertBytesAtLocation(location, bytes);
return location;
}
} catch (AlreadyClosedException | IOException ex) {
@ -439,6 +455,13 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
}
}
boolean assertBytesAtLocation(Translog.Location location, BytesReference expectedBytes) throws IOException {
// tests can override this
ByteBuffer buffer = ByteBuffer.allocate(location.size);
current.readBytes(buffer, location.translogLocation);
return new BytesArray(buffer.array()).equals(expectedBytes);
}
/**
* Snapshots the current transaction log allowing to safely iterate over the snapshot.
* Snapshots are fixed in time and will not be updated with future operations.

View File

@ -69,9 +69,17 @@ public class TranslogWriter extends TranslogReader {
totalOffset = lastSyncedOffset;
}
static int getHeaderLength(String translogUUID) {
return getHeaderLength(new BytesRef(translogUUID).length);
}
private static int getHeaderLength(int uuidLength) {
return CodecUtil.headerLength(TRANSLOG_CODEC) + uuidLength + RamUsageEstimator.NUM_BYTES_INT;
}
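// A worked sketch of what create() below writes for this header, in order:
//   CodecUtil.headerLength(TRANSLOG_CODEC)  -> Lucene codec header (magic, codec name, version)
// + RamUsageEstimator.NUM_BYTES_INT         -> the UUID length prefix (out.writeInt(ref.length))
// + uuidLength                              -> the UUID bytes themselves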
public static TranslogWriter create(ShardId shardId, String translogUUID, long fileGeneration, Path file, Callback<ChannelReference> onClose, ChannelFactory channelFactory, ByteSizeValue bufferSize) throws IOException {
final BytesRef ref = new BytesRef(translogUUID);
final int headerLength = CodecUtil.headerLength(TRANSLOG_CODEC) + ref.length + RamUsageEstimator.NUM_BYTES_INT;
final int headerLength = getHeaderLength(ref.length);
final FileChannel channel = channelFactory.open(file);
try {
// This OutputStreamDataOutput is intentionally not closed because
@ -80,17 +88,14 @@ public class TranslogWriter extends TranslogReader {
CodecUtil.writeHeader(out, TRANSLOG_CODEC, VERSION);
out.writeInt(ref.length);
out.writeBytes(ref.bytes, ref.offset, ref.length);
channel.force(false);
channel.force(true);
writeCheckpoint(headerLength, 0, file.getParent(), fileGeneration, StandardOpenOption.WRITE);
final TranslogWriter writer = new TranslogWriter(shardId, fileGeneration, new ChannelReference(file, fileGeneration, channel, onClose), bufferSize);
return writer;
} catch (Throwable throwable){
// if we fail to bake the file-generation into the checkpoint we stick with the file and once we recover and that
// file exists we remove it. We only apply this logic to the checkpoint.generation+1 any other file with a higher generation is an error condition
IOUtils.closeWhileHandlingException(channel);
try {
Files.delete(file); // remove the file as well
} catch (IOException ex) {
throwable.addSuppressed(ex);
}
throw throwable;
}
}
@ -213,11 +218,6 @@ public class TranslogWriter extends TranslogReader {
}
}
boolean assertBytesAtLocation(Translog.Location location, BytesReference expectedBytes) throws IOException {
ByteBuffer buffer = ByteBuffer.allocate(location.size);
readBytes(buffer, location.translogLocation);
return new BytesArray(buffer.array()).equals(expectedBytes);
}
private long getWrittenOffset() throws IOException {
return channelReference.getChannel().position();

View File

@ -56,6 +56,7 @@ import org.elasticsearch.index.mapper.internal.UidFieldMapper;
import org.elasticsearch.index.mapper.internal.VersionFieldMapper;
import org.elasticsearch.index.mapper.ip.IpFieldMapper;
import org.elasticsearch.index.mapper.object.ObjectMapper;
import org.elasticsearch.index.percolator.PercolatorFieldMapper;
import org.elasticsearch.index.query.BoolQueryParser;
import org.elasticsearch.index.query.BoostingQueryParser;
import org.elasticsearch.index.query.CommonTermsQueryParser;
@ -211,6 +212,7 @@ public class IndicesModule extends AbstractModule {
registerMapper(ObjectMapper.NESTED_CONTENT_TYPE, new ObjectMapper.TypeParser());
registerMapper(CompletionFieldMapper.CONTENT_TYPE, new CompletionFieldMapper.TypeParser());
registerMapper(GeoPointFieldMapper.CONTENT_TYPE, new GeoPointFieldMapper.TypeParser());
registerMapper(PercolatorFieldMapper.CONTENT_TYPE, new PercolatorFieldMapper.TypeParser());
if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) {
registerMapper(GeoShapeFieldMapper.CONTENT_TYPE, new GeoShapeFieldMapper.TypeParser());

View File

@ -29,9 +29,15 @@ import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.index.SlowCompositeReaderWrapper;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.CloseableThreadLocal;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.ParsedDocument;
@ -76,7 +82,17 @@ class MultiDocumentPercolatorIndex implements PercolatorIndex {
try {
MultiReader mReader = new MultiReader(memoryIndices, true);
LeafReader slowReader = SlowCompositeReaderWrapper.wrap(mReader);
final IndexSearcher slowSearcher = new IndexSearcher(slowReader);
final IndexSearcher slowSearcher = new IndexSearcher(slowReader) {
@Override
public Weight createNormalizedWeight(Query query, boolean needsScores) throws IOException {
// filter out nested (non-root) documents, so percolator queries only match the root of the document being percolated
BooleanQuery.Builder bq = new BooleanQuery.Builder();
bq.add(query, BooleanClause.Occur.MUST);
bq.add(Queries.newNestedFilter(), BooleanClause.Occur.MUST_NOT);
return super.createNormalizedWeight(bq.build(), needsScores);
}
};
slowSearcher.setQueryCache(null);
DocSearcher docSearcher = new DocSearcher(slowSearcher, rootDocMemoryIndex);
context.initialize(docSearcher, parsedDocument);

View File

@ -27,7 +27,6 @@ import org.apache.lucene.search.Collector;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Sort;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Counter;
import org.elasticsearch.action.percolate.PercolateShardRequest;
import org.elasticsearch.action.search.SearchType;
@ -49,7 +48,6 @@ import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.mapper.object.ObjectMapper;
import org.elasticsearch.index.percolator.PercolatorQueriesRegistry;
import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.similarity.SimilarityService;
@ -84,17 +82,12 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentMap;
/**
*/
public class PercolateContext extends SearchContext {
private final PercolatorQueriesRegistry percolateQueryRegistry;
public boolean limit;
private int size;
public boolean doSort;
public byte percolatorTypeId;
private int size = 10;
private boolean trackScores;
private final SearchShardTarget searchShardTarget;
@ -104,10 +97,12 @@ public class PercolateContext extends SearchContext {
private final PageCacheRecycler pageCacheRecycler;
private final BigArrays bigArrays;
private final ScriptService scriptService;
private final MapperService mapperService;
private final int numberOfShards;
private final Query aliasFilter;
private final long originNanoTime = System.nanoTime();
private final long startTime;
private final boolean onlyCount;
private String[] types;
private Engine.Searcher docSearcher;
@ -135,8 +130,8 @@ public class PercolateContext extends SearchContext {
this.indexService = indexService;
this.fetchPhase = fetchPhase;
this.fieldDataService = indexService.fieldData();
this.mapperService = indexService.mapperService();
this.searchShardTarget = searchShardTarget;
this.percolateQueryRegistry = indexShard.percolateRegistry();
this.types = new String[]{request.documentType()};
this.pageCacheRecycler = pageCacheRecycler;
this.bigArrays = bigArrays.withCircuitBreaking();
@ -147,6 +142,25 @@ public class PercolateContext extends SearchContext {
this.numberOfShards = request.getNumberOfShards();
this.aliasFilter = aliasFilter;
this.startTime = request.getStartTime();
this.onlyCount = request.onlyCount();
}
// for testing:
PercolateContext(PercolateShardRequest request, SearchShardTarget searchShardTarget, MapperService mapperService) {
super(null, request);
this.searchShardTarget = searchShardTarget;
this.mapperService = mapperService;
this.indexService = null;
this.indexShard = null;
this.fieldDataService = null;
this.pageCacheRecycler = null;
this.bigArrays = null;
this.scriptService = null;
this.aliasFilter = null;
this.startTime = 0;
this.numberOfShards = 0;
this.onlyCount = true;
this.fetchPhase = null;
}
public IndexSearcher docSearcher() {
@ -181,10 +195,6 @@ public class PercolateContext extends SearchContext {
return indexService;
}
public ConcurrentMap<BytesRef, Query> percolateQueries() {
return percolateQueryRegistry.percolateQueries();
}
public Query percolateQuery() {
return percolateQuery;
}
@ -200,6 +210,14 @@ public class PercolateContext extends SearchContext {
return hitContext;
}
public boolean isOnlyCount() {
return onlyCount;
}
public Query percolatorTypeFilter(){
return indexService().mapperService().documentMapper(PercolatorService.TYPE_NAME).typeFilter();
}
@Override
public SearchContextHighlight highlight() {
return highlight;
@ -234,7 +252,7 @@ public class PercolateContext extends SearchContext {
@Override
public MapperService mapperService() {
return indexService.mapperService();
return mapperService;
}
@Override
@ -535,7 +553,6 @@ public class PercolateContext extends SearchContext {
@Override
public SearchContext size(int size) {
this.size = size;
this.limit = true;
return this;
}

View File

@ -0,0 +1,224 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.percolator;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.Query;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.percolate.PercolateShardRequest;
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.mapper.DocumentMapperForType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.search.SearchParseElement;
import org.elasticsearch.search.aggregations.AggregationPhase;
import org.elasticsearch.search.highlight.HighlightPhase;
import org.elasticsearch.search.sort.SortParseElement;
import java.util.Map;
import static org.elasticsearch.index.mapper.SourceToParse.source;
public class PercolateDocumentParser {
private final HighlightPhase highlightPhase;
private final SortParseElement sortParseElement;
private final AggregationPhase aggregationPhase;
private final MappingUpdatedAction mappingUpdatedAction;
@Inject
public PercolateDocumentParser(HighlightPhase highlightPhase, SortParseElement sortParseElement, AggregationPhase aggregationPhase, MappingUpdatedAction mappingUpdatedAction) {
this.highlightPhase = highlightPhase;
this.sortParseElement = sortParseElement;
this.aggregationPhase = aggregationPhase;
this.mappingUpdatedAction = mappingUpdatedAction;
}
public ParsedDocument parse(PercolateShardRequest request, PercolateContext context, MapperService mapperService, QueryShardContext queryShardContext) {
BytesReference source = request.source();
if (source == null || source.length() == 0) {
if (request.docSource() != null && request.docSource().length() != 0) {
return parseFetchedDoc(context, request.docSource(), mapperService, request.shardId().getIndex(), request.documentType());
} else {
return null;
}
}
// TODO: combine all feature parse elements into one map
Map<String, ? extends SearchParseElement> hlElements = highlightPhase.parseElements();
Map<String, ? extends SearchParseElement> aggregationElements = aggregationPhase.parseElements();
ParsedDocument doc = null;
// Some queries (e.g. the function_score query with decay functions) rely on a SearchContext being set:
// We switch types because this context needs to be in the context of the percolate queries in the shard and
// not the in-memory percolate doc
String[] previousTypes = context.types();
context.types(new String[]{PercolatorService.TYPE_NAME});
try (XContentParser parser = XContentFactory.xContent(source).createParser(source);) {
String currentFieldName = null;
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
// we need to check for "doc" here, since the next token will then be START_OBJECT, which is
// where the actual document starts
if ("doc".equals(currentFieldName)) {
if (doc != null) {
throw new ElasticsearchParseException("Either specify doc or get, not both");
}
DocumentMapperForType docMapper = mapperService.documentMapperWithAutoCreate(request.documentType());
String index = context.shardTarget().index();
doc = docMapper.getDocumentMapper().parse(source(parser).index(index).type(request.documentType()).flyweight(true));
if (docMapper.getMapping() != null) {
doc.addDynamicMappingsUpdate(docMapper.getMapping());
}
if (doc.dynamicMappingsUpdate() != null) {
mappingUpdatedAction.updateMappingOnMasterSynchronously(request.shardId().getIndex(), request.documentType(), doc.dynamicMappingsUpdate());
}
// the document parsing exits the "doc" object, so we need to set the new current field.
currentFieldName = parser.currentName();
}
} else if (token == XContentParser.Token.START_OBJECT) {
SearchParseElement element = hlElements.get(currentFieldName);
if (element == null) {
element = aggregationElements.get(currentFieldName);
}
if ("query".equals(currentFieldName)) {
if (context.percolateQuery() != null) {
throw new ElasticsearchParseException("Either specify query or filter, not both");
}
context.percolateQuery(queryShardContext.parse(parser).query());
} else if ("filter".equals(currentFieldName)) {
if (context.percolateQuery() != null) {
throw new ElasticsearchParseException("Either specify query or filter, not both");
}
Query filter = queryShardContext.parseInnerFilter(parser).query();
context.percolateQuery(new ConstantScoreQuery(filter));
} else if ("sort".equals(currentFieldName)) {
parseSort(parser, context);
} else if (element != null) {
element.parse(parser, context);
}
} else if (token == XContentParser.Token.START_ARRAY) {
if ("sort".equals(currentFieldName)) {
parseSort(parser, context);
}
} else if (token == null) {
break;
} else if (token.isValue()) {
if ("size".equals(currentFieldName)) {
context.size(parser.intValue());
if (context.size() < 0) {
throw new ElasticsearchParseException("size is set to [{}] and is expected to be higher or equal to 0", context.size());
}
} else if ("sort".equals(currentFieldName)) {
parseSort(parser, context);
} else if ("track_scores".equals(currentFieldName) || "trackScores".equals(currentFieldName)) {
context.trackScores(parser.booleanValue());
}
}
}
// We need to get the actual source from the request body for highlighting, so parse the request body again
// and only get the doc source.
if (context.highlight() != null) {
parser.close();
currentFieldName = null;
try (XContentParser parserForHighlighter = XContentFactory.xContent(source).createParser(source)) {
token = parserForHighlighter.nextToken();
assert token == XContentParser.Token.START_OBJECT;
while ((token = parserForHighlighter.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parserForHighlighter.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if ("doc".equals(currentFieldName)) {
BytesStreamOutput bStream = new BytesStreamOutput();
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.SMILE, bStream);
builder.copyCurrentStructure(parserForHighlighter);
builder.close();
doc.setSource(bStream.bytes());
break;
} else {
parserForHighlighter.skipChildren();
}
} else if (token == null) {
break;
}
}
}
}
} catch (Throwable e) {
throw new ElasticsearchParseException("failed to parse request", e);
} finally {
context.types(previousTypes);
}
if (request.docSource() != null && request.docSource().length() != 0) {
if (doc != null) {
throw new IllegalArgumentException("Can't specify the document to percolate in the source of the request and as document id");
}
doc = parseFetchedDoc(context, request.docSource(), mapperService, request.shardId().getIndex(), request.documentType());
}
if (doc == null) {
throw new IllegalArgumentException("Nothing to percolate");
}
return doc;
}
private void parseSort(XContentParser parser, PercolateContext context) throws Exception {
context.trackScores(true);
sortParseElement.parse(parser, context);
// null means default sorting by relevancy
if (context.sort() != null) {
throw new ElasticsearchParseException("Only _score desc is supported");
}
}
private ParsedDocument parseFetchedDoc(PercolateContext context, BytesReference fetchedDoc, MapperService mapperService, String index, String type) {
try (XContentParser parser = XContentFactory.xContent(fetchedDoc).createParser(fetchedDoc)) {
DocumentMapperForType docMapper = mapperService.documentMapperWithAutoCreate(type);
ParsedDocument doc = docMapper.getDocumentMapper().parse(source(parser).index(index).type(type).flyweight(true));
if (doc == null) {
throw new ElasticsearchParseException("No doc to percolate in the request");
}
if (context.highlight() != null) {
doc.setSource(fetchedDoc);
}
return doc;
} catch (Throwable e) {
throw new ElasticsearchParseException("failed to parse request", e);
}
}
}
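
A minimal sketch of a percolate request body that this parser accepts; the field names come from the parsing loop above, while the document values are made up:

import java.io.IOException;

import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentFactory;

class PercolateBodySketch {
    static BytesReference examplePercolateBody() throws IOException {
        return XContentFactory.jsonBuilder()
            .startObject()
                .startObject("doc")                          // the in-memory document to percolate
                    .field("message", "the quick brown fox")
                .endObject()
                .field("size", 10)                           // must be >= 0
                .field("track_scores", true)
            .endObject()
            .bytes();
    }
}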

View File

@ -27,6 +27,7 @@ public class PercolatorModule extends AbstractModule {
@Override
protected void configure() {
bind(PercolateDocumentParser.class).asEagerSingleton();
bind(PercolatorService.class).asEagerSingleton();
}
}

View File

@ -0,0 +1,250 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.percolator;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.index.fieldvisitor.SingleFieldsVisitor;
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
import org.elasticsearch.index.percolator.ExtractQueryTermsService;
import java.io.IOException;
import java.util.Map;
import java.util.Set;
import static org.apache.lucene.search.BooleanClause.Occur.FILTER;
import static org.apache.lucene.search.BooleanClause.Occur.MUST;
final class PercolatorQuery extends Query {
public static final float MATCH_COST =
(1 << 14) // stored field access cost, approximated by the number of bytes in a block
+ 1000; // cost of matching the query against the document, arbitrary as it would be really complex to estimate
static class Builder {
private final IndexSearcher percolatorIndexSearcher;
private final Map<BytesRef, Query> percolatorQueries;
private Query percolateQuery;
private Query queriesMetaDataQuery;
private final Query percolateTypeQuery;
/**
* @param percolatorIndexSearcher The index searcher on top of the in-memory index that holds the document being percolated
* @param percolatorQueries All the registered percolator queries
* @param percolateTypeQuery A query that identifies all documents containing percolator queries
*/
Builder(IndexSearcher percolatorIndexSearcher, Map<BytesRef, Query> percolatorQueries, Query percolateTypeQuery) {
this.percolatorIndexSearcher = percolatorIndexSearcher;
this.percolatorQueries = percolatorQueries;
this.percolateTypeQuery = percolateTypeQuery;
}
/**
* Optionally sets a query that reduces the number of queries to percolate based on custom metadata attached
* to the percolator documents.
*/
void setPercolateQuery(Query percolateQuery) {
this.percolateQuery = percolateQuery;
}
/**
* Optionally sets a query that reduces the number of queries to percolate based on extracted terms from
* the document to be percolated.
*
* @param extractedTermsFieldName The name of the field to get the extracted terms from
* @param unknownQueryFieldname The field used to mark documents whose queries couldn't be fully extracted
*/
void extractQueryTermsQuery(String extractedTermsFieldName, String unknownQueryFieldname) throws IOException {
this.queriesMetaDataQuery = ExtractQueryTermsService.createQueryTermsQuery(percolatorIndexSearcher.getIndexReader(), extractedTermsFieldName, unknownQueryFieldname);
}
PercolatorQuery build() {
BooleanQuery.Builder builder = new BooleanQuery.Builder();
builder.add(percolateTypeQuery, FILTER);
if (queriesMetaDataQuery != null) {
builder.add(queriesMetaDataQuery, FILTER);
}
if (percolateQuery != null){
builder.add(percolateQuery, MUST);
}
return new PercolatorQuery(builder.build(), percolatorIndexSearcher, percolatorQueries);
}
}
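// Usage sketch with hypothetical variable names: given the registry's query map and an
// IndexSearcher over the in-memory index holding the document being percolated:
//
//   PercolatorQuery.Builder builder = new PercolatorQuery.Builder(
//           memoryIndexSearcher, registry.getPercolateQueries(), percolateTypeQuery);
//   builder.extractQueryTermsQuery(PercolatorFieldMapper.EXTRACTED_TERMS_FULL_FIELD_NAME,
//           PercolatorFieldMapper.UNKNOWN_QUERY_FULL_FIELD_NAME);
//   PercolatorQuery query = builder.build();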
private final Query percolatorQueriesQuery;
private final IndexSearcher percolatorIndexSearcher;
private final Map<BytesRef, Query> percolatorQueries;
private PercolatorQuery(Query percolatorQueriesQuery, IndexSearcher percolatorIndexSearcher, Map<BytesRef, Query> percolatorQueries) {
this.percolatorQueriesQuery = percolatorQueriesQuery;
this.percolatorIndexSearcher = percolatorIndexSearcher;
this.percolatorQueries = percolatorQueries;
}
@Override
public Query rewrite(IndexReader reader) throws IOException {
if (getBoost() != 1f) {
return super.rewrite(reader);
}
Query rewritten = percolatorQueriesQuery.rewrite(reader);
if (rewritten != percolatorQueriesQuery) {
return new PercolatorQuery(rewritten, percolatorIndexSearcher, percolatorQueries);
} else {
return this;
}
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
final Weight innerWeight = percolatorQueriesQuery.createWeight(searcher, needsScores);
return new Weight(this) {
@Override
public void extractTerms(Set<Term> set) {
}
@Override
public Explanation explain(LeafReaderContext leafReaderContext, int docId) throws IOException {
Scorer scorer = scorer(leafReaderContext);
if (scorer != null) {
int result = scorer.iterator().advance(docId);
if (result == docId) {
return Explanation.match(scorer.score(), "PercolatorQuery");
}
}
return Explanation.noMatch("PercolatorQuery");
}
@Override
public float getValueForNormalization() throws IOException {
return innerWeight.getValueForNormalization();
}
@Override
public void normalize(float v, float v1) {
innerWeight.normalize(v, v1);
}
@Override
public Scorer scorer(LeafReaderContext leafReaderContext) throws IOException {
final Scorer approximation = innerWeight.scorer(leafReaderContext);
if (approximation == null) {
return null;
}
final LeafReader leafReader = leafReaderContext.reader();
return new Scorer(this) {
@Override
public DocIdSetIterator iterator() {
return TwoPhaseIterator.asDocIdSetIterator(twoPhaseIterator());
}
@Override
public TwoPhaseIterator twoPhaseIterator() {
return new TwoPhaseIterator(approximation.iterator()) {
@Override
public boolean matches() throws IOException {
return matchDocId(approximation.docID(), leafReader);
}
@Override
public float matchCost() {
return MATCH_COST;
}
};
}
@Override
public float score() throws IOException {
return approximation.score();
}
@Override
public int freq() throws IOException {
return approximation.freq();
}
@Override
public int docID() {
return approximation.docID();
}
boolean matchDocId(int docId, LeafReader leafReader) throws IOException {
SingleFieldsVisitor singleFieldsVisitor = new SingleFieldsVisitor(UidFieldMapper.NAME);
leafReader.document(docId, singleFieldsVisitor);
BytesRef percolatorQueryId = new BytesRef(singleFieldsVisitor.uid().id());
return matchQuery(percolatorQueryId);
}
};
}
};
}
boolean matchQuery(BytesRef percolatorQueryId) throws IOException {
Query percolatorQuery = percolatorQueries.get(percolatorQueryId);
if (percolatorQuery != null) {
return Lucene.exists(percolatorIndexSearcher, percolatorQuery);
} else {
return false;
}
}
private final Object instance = new Object();
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
if (!super.equals(o)) return false;
PercolatorQuery that = (PercolatorQuery) o;
return instance.equals(that.instance);
}
@Override
public int hashCode() {
int result = super.hashCode();
result = 31 * result + instance.hashCode();
return result;
}
@Override
public String toString(String s) {
return "PercolatorQuery{inner={" + percolatorQueriesQuery.toString(s) + "}}";
}
}
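
The two-phase iterator above is what keeps percolation affordable: the approximation walks the cheap candidate set, while the memory-index query only runs inside matches(). A minimal self-contained sketch of that pattern, where an even-docID check stands in for matchDocId and the cost value is arbitrary:

import java.io.IOException;

import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.TwoPhaseIterator;

public class TwoPhaseSketch {
    public static void main(String[] args) throws IOException {
        final DocIdSetIterator approximation = DocIdSetIterator.all(10); // cheap candidates: docs 0..9
        TwoPhaseIterator twoPhase = new TwoPhaseIterator(approximation) {
            @Override
            public boolean matches() throws IOException {
                // stands in for matchDocId(...): the expensive per-document verification
                return approximation.docID() % 2 == 0;
            }

            @Override
            public float matchCost() {
                return 1000f; // hint used to order checks in conjunctions
            }
        };
        DocIdSetIterator it = TwoPhaseIterator.asDocIdSetIterator(twoPhase);
        for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
            System.out.println("matched doc " + doc); // prints even doc ids only
        }
    }
}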

View File

@ -18,137 +18,114 @@
*/
package org.elasticsearch.percolator;
import com.carrotsearch.hppc.IntObjectHashMap;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.index.memory.ExtendedMemoryIndex;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.MultiCollector;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.search.TotalHitCountCollector;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CloseableThreadLocal;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.Version;
import org.elasticsearch.action.percolate.PercolateResponse;
import org.elasticsearch.action.percolate.PercolateShardRequest;
import org.elasticsearch.action.percolate.PercolateShardResponse;
import org.elasticsearch.cache.recycler.PageCacheRecycler;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.HasContextAndHeaders;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
import org.elasticsearch.index.mapper.DocumentMapperForType;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.fieldvisitor.SingleFieldsVisitor;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
import org.elasticsearch.index.percolator.PercolatorFieldMapper;
import org.elasticsearch.index.percolator.PercolatorQueriesRegistry;
import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.percolator.QueryCollector.Count;
import org.elasticsearch.percolator.QueryCollector.Match;
import org.elasticsearch.percolator.QueryCollector.MatchAndScore;
import org.elasticsearch.percolator.QueryCollector.MatchAndSort;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchParseElement;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.aggregations.AggregationPhase;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.BucketCollector;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregator;
import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator;
import org.elasticsearch.search.aggregations.support.AggregationContext;
import org.elasticsearch.search.fetch.FetchPhase;
import org.elasticsearch.search.highlight.HighlightField;
import org.elasticsearch.search.highlight.HighlightPhase;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.sort.SortParseElement;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
import static org.elasticsearch.index.mapper.SourceToParse.source;
import static org.elasticsearch.percolator.QueryCollector.count;
import static org.elasticsearch.percolator.QueryCollector.match;
import static org.elasticsearch.percolator.QueryCollector.matchAndScore;
import static org.apache.lucene.search.BooleanClause.Occur.FILTER;
import static org.apache.lucene.search.BooleanClause.Occur.MUST;
public class PercolatorService extends AbstractComponent {
public final static float NO_SCORE = Float.NEGATIVE_INFINITY;
public final static String TYPE_NAME = ".percolator";
private final IndexNameExpressionResolver indexNameExpressionResolver;
private final IndicesService indicesService;
private final PageCacheRecycler pageCacheRecycler;
private final BigArrays bigArrays;
private final ClusterService clusterService;
private final ScriptService scriptService;
private final HighlightPhase highlightPhase;
private final AggregationPhase aggregationPhase;
private final PercolateDocumentParser percolateDocumentParser;
private final CloseableThreadLocal<MemoryIndex> cache;
private final PercolatorIndex single;
private final PercolatorIndex multi;
private final ParseFieldMatcher parseFieldMatcher;
private final FetchPhase fetchPhase;
@Inject
public PercolatorService(Settings settings, IndexNameExpressionResolver indexNameExpressionResolver, IndicesService indicesService,
PageCacheRecycler pageCacheRecycler, BigArrays bigArrays,
HighlightPhase highlightPhase, ClusterService clusterService,
AggregationPhase aggregationPhase, ScriptService scriptService,
PercolateDocumentParser percolateDocumentParser, FetchPhase fetchPhase) {
super(settings);
this.indexNameExpressionResolver = indexNameExpressionResolver;
this.percolateDocumentParser = percolateDocumentParser;
this.fetchPhase = fetchPhase;
this.parseFieldMatcher = new ParseFieldMatcher(settings);
this.indicesService = indicesService;
this.pageCacheRecycler = pageCacheRecycler;
this.bigArrays = bigArrays;
this.clusterService = clusterService;
this.scriptService = scriptService;
this.aggregationPhase = aggregationPhase;
this.highlightPhase = highlightPhase;
final long maxReuseBytes = settings.getAsBytesSize("indices.memory.memory_index.size_per_thread", new ByteSizeValue(1, ByteSizeUnit.MB)).bytes();
cache = new CloseableThreadLocal<MemoryIndex>() {
@ -160,23 +137,41 @@ public class PercolatorService extends AbstractComponent {
};
single = new SingleDocumentPercolatorIndex(cache);
multi = new MultiDocumentPercolatorIndex(cache);
}
public ReduceResult reduce(boolean onlyCount, List<PercolateShardResponse> shardResponses, HasContextAndHeaders headersContext) throws IOException {
if (onlyCount) {
long finalCount = 0;
for (PercolateShardResponse shardResponse : shardResponses) {
finalCount += shardResponse.topDocs().totalHits;
}
InternalAggregations reducedAggregations = reduceAggregations(shardResponses, headersContext);
return new PercolatorService.ReduceResult(finalCount, reducedAggregations);
} else {
int requestedSize = shardResponses.get(0).requestedSize();
TopDocs[] shardResults = new TopDocs[shardResponses.size()];
long foundMatches = 0;
for (int i = 0; i < shardResults.length; i++) {
TopDocs shardResult = shardResponses.get(i).topDocs();
foundMatches += shardResult.totalHits;
shardResults[i] = shardResult;
}
TopDocs merged = TopDocs.merge(requestedSize, shardResults);
PercolateResponse.Match[] matches = new PercolateResponse.Match[merged.scoreDocs.length];
for (int i = 0; i < merged.scoreDocs.length; i++) {
ScoreDoc doc = merged.scoreDocs[i];
PercolateShardResponse shardResponse = shardResponses.get(doc.shardIndex);
String id = shardResponse.ids().get(doc.doc);
Map<String, HighlightField> hl = shardResponse.hls().get(doc.doc);
matches[i] = new PercolateResponse.Match(new Text(shardResponse.getIndex()), new Text(id), doc.score, hl);
}
InternalAggregations reducedAggregations = reduceAggregations(shardResponses, headersContext);
return new PercolatorService.ReduceResult(foundMatches, matches, reducedAggregations);
}
}
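// Illustrative sketch (not part of this commit): reduce(...) above leans on
// TopDocs.merge to interleave the per-shard top hits; merge stamps each
// ScoreDoc with the position of the shard result it came from, and that
// shardIndex is what resolves ids and highlight fragments per merged hit.
// Stock Lucene 5.x APIs; the class name and doc ids are hypothetical.
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;

class TopDocsMergeSketch {
    public static void main(String[] args) throws Exception {
        TopDocs shard0 = new TopDocs(2, new ScoreDoc[]{new ScoreDoc(0, 3.0f), new ScoreDoc(3, 1.0f)}, 3.0f);
        TopDocs shard1 = new TopDocs(1, new ScoreDoc[]{new ScoreDoc(7, 2.0f)}, 2.0f);
        TopDocs merged = TopDocs.merge(10, new TopDocs[]{shard0, shard1});
        for (ScoreDoc doc : merged.scoreDocs) {
            // shardIndex points back into the original TopDocs array.
            System.out.println("shard=" + doc.shardIndex + " doc=" + doc.doc + " score=" + doc.score);
        }
    }
}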
public PercolateShardResponse percolate(PercolateShardRequest request) throws IOException {
IndexService percolateIndexService = indicesService.indexServiceSafe(request.shardId().getIndex());
IndexShard indexShard = percolateIndexService.getShard(request.shardId().id());
indexShard.readAllowed(); // check if we can read the shard...
@ -199,29 +194,11 @@ public class PercolatorService extends AbstractComponent {
pageCacheRecycler, bigArrays, scriptService, aliasFilter, parseFieldMatcher, fetchPhase);
SearchContext.setCurrent(context);
try {
ParsedDocument parsedDocument = percolateDocumentParser.parse(request, context, percolateIndexService.mapperService(), percolateIndexService.getQueryShardContext());
if (context.searcher().getIndexReader().maxDoc() == 0) {
return new PercolateShardResponse(Lucene.EMPTY_TOP_DOCS, Collections.emptyMap(), Collections.emptyMap(), context);
}
if (context.size() < 0) {
context.size(0);
}
@ -235,23 +212,27 @@ public class PercolatorService extends AbstractComponent {
} else {
percolatorIndex = single;
}
percolatorIndex.prepare(context, parsedDocument);
PercolatorType action;
if (request.onlyCount()) {
action = context.percolateQuery() != null ? queryCountPercolator : countPercolator;
} else {
if (context.doSort) {
action = topMatchingPercolator;
} else if (context.percolateQuery() != null) {
action = context.trackScores() ? scoringPercolator : queryPercolator;
} else {
action = matchPercolator;
BucketCollector aggregatorCollector = null;
if (context.aggregations() != null) {
AggregationContext aggregationContext = new AggregationContext(context);
context.aggregations().aggregationContext(aggregationContext);
Aggregator[] aggregators = context.aggregations().factories().createTopLevelAggregators();
List<Aggregator> aggregatorCollectors = new ArrayList<>(aggregators.length);
for (int i = 0; i < aggregators.length; i++) {
if (!(aggregators[i] instanceof GlobalAggregator)) {
Aggregator aggregator = aggregators[i];
aggregatorCollectors.add(aggregator);
}
}
context.percolatorTypeId = action.id();
percolatorIndex.prepare(context, parsedDocument);
return action.doPercolate(request, context, isNested);
context.aggregations().aggregators(aggregators);
aggregatorCollector = BucketCollector.wrap(aggregatorCollectors);
aggregatorCollector.preCollection();
}
PercolatorQueriesRegistry queriesRegistry = indexShard.percolateRegistry();
return doPercolate(context, queriesRegistry, aggregationPhase, aggregatorCollector, highlightPhase);
} finally {
SearchContext.removeCurrent();
context.close();
@ -259,568 +240,103 @@ public class PercolatorService extends AbstractComponent {
}
}
// moved the core percolation logic to a package protected method to make testing easier:
static PercolateShardResponse doPercolate(PercolateContext context, PercolatorQueriesRegistry queriesRegistry, AggregationPhase aggregationPhase, @Nullable BucketCollector aggregatorCollector, HighlightPhase highlightPhase) throws IOException {
PercolatorQuery.Builder builder = new PercolatorQuery.Builder(context.docSearcher(), queriesRegistry.getPercolateQueries(), context.percolatorTypeFilter());
if (queriesRegistry.indexSettings().getSettings().getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null).onOrAfter(Version.V_3_0_0)) {
builder.extractQueryTermsQuery(PercolatorFieldMapper.EXTRACTED_TERMS_FULL_FIELD_NAME, PercolatorFieldMapper.UNKNOWN_QUERY_FULL_FIELD_NAME);
}
if (context.percolateQuery() != null || context.aliasFilter() != null) {
BooleanQuery.Builder bq = new BooleanQuery.Builder();
if (context.percolateQuery() != null) {
bq.add(context.percolateQuery(), MUST);
}
if (context.aliasFilter() != null) {
bq.add(context.aliasFilter(), FILTER);
}
builder.setPercolateQuery(bq.build());
}
PercolatorQuery percolatorQuery = builder.build();
if (context.isOnlyCount() || context.size() == 0) {
TotalHitCountCollector collector = new TotalHitCountCollector();
context.searcher().search(percolatorQuery, MultiCollector.wrap(collector, aggregatorCollector));
if (aggregatorCollector != null) {
aggregatorCollector.postCollection();
aggregationPhase.execute(context);
}
return new PercolateShardResponse(new TopDocs(collector.getTotalHits(), Lucene.EMPTY_SCORE_DOCS, 0f), Collections.emptyMap(), Collections.emptyMap(), context);
} else {
int size = context.size();
if (size > context.searcher().getIndexReader().maxDoc()) {
// prevent easy OOM if more than the total number of docs that exist is requested...
size = context.searcher().getIndexReader().maxDoc();
}
TopScoreDocCollector collector = TopScoreDocCollector.create(size);
context.searcher().search(percolatorQuery, MultiCollector.wrap(collector, aggregatorCollector));
if (aggregatorCollector != null) {
aggregatorCollector.postCollection();
aggregationPhase.execute(context);
}
TopDocs topDocs = collector.topDocs();
Map<Integer, String> ids = new HashMap<>(topDocs.scoreDocs.length);
Map<Integer, Map<String, HighlightField>> hls = new HashMap<>(topDocs.scoreDocs.length);
for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
if (context.trackScores() == false) {
// No sort or score tracking was requested, so use a special value to indicate that scores should not be shown:
scoreDoc.score = NO_SCORE;
}
int segmentIdx = ReaderUtil.subIndex(scoreDoc.doc, context.searcher().getIndexReader().leaves());
LeafReaderContext atomicReaderContext = context.searcher().getIndexReader().leaves().get(segmentIdx);
final int segmentDocId = scoreDoc.doc - atomicReaderContext.docBase;
SingleFieldsVisitor fieldsVisitor = new SingleFieldsVisitor(UidFieldMapper.NAME);
atomicReaderContext.reader().document(segmentDocId, fieldsVisitor);
String id = fieldsVisitor.uid().id();
ids.put(scoreDoc.doc, id);
if (context.highlight() != null) {
Query query = queriesRegistry.getPercolateQueries().get(new BytesRef(id));
context.parsedQuery(new ParsedQuery(query));
context.hitContext().cache().clear();
highlightPhase.hitExecute(context, context.hitContext());
hls.put(scoreDoc.doc, context.hitContext().hit().getHighlightFields());
}
}
return new PercolateShardResponse(topDocs, ids, hls, context);
}
}
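// Illustrative sketch (not part of this commit): the count branch above relies on
// MultiCollector.wrap(...) silently dropping null collectors, which is why a null
// aggregatorCollector can be passed straight through. A minimal standalone
// reproduction against a MemoryIndex, assuming stock Lucene 5.x APIs; the class
// name and field values are hypothetical.
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MultiCollector;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TotalHitCountCollector;

class CountBranchSketch {
    public static void main(String[] args) throws Exception {
        MemoryIndex memoryIndex = new MemoryIndex();
        memoryIndex.addField("body", "the quick brown fox", new StandardAnalyzer());
        IndexSearcher searcher = memoryIndex.createSearcher();
        TotalHitCountCollector collector = new TotalHitCountCollector();
        // The null stands in for the optional aggregator collector; wrap(...) ignores it.
        searcher.search(new TermQuery(new Term("body", "fox")), MultiCollector.wrap(collector, null));
        System.out.println("matches: " + collector.getTotalHits());
    }
}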
public void close() {
cache.close();
}
interface PercolatorType {
// 0x00 is reserved for empty type.
byte id();
ReduceResult reduce(List<PercolateShardResponse> shardResults, HasContextAndHeaders headersContext);
PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context, boolean isNested);
}
private final PercolatorType countPercolator = new PercolatorType() {
@Override
public byte id() {
return 0x01;
}
@Override
public ReduceResult reduce(List<PercolateShardResponse> shardResults, HasContextAndHeaders headersContext) {
long finalCount = 0;
for (PercolateShardResponse shardResponse : shardResults) {
finalCount += shardResponse.count();
}
assert !shardResults.isEmpty();
InternalAggregations reducedAggregations = reduceAggregations(shardResults, headersContext);
return new ReduceResult(finalCount, reducedAggregations);
}
@Override
public PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context, boolean isNested) {
long count = 0;
for (Map.Entry<BytesRef, Query> entry : context.percolateQueries().entrySet()) {
try {
Query existsQuery = entry.getValue();
if (isNested) {
existsQuery = new BooleanQuery.Builder()
.add(existsQuery, Occur.MUST)
.add(Queries.newNonNestedFilter(), Occur.FILTER)
.build();
}
if (Lucene.exists(context.docSearcher(), existsQuery)) {
count++;
}
} catch (Throwable e) {
logger.debug("[" + entry.getKey() + "] failed to execute query", e);
throw new PercolateException(context.indexShard().shardId(), "failed to execute", e);
}
}
return new PercolateShardResponse(count, context, request.shardId());
}
};
private final PercolatorType queryCountPercolator = new PercolatorType() {
@Override
public byte id() {
return 0x02;
}
@Override
public ReduceResult reduce(List<PercolateShardResponse> shardResults, HasContextAndHeaders headersContext) {
return countPercolator.reduce(shardResults, headersContext);
}
@Override
public PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context, boolean isNested) {
long count = 0;
Engine.Searcher percolatorSearcher = context.indexShard().acquireSearcher("percolate");
try {
Count countCollector = count(logger, context, isNested);
queryBasedPercolating(percolatorSearcher, context, countCollector);
count = countCollector.counter();
} catch (Throwable e) {
logger.warn("failed to execute", e);
} finally {
percolatorSearcher.close();
}
return new PercolateShardResponse(count, context, request.shardId());
}
};
private final PercolatorType matchPercolator = new PercolatorType() {
@Override
public byte id() {
return 0x03;
}
@Override
public ReduceResult reduce(List<PercolateShardResponse> shardResults, HasContextAndHeaders headersContext) {
long foundMatches = 0;
int numMatches = 0;
for (PercolateShardResponse response : shardResults) {
foundMatches += response.count();
numMatches += response.matches().length;
}
int requestedSize = shardResults.get(0).requestedSize();
// Use a custom impl of AbstractBigArray for Object[]?
List<PercolateResponse.Match> finalMatches = new ArrayList<>(requestedSize == 0 ? numMatches : requestedSize);
outer:
for (PercolateShardResponse response : shardResults) {
Text index = new Text(response.getIndex());
for (int i = 0; i < response.matches().length; i++) {
float score = response.scores().length == 0 ? NO_SCORE : response.scores()[i];
Text match = new Text(new BytesArray(response.matches()[i]));
Map<String, HighlightField> hl = response.hls().isEmpty() ? null : response.hls().get(i);
finalMatches.add(new PercolateResponse.Match(index, match, score, hl));
if (requestedSize != 0 && finalMatches.size() == requestedSize) {
break outer;
}
}
}
assert !shardResults.isEmpty();
InternalAggregations reducedAggregations = reduceAggregations(shardResults, headersContext);
return new ReduceResult(foundMatches, finalMatches.toArray(new PercolateResponse.Match[finalMatches.size()]), reducedAggregations);
}
@Override
public PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context, boolean isNested) {
long count = 0;
List<BytesRef> matches = new ArrayList<>();
List<Map<String, HighlightField>> hls = new ArrayList<>();
for (Map.Entry<BytesRef, Query> entry : context.percolateQueries().entrySet()) {
if (context.highlight() != null) {
context.parsedQuery(new ParsedQuery(entry.getValue()));
context.hitContext().cache().clear();
}
try {
Query existsQuery = entry.getValue();
if (isNested) {
existsQuery = new BooleanQuery.Builder()
.add(existsQuery, Occur.MUST)
.add(Queries.newNonNestedFilter(), Occur.FILTER)
.build();
}
if (Lucene.exists(context.docSearcher(), existsQuery)) {
if (!context.limit || count < context.size()) {
matches.add(entry.getKey());
if (context.highlight() != null) {
highlightPhase.hitExecute(context, context.hitContext());
hls.add(context.hitContext().hit().getHighlightFields());
}
}
count++;
}
} catch (Throwable e) {
logger.debug("[" + entry.getKey() + "] failed to execute query", e);
throw new PercolateException(context.indexShard().shardId(), "failed to execute", e);
}
}
BytesRef[] finalMatches = matches.toArray(new BytesRef[matches.size()]);
return new PercolateShardResponse(finalMatches, hls, count, context, request.shardId());
}
};
private final PercolatorType queryPercolator = new PercolatorType() {
@Override
public byte id() {
return 0x04;
}
@Override
public ReduceResult reduce(List<PercolateShardResponse> shardResults, HasContextAndHeaders headersContext) {
return matchPercolator.reduce(shardResults, headersContext);
}
@Override
public PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context, boolean isNested) {
Engine.Searcher percolatorSearcher = context.indexShard().acquireSearcher("percolate");
try {
Match match = match(logger, context, highlightPhase, isNested);
queryBasedPercolating(percolatorSearcher, context, match);
List<BytesRef> matches = match.matches();
List<Map<String, HighlightField>> hls = match.hls();
long count = match.counter();
BytesRef[] finalMatches = matches.toArray(new BytesRef[matches.size()]);
return new PercolateShardResponse(finalMatches, hls, count, context, request.shardId());
} catch (Throwable e) {
logger.debug("failed to execute", e);
throw new PercolateException(context.indexShard().shardId(), "failed to execute", e);
} finally {
percolatorSearcher.close();
}
}
};
private final PercolatorType scoringPercolator = new PercolatorType() {
@Override
public byte id() {
return 0x05;
}
@Override
public ReduceResult reduce(List<PercolateShardResponse> shardResults, HasContextAndHeaders headersContext) {
return matchPercolator.reduce(shardResults, headersContext);
}
@Override
public PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context, boolean isNested) {
Engine.Searcher percolatorSearcher = context.indexShard().acquireSearcher("percolate");
try {
MatchAndScore matchAndScore = matchAndScore(logger, context, highlightPhase, isNested);
queryBasedPercolating(percolatorSearcher, context, matchAndScore);
List<BytesRef> matches = matchAndScore.matches();
List<Map<String, HighlightField>> hls = matchAndScore.hls();
float[] scores = matchAndScore.scores().toArray();
long count = matchAndScore.counter();
BytesRef[] finalMatches = matches.toArray(new BytesRef[matches.size()]);
return new PercolateShardResponse(finalMatches, hls, count, scores, context, request.shardId());
} catch (Throwable e) {
logger.debug("failed to execute", e);
throw new PercolateException(context.indexShard().shardId(), "failed to execute", e);
} finally {
percolatorSearcher.close();
}
}
};
private final PercolatorType topMatchingPercolator = new PercolatorType() {
@Override
public byte id() {
return 0x06;
}
@Override
public ReduceResult reduce(List<PercolateShardResponse> shardResults, HasContextAndHeaders headersContext) {
long foundMatches = 0;
int nonEmptyResponses = 0;
int firstNonEmptyIndex = 0;
for (int i = 0; i < shardResults.size(); i++) {
PercolateShardResponse response = shardResults.get(i);
foundMatches += response.count();
if (response.matches().length != 0) {
if (firstNonEmptyIndex == 0) {
firstNonEmptyIndex = i;
}
nonEmptyResponses++;
}
}
int requestedSize = shardResults.get(0).requestedSize();
// Use a custom impl of AbstractBigArray for Object[]?
List<PercolateResponse.Match> finalMatches = new ArrayList<>(requestedSize);
if (nonEmptyResponses == 1) {
PercolateShardResponse response = shardResults.get(firstNonEmptyIndex);
Text index = new Text(response.getIndex());
for (int i = 0; i < response.matches().length; i++) {
float score = response.scores().length == 0 ? Float.NaN : response.scores()[i];
Text match = new Text(new BytesArray(response.matches()[i]));
if (!response.hls().isEmpty()) {
Map<String, HighlightField> hl = response.hls().get(i);
finalMatches.add(new PercolateResponse.Match(index, match, score, hl));
} else {
finalMatches.add(new PercolateResponse.Match(index, match, score));
}
}
} else {
int[] slots = new int[shardResults.size()];
while (true) {
float lowestScore = Float.NEGATIVE_INFINITY;
int requestIndex = -1;
int itemIndex = -1;
for (int i = 0; i < shardResults.size(); i++) {
int scoreIndex = slots[i];
float[] scores = shardResults.get(i).scores();
if (scoreIndex >= scores.length) {
continue;
}
float score = scores[scoreIndex];
int cmp = Float.compare(lowestScore, score);
// TODO: Maybe add a tie-breaker?
if (cmp < 0) {
requestIndex = i;
itemIndex = scoreIndex;
lowestScore = score;
}
}
// This means the shard matches have been exhausted and we should bail
if (requestIndex == -1) {
break;
}
slots[requestIndex]++;
PercolateShardResponse shardResponse = shardResults.get(requestIndex);
Text index = new Text(shardResponse.getIndex());
Text match = new Text(new BytesArray(shardResponse.matches()[itemIndex]));
float score = shardResponse.scores()[itemIndex];
if (!shardResponse.hls().isEmpty()) {
Map<String, HighlightField> hl = shardResponse.hls().get(itemIndex);
finalMatches.add(new PercolateResponse.Match(index, match, score, hl));
} else {
finalMatches.add(new PercolateResponse.Match(index, match, score));
}
if (finalMatches.size() == requestedSize) {
break;
}
}
}
assert !shardResults.isEmpty();
InternalAggregations reducedAggregations = reduceAggregations(shardResults, headersContext);
return new ReduceResult(foundMatches, finalMatches.toArray(new PercolateResponse.Match[finalMatches.size()]), reducedAggregations);
}
@Override
public PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context, boolean isNested) {
Engine.Searcher percolatorSearcher = context.indexShard().acquireSearcher("percolate");
try {
MatchAndSort matchAndSort = QueryCollector.matchAndSort(logger, context, isNested);
queryBasedPercolating(percolatorSearcher, context, matchAndSort);
TopDocs topDocs = matchAndSort.topDocs();
long count = topDocs.totalHits;
List<BytesRef> matches = new ArrayList<>(topDocs.scoreDocs.length);
float[] scores = new float[topDocs.scoreDocs.length];
List<Map<String, HighlightField>> hls = null;
if (context.highlight() != null) {
hls = new ArrayList<>(topDocs.scoreDocs.length);
}
final MappedFieldType uidMapper = context.mapperService().fullName(UidFieldMapper.NAME);
final IndexFieldData<?> uidFieldData = context.fieldData().getForField(uidMapper);
int i = 0;
for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
int segmentIdx = ReaderUtil.subIndex(scoreDoc.doc, percolatorSearcher.reader().leaves());
LeafReaderContext atomicReaderContext = percolatorSearcher.reader().leaves().get(segmentIdx);
SortedBinaryDocValues values = uidFieldData.load(atomicReaderContext).getBytesValues();
final int localDocId = scoreDoc.doc - atomicReaderContext.docBase;
values.setDocument(localDocId);
final int numValues = values.count();
assert numValues == 1;
BytesRef bytes = Uid.splitUidIntoTypeAndId(values.valueAt(0))[1];
matches.add(BytesRef.deepCopyOf(bytes));
if (hls != null) {
Query query = context.percolateQueries().get(bytes);
context.parsedQuery(new ParsedQuery(query));
context.hitContext().cache().clear();
highlightPhase.hitExecute(context, context.hitContext());
hls.add(i, context.hitContext().hit().getHighlightFields());
}
scores[i++] = scoreDoc.score;
}
if (hls != null) {
return new PercolateShardResponse(matches.toArray(new BytesRef[matches.size()]), hls, count, scores, context, request.shardId());
} else {
return new PercolateShardResponse(matches.toArray(new BytesRef[matches.size()]), count, scores, context, request.shardId());
}
} catch (Throwable e) {
logger.debug("failed to execute", e);
throw new PercolateException(context.indexShard().shardId(), "failed to execute", e);
} finally {
percolatorSearcher.close();
}
}
};
private void queryBasedPercolating(Engine.Searcher percolatorSearcher, PercolateContext context, QueryCollector percolateCollector) throws IOException {
Query percolatorTypeFilter = context.indexService().mapperService().documentMapper(TYPE_NAME).typeFilter();
final Query filter;
if (context.aliasFilter() != null) {
BooleanQuery.Builder booleanFilter = new BooleanQuery.Builder();
booleanFilter.add(context.aliasFilter(), BooleanClause.Occur.MUST);
booleanFilter.add(percolatorTypeFilter, BooleanClause.Occur.MUST);
filter = booleanFilter.build();
} else {
filter = percolatorTypeFilter;
}
Query query = Queries.filtered(context.percolateQuery(), filter);
percolatorSearcher.searcher().search(query, percolateCollector);
percolateCollector.aggregatorCollector.postCollection();
if (context.aggregations() != null) {
aggregationPhase.execute(context);
}
}
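// Illustrative sketch (not part of this commit): queryBasedPercolating above uses
// Queries.filtered(...) to combine the percolate query with the percolator type /
// alias filter. As far as this diff shows, that amounts to the MUST + FILTER
// composition sketched below with stock Lucene; field names and values are
// hypothetical.
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.TermQuery;

class FilteredQuerySketch {
    public static void main(String[] args) {
        BooleanQuery filtered = new BooleanQuery.Builder()
                // scoring part: the user-supplied percolate query
                .add(new TermQuery(new Term("field", "value")), BooleanClause.Occur.MUST)
                // non-scoring part: restricts the search to percolator documents
                .add(new TermQuery(new Term("_type", ".percolator")), BooleanClause.Occur.FILTER)
                .build();
        System.out.println(filtered);
    }
}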
public final static class ReduceResult {
private final long count;
@ -852,32 +368,5 @@ public class PercolatorService extends AbstractComponent {
}
}
private InternalAggregations reduceAggregations(List<PercolateShardResponse> shardResults, HasContextAndHeaders headersContext) {
if (shardResults.get(0).aggregations() == null) {
return null;
}
List<InternalAggregations> aggregationsList = new ArrayList<>(shardResults.size());
for (PercolateShardResponse shardResult : shardResults) {
aggregationsList.add(shardResult.aggregations());
}
InternalAggregations aggregations = InternalAggregations.reduce(aggregationsList, new ReduceContext(bigArrays, scriptService,
headersContext));
if (aggregations != null) {
List<SiblingPipelineAggregator> pipelineAggregators = shardResults.get(0).pipelineAggregators();
if (pipelineAggregators != null) {
List<InternalAggregation> newAggs = StreamSupport.stream(aggregations.spliterator(), false).map((p) -> {
return (InternalAggregation) p;
}).collect(Collectors.toList());
for (SiblingPipelineAggregator pipelineAggregator : pipelineAggregators) {
InternalAggregation newAgg = pipelineAggregator.doReduce(new InternalAggregations(newAggs), new ReduceContext(
bigArrays, scriptService, headersContext));
newAggs.add(newAgg);
}
aggregations = new InternalAggregations(newAggs);
}
}
return aggregations;
}
}
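// Illustrative sketch (not part of this commit): reduceAggregations(...) above uses
// the StreamSupport idiom to copy an Iterable into a mutable List before appending
// the sibling pipeline reductions. The same pattern on plain JDK types; the class
// name and sample values are hypothetical.
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;

class IterableToListSketch {
    public static void main(String[] args) {
        Iterable<Object> aggregations = Arrays.<Object>asList("terms", "avg", "max");
        List<String> copy = StreamSupport.stream(aggregations.spliterator(), false)
                .map((a) -> (String) a) // mirrors the (InternalAggregation) cast above
                .collect(Collectors.toList());
        copy.add("sibling-pipeline-result"); // reductions are appended, as newAggs.add(newAgg) does
        System.out.println(copy);
    }
}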

View File

@ -1,404 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.percolator;
import com.carrotsearch.hppc.FloatArrayList;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.SimpleCollector;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.BucketCollector;
import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregator;
import org.elasticsearch.search.aggregations.support.AggregationContext;
import org.elasticsearch.search.highlight.HighlightField;
import org.elasticsearch.search.highlight.HighlightPhase;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentMap;
/**
*/
abstract class QueryCollector extends SimpleCollector {
final IndexFieldData<?> uidFieldData;
final IndexSearcher searcher;
final ConcurrentMap<BytesRef, Query> queries;
final ESLogger logger;
boolean isNestedDoc = false;
BytesRef current;
SortedBinaryDocValues values;
final BucketCollector aggregatorCollector;
LeafCollector aggregatorLeafCollector;
QueryCollector(ESLogger logger, PercolateContext context, boolean isNestedDoc) throws IOException {
this.logger = logger;
this.queries = context.percolateQueries();
this.searcher = context.docSearcher();
final MappedFieldType uidMapper = context.mapperService().fullName(UidFieldMapper.NAME);
this.uidFieldData = context.fieldData().getForField(uidMapper);
this.isNestedDoc = isNestedDoc;
List<Aggregator> aggregatorCollectors = new ArrayList<>();
if (context.aggregations() != null) {
AggregationContext aggregationContext = new AggregationContext(context);
context.aggregations().aggregationContext(aggregationContext);
context.aggregations().factories().init(aggregationContext);
Aggregator[] aggregators = context.aggregations().factories().createTopLevelAggregators();
for (int i = 0; i < aggregators.length; i++) {
if (!(aggregators[i] instanceof GlobalAggregator)) {
Aggregator aggregator = aggregators[i];
aggregatorCollectors.add(aggregator);
}
}
context.aggregations().aggregators(aggregators);
}
aggregatorCollector = BucketCollector.wrap(aggregatorCollectors);
aggregatorCollector.preCollection();
}
public void postMatch(int doc) throws IOException {
aggregatorLeafCollector.collect(doc);
}
@Override
public void setScorer(Scorer scorer) throws IOException {
aggregatorLeafCollector.setScorer(scorer);
}
@Override
public boolean needsScores() {
return aggregatorCollector.needsScores();
}
@Override
public void doSetNextReader(LeafReaderContext context) throws IOException {
// we use the UID because id might not be indexed
values = uidFieldData.load(context).getBytesValues();
aggregatorLeafCollector = aggregatorCollector.getLeafCollector(context);
}
static Match match(ESLogger logger, PercolateContext context, HighlightPhase highlightPhase, boolean isNestedDoc) throws IOException {
return new Match(logger, context, highlightPhase, isNestedDoc);
}
static Count count(ESLogger logger, PercolateContext context, boolean isNestedDoc) throws IOException {
return new Count(logger, context, isNestedDoc);
}
static MatchAndScore matchAndScore(ESLogger logger, PercolateContext context, HighlightPhase highlightPhase, boolean isNestedDoc) throws IOException {
return new MatchAndScore(logger, context, highlightPhase, isNestedDoc);
}
static MatchAndSort matchAndSort(ESLogger logger, PercolateContext context, boolean isNestedDoc) throws IOException {
return new MatchAndSort(logger, context, isNestedDoc);
}
protected final Query getQuery(int doc) {
values.setDocument(doc);
final int numValues = values.count();
if (numValues == 0) {
return null;
}
assert numValues == 1;
current = Uid.splitUidIntoTypeAndId(values.valueAt(0))[1];
return queries.get(current);
}
final static class Match extends QueryCollector {
final PercolateContext context;
final HighlightPhase highlightPhase;
final List<BytesRef> matches = new ArrayList<>();
final List<Map<String, HighlightField>> hls = new ArrayList<>();
final boolean limit;
final int size;
long counter = 0;
Match(ESLogger logger, PercolateContext context, HighlightPhase highlightPhase, boolean isNestedDoc) throws IOException {
super(logger, context, isNestedDoc);
this.limit = context.limit;
this.size = context.size();
this.context = context;
this.highlightPhase = highlightPhase;
}
@Override
public void collect(int doc) throws IOException {
final Query query = getQuery(doc);
if (query == null) {
// log???
return;
}
Query existsQuery = query;
if (isNestedDoc) {
existsQuery = new BooleanQuery.Builder()
.add(existsQuery, Occur.MUST)
.add(Queries.newNonNestedFilter(), Occur.FILTER)
.build();
}
// run the query
try {
if (context.highlight() != null) {
context.parsedQuery(new ParsedQuery(query));
context.hitContext().cache().clear();
}
if (Lucene.exists(searcher, existsQuery)) {
if (!limit || counter < size) {
matches.add(BytesRef.deepCopyOf(current));
if (context.highlight() != null) {
highlightPhase.hitExecute(context, context.hitContext());
hls.add(context.hitContext().hit().getHighlightFields());
}
}
counter++;
postMatch(doc);
}
} catch (IOException e) {
logger.warn("[" + current.utf8ToString() + "] failed to execute query", e);
}
}
long counter() {
return counter;
}
List<BytesRef> matches() {
return matches;
}
List<Map<String, HighlightField>> hls() {
return hls;
}
}
final static class MatchAndSort extends QueryCollector {
private final TopScoreDocCollector topDocsCollector;
private LeafCollector topDocsLeafCollector;
MatchAndSort(ESLogger logger, PercolateContext context, boolean isNestedDoc) throws IOException {
super(logger, context, isNestedDoc);
// TODO: Use TopFieldCollector.create(...) for ascending and descending scoring?
topDocsCollector = TopScoreDocCollector.create(context.size());
}
@Override
public boolean needsScores() {
return super.needsScores() || topDocsCollector.needsScores();
}
@Override
public void collect(int doc) throws IOException {
final Query query = getQuery(doc);
if (query == null) {
// log???
return;
}
Query existsQuery = query;
if (isNestedDoc) {
existsQuery = new BooleanQuery.Builder()
.add(existsQuery, Occur.MUST)
.add(Queries.newNonNestedFilter(), Occur.FILTER)
.build();
}
// run the query
try {
if (Lucene.exists(searcher, existsQuery)) {
topDocsLeafCollector.collect(doc);
postMatch(doc);
}
} catch (IOException e) {
logger.warn("[" + current.utf8ToString() + "] failed to execute query", e);
}
}
@Override
public void doSetNextReader(LeafReaderContext context) throws IOException {
super.doSetNextReader(context);
topDocsLeafCollector = topDocsCollector.getLeafCollector(context);
}
@Override
public void setScorer(Scorer scorer) throws IOException {
topDocsLeafCollector.setScorer(scorer);
}
TopDocs topDocs() {
return topDocsCollector.topDocs();
}
}
final static class MatchAndScore extends QueryCollector {
final PercolateContext context;
final HighlightPhase highlightPhase;
final List<BytesRef> matches = new ArrayList<>();
final List<Map<String, HighlightField>> hls = new ArrayList<>();
// TODO: Use thread local in order to cache the scores lists?
final FloatArrayList scores = new FloatArrayList();
final boolean limit;
final int size;
long counter = 0;
private Scorer scorer;
MatchAndScore(ESLogger logger, PercolateContext context, HighlightPhase highlightPhase, boolean isNestedDoc) throws IOException {
super(logger, context, isNestedDoc);
this.limit = context.limit;
this.size = context.size();
this.context = context;
this.highlightPhase = highlightPhase;
}
@Override
public boolean needsScores() {
return true;
}
@Override
public void collect(int doc) throws IOException {
final Query query = getQuery(doc);
if (query == null) {
// log???
return;
}
Query existsQuery = query;
if (isNestedDoc) {
existsQuery = new BooleanQuery.Builder()
.add(existsQuery, Occur.MUST)
.add(Queries.newNonNestedFilter(), Occur.FILTER)
.build();
}
// run the query
try {
if (context.highlight() != null) {
context.parsedQuery(new ParsedQuery(query));
context.hitContext().cache().clear();
}
if (Lucene.exists(searcher, existsQuery)) {
if (!limit || counter < size) {
matches.add(BytesRef.deepCopyOf(current));
scores.add(scorer.score());
if (context.highlight() != null) {
highlightPhase.hitExecute(context, context.hitContext());
hls.add(context.hitContext().hit().getHighlightFields());
}
}
counter++;
postMatch(doc);
}
} catch (IOException e) {
logger.warn("[" + current.utf8ToString() + "] failed to execute query", e);
}
}
@Override
public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer;
}
long counter() {
return counter;
}
List<BytesRef> matches() {
return matches;
}
FloatArrayList scores() {
return scores;
}
List<Map<String, HighlightField>> hls() {
return hls;
}
}
final static class Count extends QueryCollector {
private long counter = 0;
Count(ESLogger logger, PercolateContext context, boolean isNestedDoc) throws IOException {
super(logger, context, isNestedDoc);
}
@Override
public void collect(int doc) throws IOException {
final Query query = getQuery(doc);
if (query == null) {
// log???
return;
}
Query existsQuery = query;
if (isNestedDoc) {
existsQuery = new BooleanQuery.Builder()
.add(existsQuery, Occur.MUST)
.add(Queries.newNonNestedFilter(), Occur.FILTER)
.build();
}
// run the query
try {
if (Lucene.exists(searcher, existsQuery)) {
counter++;
postMatch(doc);
}
} catch (IOException e) {
logger.warn("[" + current.utf8ToString() + "] failed to execute query", e);
}
}
long counter() {
return counter;
}
}
}
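// Illustrative sketch (not part of this commit): the file removed above was built
// on Lucene's SimpleCollector contract, where collect(...) receives segment-relative
// doc ids and doSetNextReader(...) fires on each segment change. A minimal counting
// collector under the same contract, assuming Lucene 5.x APIs; the class name is
// hypothetical.
import java.io.IOException;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.SimpleCollector;

final class CountingCollectorSketch extends SimpleCollector {
    private int docBase;
    private long count;

    @Override
    protected void doSetNextReader(LeafReaderContext context) throws IOException {
        // Per-segment hook; QueryCollector used it to load uid doc values and
        // fetch the aggregator leaf collector for the new segment.
        docBase = context.docBase;
    }

    @Override
    public void collect(int doc) throws IOException {
        // doc is segment-relative; docBase + doc is the index-wide doc id.
        count++;
    }

    @Override
    public boolean needsScores() {
        return false; // counting does not require scoring
    }

    long count() {
        return count;
    }
}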

View File

@ -40,7 +40,6 @@ import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestResponse;
import org.elasticsearch.rest.action.support.RestBuilderListener;
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
import java.io.IOException;
import java.util.List;
@ -100,9 +99,6 @@ public class RestGetIndicesAction extends BaseRestHandler {
case SETTINGS:
writeSettings(response.settings().get(index), builder, request);
break;
case WARMERS:
writeWarmers(response.warmers().get(index), builder, request);
break;
default:
throw new IllegalStateException("feature [" + feature + "] is not valid");
}
@ -142,15 +138,6 @@ public class RestGetIndicesAction extends BaseRestHandler {
builder.endObject();
}
private void writeWarmers(List<IndexWarmersMetaData.Entry> warmers, XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields.WARMERS);
if (warmers != null) {
for (IndexWarmersMetaData.Entry warmer : warmers) {
IndexWarmersMetaData.toXContent(warmer, builder, params);
}
}
builder.endObject();
}
});
}

View File

@ -1,58 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.rest.action.admin.indices.warmer.delete;
import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerRequest;
import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.support.AcknowledgedRestListener;
import static org.elasticsearch.rest.RestRequest.Method.DELETE;
/**
*/
public class RestDeleteWarmerAction extends BaseRestHandler {
@Inject
public RestDeleteWarmerAction(Settings settings, RestController controller, Client client) {
super(settings, controller, client);
controller.registerHandler(DELETE, "/{index}/_warmer", this);
controller.registerHandler(DELETE, "/{index}/_warmer/{name}", this);
controller.registerHandler(DELETE, "/{index}/_warmers", this);
controller.registerHandler(DELETE, "/{index}/_warmers/{name}", this);
}
@Override
public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) {
DeleteWarmerRequest deleteWarmerRequest = new DeleteWarmerRequest(Strings.splitStringByCommaToArray(request.param("name")))
.indices(Strings.splitStringByCommaToArray(request.param("index")));
deleteWarmerRequest.timeout(request.paramAsTime("timeout", deleteWarmerRequest.timeout()));
deleteWarmerRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteWarmerRequest.masterNodeTimeout()));
deleteWarmerRequest.indicesOptions(IndicesOptions.fromRequest(request, deleteWarmerRequest.indicesOptions()));
client.admin().indices().deleteWarmer(deleteWarmerRequest, new AcknowledgedRestListener<DeleteWarmerResponse>(channel));
}
}

View File

@ -1,92 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.rest.action.admin.indices.warmer.get;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequest;
import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestResponse;
import org.elasticsearch.rest.action.support.RestBuilderListener;
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
import java.util.List;
import static org.elasticsearch.rest.RestRequest.Method.GET;
import static org.elasticsearch.rest.RestStatus.OK;
/**
*
*/
public class RestGetWarmerAction extends BaseRestHandler {
@Inject
public RestGetWarmerAction(Settings settings, RestController controller, Client client) {
super(settings, controller, client);
controller.registerHandler(GET, "/_warmer/{name}", this);
controller.registerHandler(GET, "/{index}/_warmer/{name}", this);
controller.registerHandler(GET, "/{index}/_warmers/{name}", this);
controller.registerHandler(GET, "/{index}/{type}/_warmer/{name}", this);
}
@Override
public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) {
final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
final String[] types = Strings.splitStringByCommaToArray(request.param("type"));
final String[] names = request.paramAsStringArray("name", Strings.EMPTY_ARRAY);
GetWarmersRequest getWarmersRequest = new GetWarmersRequest();
getWarmersRequest.indices(indices).types(types).warmers(names);
getWarmersRequest.local(request.paramAsBoolean("local", getWarmersRequest.local()));
getWarmersRequest.indicesOptions(IndicesOptions.fromRequest(request, getWarmersRequest.indicesOptions()));
client.admin().indices().getWarmers(getWarmersRequest, new RestBuilderListener<GetWarmersResponse>(channel) {
@Override
public RestResponse buildResponse(GetWarmersResponse response, XContentBuilder builder) throws Exception {
if (indices.length > 0 && response.warmers().isEmpty()) {
return new BytesRestResponse(OK, builder.startObject().endObject());
}
builder.startObject();
for (ObjectObjectCursor<String, List<IndexWarmersMetaData.Entry>> entry : response.warmers()) {
builder.startObject(entry.key, XContentBuilder.FieldCaseConversion.NONE);
builder.startObject(IndexWarmersMetaData.TYPE, XContentBuilder.FieldCaseConversion.NONE);
for (IndexWarmersMetaData.Entry warmerEntry : entry.value) {
IndexWarmersMetaData.toXContent(warmerEntry, builder, request);
}
builder.endObject();
builder.endObject();
}
builder.endObject();
return new BytesRestResponse(OK, builder);
}
});
}
}

View File

@ -1,85 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.rest.action.admin.indices.warmer.put;
import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.support.AcknowledgedRestListener;
import org.elasticsearch.rest.action.support.RestActions;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import java.io.IOException;
import static org.elasticsearch.rest.RestRequest.Method.POST;
import static org.elasticsearch.rest.RestRequest.Method.PUT;
/**
*/
public class RestPutWarmerAction extends BaseRestHandler {
private final IndicesQueriesRegistry queryRegistry;
@Inject
public RestPutWarmerAction(Settings settings, RestController controller, Client client, IndicesQueriesRegistry queryRegistry) {
super(settings, controller, client);
this.queryRegistry = queryRegistry;
controller.registerHandler(PUT, "/_warmer/{name}", this);
controller.registerHandler(PUT, "/{index}/_warmer/{name}", this);
controller.registerHandler(PUT, "/{index}/{type}/_warmer/{name}", this);
controller.registerHandler(PUT, "/_warmers/{name}", this);
controller.registerHandler(PUT, "/{index}/_warmers/{name}", this);
controller.registerHandler(PUT, "/{index}/{type}/_warmers/{name}", this);
controller.registerHandler(POST, "/_warmer/{name}", this);
controller.registerHandler(POST, "/{index}/_warmer/{name}", this);
controller.registerHandler(POST, "/{index}/{type}/_warmer/{name}", this);
controller.registerHandler(POST, "/_warmers/{name}", this);
controller.registerHandler(POST, "/{index}/_warmers/{name}", this);
controller.registerHandler(POST, "/{index}/{type}/_warmers/{name}", this);
}
@Override
public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws IOException {
PutWarmerRequest putWarmerRequest = new PutWarmerRequest(request.param("name"));
BytesReference sourceBytes = RestActions.getRestContent(request);
SearchSourceBuilder source = RestActions.getRestSearchSource(sourceBytes, queryRegistry, parseFieldMatcher);
SearchRequest searchRequest = new SearchRequest(Strings.splitStringByCommaToArray(request.param("index")))
.types(Strings.splitStringByCommaToArray(request.param("type")))
.requestCache(request.paramAsBoolean("request_cache", null)).source(source);
searchRequest.indicesOptions(IndicesOptions.fromRequest(request, searchRequest.indicesOptions()));
putWarmerRequest.searchRequest(searchRequest);
putWarmerRequest.timeout(request.paramAsTime("timeout", putWarmerRequest.timeout()));
putWarmerRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putWarmerRequest.masterNodeTimeout()));
client.admin().indices().putWarmer(putWarmerRequest, new AcknowledgedRestListener<>(channel));
}
}

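For reference, the integration tests later in this commit drive the same functionality through the admin client rather than these REST routes; a minimal usage sketch, with the index and warmer names as placeholders:

assertAcked(client().admin().indices().preparePutWarmer("warmer1")
        .setSearchRequest(client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())));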
View File

@ -29,7 +29,6 @@ import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.search.TopDocs;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.cache.recycler.PageCacheRecycler;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexMetaData;
@ -95,7 +94,6 @@ import org.elasticsearch.search.internal.InternalScrollSearchRequest;
import org.elasticsearch.search.internal.ScrollContext;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.SearchContext.Lifetime;
import org.elasticsearch.search.internal.ShardSearchLocalRequest;
import org.elasticsearch.search.internal.ShardSearchRequest;
import org.elasticsearch.search.profile.Profilers;
import org.elasticsearch.search.query.QueryPhase;
@ -103,7 +101,6 @@ import org.elasticsearch.search.query.QuerySearchRequest;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.query.QuerySearchResultProvider;
import org.elasticsearch.search.query.ScrollQuerySearchResult;
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
import org.elasticsearch.threadpool.ThreadPool;
import java.io.IOException;
@ -202,7 +199,6 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
this.indicesWarmer.addListener(new NormsWarmer(indicesWarmer));
this.indicesWarmer.addListener(new FieldDataWarmer(indicesWarmer));
this.indicesWarmer.addListener(new SearchWarmer());
defaultSearchTimeout = DEFAULT_SEARCH_TIMEOUT_SETTING.get(settings);
clusterSettings.addSettingsUpdateConsumer(DEFAULT_SEARCH_TIMEOUT_SETTING, this::setDefaultSearchTimeout);
@ -1167,76 +1163,6 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
}
}
class SearchWarmer implements IndicesWarmer.Listener {
@Override
public TerminationHandle warmNewReaders(IndexShard indexShard, final Engine.Searcher searcher) {
return internalWarm(indexShard, searcher, false);
}
@Override
public TerminationHandle warmTopReader(IndexShard indexShard, final Engine.Searcher searcher) {
return internalWarm(indexShard, searcher, true);
}
public TerminationHandle internalWarm(final IndexShard indexShard, final Engine.Searcher searcher, final boolean top) {
IndexWarmersMetaData custom = indexShard.getIndexSettings().getIndexMetaData().custom(IndexWarmersMetaData.TYPE);
if (custom == null) {
return TerminationHandle.NO_WAIT;
}
final Executor executor = indicesWarmer.getExecutor();
final CountDownLatch latch = new CountDownLatch(custom.entries().size());
for (final IndexWarmersMetaData.Entry entry : custom.entries()) {
executor.execute(() -> {
SearchContext context = null;
try {
long now = System.nanoTime();
final IndexService indexService = indicesService.indexServiceSafe(indexShard.shardId().index().name());
QueryParseContext queryParseContext = new QueryParseContext(indicesService.getIndicesQueryRegistry());
queryParseContext.parseFieldMatcher(indexService.getIndexSettings().getParseFieldMatcher());
ShardSearchRequest request = new ShardSearchLocalRequest(indexShard.shardId(), indexShard.getIndexSettings()
.getNumberOfShards(),
SearchType.QUERY_THEN_FETCH, entry.source().build(queryParseContext), entry.types(), entry.requestCache());
context = createContext(request, searcher);
// if the warmer uses a sort, we need to execute the query in order
// to sort on it and load the relevant field data; if not, we might
// as well set size=0 (and cache if needed)
if (context.sort() == null) {
context.size(0);
}
boolean canCache = indicesQueryCache.canCache(request, context);
// terminate early when we can cache, since proper caching is only
// possible on the top-level searcher; conversely, if we can't cache
// and this is the top-level reader, we don't need to execute the
// query, since we already did so for the non-top case
if (canCache != top) {
return;
}
loadOrExecuteQueryPhase(request, context, queryPhase);
long took = System.nanoTime() - now;
if (indexShard.warmerService().logger().isTraceEnabled()) {
indexShard.warmerService().logger().trace("warmed [{}], took [{}]", entry.name(), TimeValue.timeValueNanos(took));
}
} catch (Throwable t) {
indexShard.warmerService().logger().warn("warmer [{}] failed", t, entry.name());
} finally {
try {
if (context != null) {
freeContext(context.id());
cleanContext(context);
}
} finally {
latch.countDown();
}
}
});
}
return () -> latch.await();
}
}
class Reaper implements Runnable {
@Override
public void run() {

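The removed SearchWarmer above is an instance of a fan-out/await pattern: submit one task per warmer entry, count each completion on a latch (in a finally block, so failures still count down), and hand the caller a handle that awaits the latch. A self-contained sketch of the pattern, using illustrative names rather than the actual IndicesWarmer types:

import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executor;

public class WarmupFanOut {

    /** Illustrative stand-in for the TerminationHandle used above. */
    interface TerminationHandle {
        TerminationHandle NO_WAIT = () -> {};
        void awaitTermination() throws InterruptedException;
    }

    TerminationHandle warm(List<Runnable> warmers, Executor executor) {
        if (warmers.isEmpty()) {
            return TerminationHandle.NO_WAIT; // nothing to wait for
        }
        final CountDownLatch latch = new CountDownLatch(warmers.size());
        for (Runnable warmer : warmers) {
            executor.execute(() -> {
                try {
                    warmer.run();
                } finally {
                    latch.countDown(); // count down even when the warmer fails
                }
            });
        }
        return latch::await; // the caller decides whether and how long to block
    }
}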
View File

@ -1,61 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.warmer;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.rest.RestStatus;
import java.io.IOException;
import java.util.Arrays;
/**
*
*/
public class IndexWarmerMissingException extends ElasticsearchException {
private final String[] names;
public IndexWarmerMissingException(String... names) {
super("index_warmer " + Arrays.toString(names) + " missing");
this.names = names;
}
public String[] names() {
return this.names;
}
public IndexWarmerMissingException(StreamInput in) throws IOException{
super(in);
names = in.readStringArray();
}
@Override
public RestStatus status() {
return RestStatus.NOT_FOUND;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeStringArray(names);
}
}

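Like every ElasticsearchException subclass registered with an id, the class above must round-trip through the stream layer; a minimal sketch of that round trip, assuming the usual in-memory stream pair (BytesStreamOutput plus bytes().streamInput()):

BytesStreamOutput out = new BytesStreamOutput();
new IndexWarmerMissingException("w1", "w2").writeTo(out);
try (StreamInput in = out.bytes().streamInput()) {
    IndexWarmerMissingException copy = new IndexWarmerMissingException(in);
    assert Arrays.equals(new String[]{"w1", "w2"}, copy.names());
    assert copy.status() == RestStatus.NOT_FOUND;
}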
View File

@ -1,354 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.warmer;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.support.ToXContentToBytes;
import org.elasticsearch.cluster.AbstractDiffable;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentGenerator;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Objects;
/**
*/
public class IndexWarmersMetaData extends AbstractDiffable<IndexMetaData.Custom> implements IndexMetaData.Custom {
public static final String TYPE = "warmers";
public static final IndexWarmersMetaData PROTO = new IndexWarmersMetaData();
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
IndexWarmersMetaData that = (IndexWarmersMetaData) o;
return entries.equals(that.entries);
}
@Override
public int hashCode() {
return entries.hashCode();
}
public static class Entry {
private final String name;
private final String[] types;
private final SearchSource source;
private final Boolean requestCache;
public Entry(String name, String[] types, Boolean requestCache, SearchSource source) {
this.name = name;
this.types = types == null ? Strings.EMPTY_ARRAY : types;
this.source = source;
this.requestCache = requestCache;
}
public String name() {
return this.name;
}
public String[] types() {
return this.types;
}
@Nullable
public SearchSource source() {
return this.source;
}
@Nullable
public Boolean requestCache() {
return this.requestCache;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Entry entry = (Entry) o;
if (!name.equals(entry.name)) return false;
if (!Arrays.equals(types, entry.types)) return false;
if (!source.equals(entry.source)) return false;
return Objects.equals(requestCache, entry.requestCache);
}
@Override
public int hashCode() {
int result = name.hashCode();
result = 31 * result + Arrays.hashCode(types);
result = 31 * result + source.hashCode();
result = 31 * result + (requestCache != null ? requestCache.hashCode() : 0);
return result;
}
}
private final List<Entry> entries;
public IndexWarmersMetaData(Entry... entries) {
this.entries = Arrays.asList(entries);
}
public List<Entry> entries() {
return this.entries;
}
@Override
public String type() {
return TYPE;
}
@Override
public IndexWarmersMetaData readFrom(StreamInput in) throws IOException {
Entry[] entries = new Entry[in.readVInt()];
for (int i = 0; i < entries.length; i++) {
String name = in.readString();
String[] types = in.readStringArray();
SearchSource source = null;
if (in.readBoolean()) {
source = new SearchSource(in);
}
Boolean queryCache;
queryCache = in.readOptionalBoolean();
entries[i] = new Entry(name, types, queryCache, source);
}
return new IndexWarmersMetaData(entries);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(entries().size());
for (Entry entry : entries()) {
out.writeString(entry.name());
out.writeStringArray(entry.types());
if (entry.source() == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
entry.source.writeTo(out);
}
out.writeOptionalBoolean(entry.requestCache());
}
}
@Override
public IndexWarmersMetaData fromMap(Map<String, Object> map) throws IOException {
// if it starts with the type, remove it
if (map.size() == 1 && map.containsKey(TYPE)) {
map = (Map<String, Object>) map.values().iterator().next();
}
XContentBuilder builder = XContentFactory.smileBuilder().map(map);
try (XContentParser parser = XContentFactory.xContent(XContentType.SMILE).createParser(builder.bytes())) {
// move to START_OBJECT
parser.nextToken();
return fromXContent(parser);
}
}
@Override
public IndexWarmersMetaData fromXContent(XContentParser parser) throws IOException {
// we get here once the parser is positioned on the warmers token
String currentFieldName = null;
XContentParser.Token token;
List<Entry> entries = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
String name = currentFieldName;
List<String> types = new ArrayList<>(2);
SearchSource source = null;
Boolean queryCache = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_ARRAY) {
if ("types".equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
types.add(parser.text());
}
}
} else if (token == XContentParser.Token.START_OBJECT) {
if ("source".equals(currentFieldName)) {
ByteArrayOutputStream out = new ByteArrayOutputStream();
try (XContentGenerator generator = XContentType.JSON.xContent().createGenerator(out)) {
generator.copyCurrentStructure(parser);
}
source = new SearchSource(new BytesArray(out.toByteArray()));
}
} else if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) {
if ("source".equals(currentFieldName)) {
source = new SearchSource(new BytesArray(parser.binaryValue()));
}
} else if (token.isValue()) {
if ("requestCache".equals(currentFieldName) || "request_cache".equals(currentFieldName)) {
queryCache = parser.booleanValue();
}
}
}
entries.add(new Entry(name, types.size() == 0 ? Strings.EMPTY_ARRAY : types.toArray(new String[types.size()]), queryCache, source));
}
}
return new IndexWarmersMetaData(entries.toArray(new Entry[entries.size()]));
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
//No need, IndexMetaData already writes it
//builder.startObject(TYPE, XContentBuilder.FieldCaseConversion.NONE);
for (Entry entry : entries()) {
toXContent(entry, builder, params);
}
//No need, IndexMetaData already writes it
//builder.endObject();
return builder;
}
public static void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params params) throws IOException {
builder.startObject(entry.name(), XContentBuilder.FieldCaseConversion.NONE);
builder.field("types", entry.types());
if (entry.requestCache() != null) {
builder.field("requestCache", entry.requestCache());
}
builder.field("source", entry.source());
builder.endObject();
}
@Override
public IndexMetaData.Custom mergeWith(IndexMetaData.Custom other) {
IndexWarmersMetaData second = (IndexWarmersMetaData) other;
List<Entry> entries = new ArrayList<>();
entries.addAll(entries());
for (Entry secondEntry : second.entries()) {
boolean found = false;
for (Entry firstEntry : entries()) {
if (firstEntry.name().equals(secondEntry.name())) {
found = true;
break;
}
}
if (!found) {
entries.add(secondEntry);
}
}
return new IndexWarmersMetaData(entries.toArray(new Entry[entries.size()]));
}
public static class SearchSource extends ToXContentToBytes implements Writeable<SearchSource> {
private final BytesReference binary;
private SearchSourceBuilder cached;
public SearchSource(BytesReference bytesArray) {
if (bytesArray == null) {
throw new IllegalArgumentException("bytesArray must not be null");
}
this.binary = bytesArray;
}
public SearchSource(StreamInput input) throws IOException {
this(input.readBytesReference());
}
public SearchSource(SearchSourceBuilder source) {
try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) {
source.toXContent(builder, ToXContent.EMPTY_PARAMS);
binary = builder.bytes();
} catch (IOException ex) {
throw new ElasticsearchException("failed to generate XContent", ex);
}
}
public SearchSourceBuilder build(QueryParseContext ctx) throws IOException {
if (cached == null) {
try (XContentParser parser = XContentFactory.xContent(binary).createParser(binary)) {
ctx.reset(parser);
cached = SearchSourceBuilder.parseSearchSource(parser, ctx);
}
}
return cached;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
if (binary == null) {
cached.toXContent(builder, params);
} else {
try (XContentParser parser = XContentFactory.xContent(binary).createParser(binary)) {
builder.copyCurrentStructure(parser);
}
}
return builder;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeBytesReference(binary);
}
@Override
public SearchSource readFrom(StreamInput in) throws IOException {
return new SearchSource(in);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
SearchSource that = (SearchSource) o;
return binary.equals(that.binary);
}
@Override
public int hashCode() {
return binary.hashCode();
}
}
}

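The writeTo/readFrom pair above follows the stream convention for nullable fields: a nullable nested writeable is preceded by an explicit presence flag, while a nullable Boolean goes through writeOptionalBoolean/readOptionalBoolean directly. A minimal sketch of the presence-flag half, reusing the SearchSource type shown above (the helper names are illustrative):

void writeNullableSource(StreamOutput out, @Nullable SearchSource source) throws IOException {
    if (source == null) {
        out.writeBoolean(false);  // presence flag: absent, nothing follows
    } else {
        out.writeBoolean(true);   // presence flag: present
        source.writeTo(out);      // then the payload itself
    }
}

SearchSource readNullableSource(StreamInput in) throws IOException {
    return in.readBoolean() ? new SearchSource(in) : null;
}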
View File

@ -71,7 +71,6 @@ import org.elasticsearch.search.SearchException;
import org.elasticsearch.search.SearchParseException;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.warmer.IndexWarmerMissingException;
import org.elasticsearch.snapshots.SnapshotException;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.TestSearchContext;
@ -494,12 +493,6 @@ public class ExceptionSerializationTests extends ESTestCase {
assertEquals("[_na] msg", ex.getMessage());
}
public void testIndexWarmerMissingException() throws IOException {
IndexWarmerMissingException ex = serialize(new IndexWarmerMissingException("w1", "w2"));
assertEquals("index_warmer [w1, w2] missing", ex.getMessage());
assertArrayEquals(new String[]{"w1", "w2"}, ex.names());
}
public void testIndexTemplateMissingException() throws IOException {
IndexTemplateMissingException ex = serialize(new IndexTemplateMissingException("name"));
assertEquals("index_template [name] missing", ex.getMessage());
@ -735,7 +728,6 @@ public class ExceptionSerializationTests extends ESTestCase {
ids.put(90, org.elasticsearch.index.engine.RefreshFailedEngineException.class);
ids.put(91, org.elasticsearch.search.aggregations.AggregationInitializationException.class);
ids.put(92, org.elasticsearch.indices.recovery.DelayRecoveryException.class);
ids.put(93, org.elasticsearch.search.warmer.IndexWarmerMissingException.class);
ids.put(94, org.elasticsearch.client.transport.NoNodeAvailableException.class);
ids.put(95, null);
ids.put(96, org.elasticsearch.snapshots.InvalidSnapshotNameException.class);

View File

@ -26,7 +26,6 @@ import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.search.warmer.IndexWarmersMetaData.Entry;
import org.elasticsearch.test.ESIntegTestCase;
import java.util.ArrayList;
@ -52,7 +51,6 @@ public class GetIndexIT extends ESIntegTestCase {
assertAcked(prepareCreate("idx").addAlias(new Alias("alias_idx")).addMapping("type1", "{\"type1\":{}}")
.setSettings(Settings.builder().put("number_of_shards", 1)).get());
ensureSearchable("idx");
assertAcked(client().admin().indices().preparePutWarmer("warmer1").setSearchRequest(client().prepareSearch("idx")).get());
createIndex("empty_idx");
ensureSearchable("idx", "empty_idx");
}
@ -66,7 +64,6 @@ public class GetIndexIT extends ESIntegTestCase {
assertAliases(response, "idx");
assertMappings(response, "idx");
assertSettings(response, "idx");
assertWarmers(response, "idx");
}
public void testSimpleUnknownIndex() {
@ -87,7 +84,6 @@ public class GetIndexIT extends ESIntegTestCase {
assertEmptyAliases(response);
assertEmptyOrOnlyDefaultMappings(response, "empty_idx");
assertNonEmptySettings(response, "empty_idx");
assertEmptyWarmers(response);
}
public void testSimpleMapping() {
@ -100,7 +96,6 @@ public class GetIndexIT extends ESIntegTestCase {
assertMappings(response, "idx");
assertEmptyAliases(response);
assertEmptySettings(response);
assertEmptyWarmers(response);
}
public void testSimpleAlias() {
@ -113,7 +108,6 @@ public class GetIndexIT extends ESIntegTestCase {
assertAliases(response, "idx");
assertEmptyMappings(response);
assertEmptySettings(response);
assertEmptyWarmers(response);
}
public void testSimpleSettings() {
@ -126,20 +120,6 @@ public class GetIndexIT extends ESIntegTestCase {
assertSettings(response, "idx");
assertEmptyAliases(response);
assertEmptyMappings(response);
assertEmptyWarmers(response);
}
public void testSimpleWarmer() {
GetIndexResponse response = runWithRandomFeatureMethod(client().admin().indices().prepareGetIndex().addIndices("idx"),
Feature.WARMERS);
String[] indices = response.indices();
assertThat(indices, notNullValue());
assertThat(indices.length, equalTo(1));
assertThat(indices[0], equalTo("idx"));
assertWarmers(response, "idx");
assertEmptyAliases(response);
assertEmptyMappings(response);
assertEmptySettings(response);
}
public void testSimpleMixedFeatures() {
@ -169,11 +149,6 @@ public class GetIndexIT extends ESIntegTestCase {
} else {
assertEmptySettings(response);
}
if (features.contains(Feature.WARMERS)) {
assertWarmers(response, "idx");
} else {
assertEmptyWarmers(response);
}
}
public void testEmptyMixedFeatures() {
@ -199,7 +174,6 @@ public class GetIndexIT extends ESIntegTestCase {
} else {
assertEmptySettings(response);
}
assertEmptyWarmers(response);
}
public void testGetIndexWithBlocks() {
@ -235,18 +209,6 @@ public class GetIndexIT extends ESIntegTestCase {
}
}
private void assertWarmers(GetIndexResponse response, String indexName) {
ImmutableOpenMap<String, List<Entry>> warmers = response.warmers();
assertThat(warmers, notNullValue());
assertThat(warmers.size(), equalTo(1));
List<Entry> indexWarmers = warmers.get(indexName);
assertThat(indexWarmers, notNullValue());
assertThat(indexWarmers.size(), equalTo(1));
Entry warmer = indexWarmers.get(0);
assertThat(warmer, notNullValue());
assertThat(warmer.name(), equalTo("warmer1"));
}
private void assertSettings(GetIndexResponse response, String indexName) {
ImmutableOpenMap<String, Settings> settings = response.settings();
assertThat(settings, notNullValue());
@ -305,11 +267,6 @@ public class GetIndexIT extends ESIntegTestCase {
assertThat(alias.alias(), equalTo("alias_idx"));
}
private void assertEmptyWarmers(GetIndexResponse response) {
assertThat(response.warmers(), notNullValue());
assertThat(response.warmers().isEmpty(), equalTo(true));
}
private void assertEmptySettings(GetIndexResponse response) {
assertThat(response.settings(), notNullValue());
assertThat(response.settings().isEmpty(), equalTo(true));

View File

@ -1,35 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.warmer.put;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.test.ESTestCase;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.Matchers.hasSize;
public class PutWarmerRequestTests extends ESTestCase {
// issue 4196
public void testThatValidationWithoutSpecifyingSearchRequestFails() {
PutWarmerRequest putWarmerRequest = new PutWarmerRequest("foo");
ActionRequestValidationException validationException = putWarmerRequest.validate();
assertThat(validationException.validationErrors(), hasSize(1));
assertThat(validationException.getMessage(), containsString("search request is missing"));
}
}

View File

@ -65,6 +65,7 @@ import org.junit.BeforeClass;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.concurrent.CountDownLatch;
@ -75,9 +76,13 @@ import java.util.concurrent.atomic.AtomicInteger;
import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.state;
import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.stateWithStartedPrimary;
import static org.hamcrest.CoreMatchers.not;
import static org.hamcrest.Matchers.arrayWithSize;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasItem;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
@ -486,7 +491,39 @@ public class TransportReplicationActionTests extends ESTestCase {
replicationPhase.run();
final CapturingTransport.CapturedRequest[] capturedRequests = transport.capturedRequests();
transport.clear();
assertThat(capturedRequests.length, equalTo(assignedReplicas));
HashMap<String, Request> nodesSentTo = new HashMap<>();
boolean executeOnReplica =
action.shouldExecuteReplication(clusterService.state().getMetaData().index(shardId.getIndex()).getSettings());
for (CapturingTransport.CapturedRequest capturedRequest : capturedRequests) {
// no duplicate requests
Request replicationRequest = (Request) capturedRequest.request;
assertNull(nodesSentTo.put(capturedRequest.node.getId(), replicationRequest));
// the request is hitting the correct shard
assertEquals(request.shardId, replicationRequest.shardId);
}
// no request was sent to the local node
assertThat(nodesSentTo.keySet(), not(hasItem(clusterService.state().getNodes().localNodeId())));
// requests were sent to the correct shard copies
for (ShardRouting shard : clusterService.state().getRoutingTable().shardRoutingTable(shardId.getIndex(), shardId.id())) {
if (shard.primary() == false && executeOnReplica == false) {
continue;
}
if (shard.unassigned()) {
continue;
}
if (shard.primary() == false) {
nodesSentTo.remove(shard.currentNodeId());
}
if (shard.relocating()) {
nodesSentTo.remove(shard.relocatingNodeId());
}
}
assertThat(nodesSentTo.entrySet(), is(empty()));
if (assignedReplicas > 0) {
assertThat("listener is done, but there are outstanding replicas", listener.isDone(), equalTo(false));
}
@ -511,6 +548,12 @@ public class TransportReplicationActionTests extends ESTestCase {
transport.clear();
assertEquals(1, shardFailedRequests.length);
CapturingTransport.CapturedRequest shardFailedRequest = shardFailedRequests[0];
// get the shard the request was sent to
ShardRouting routing = clusterService.state().getRoutingNodes().node(capturedRequest.node.id()).get(request.shardId.id());
// and the shard that was requested to be failed
ShardStateAction.ShardRoutingEntry shardRoutingEntry = (ShardStateAction.ShardRoutingEntry)shardFailedRequest.request;
// the shard the request was sent to and the shard to be failed should be the same
assertEquals(shardRoutingEntry.getShardRouting(), routing);
failures.add(shardFailedRequest);
transport.handleResponse(shardFailedRequest.requestId, TransportResponse.Empty.INSTANCE);
}

View File

@ -28,7 +28,6 @@ import org.elasticsearch.cluster.metadata.AliasMetaData;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.search.warmer.IndexWarmersMetaData.Entry;
import org.elasticsearch.test.ESBackcompatTestCase;
import java.util.List;
@ -88,21 +87,4 @@ public class GetIndexBackwardsCompatibilityIT extends ESBackcompatTestCase {
assertThat(settings.get("index.number_of_shards"), equalTo("1"));
}
public void testGetWarmers() throws Exception {
createIndex("test");
ensureSearchable("test");
assertAcked(client().admin().indices().preparePutWarmer("warmer1").setSearchRequest(client().prepareSearch("test")).get());
ensureSearchable("test");
GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().addIndices("test").addFeatures(Feature.WARMERS)
.execute().actionGet();
ImmutableOpenMap<String, List<Entry>> warmersMap = getIndexResponse.warmers();
assertThat(warmersMap, notNullValue());
assertThat(warmersMap.size(), equalTo(1));
List<Entry> warmersList = warmersMap.get("test");
assertThat(warmersList, notNullValue());
assertThat(warmersList.size(), equalTo(1));
Entry warmer = warmersList.get(0);
assertThat(warmer, notNullValue());
assertThat(warmer.name(), equalTo("warmer1"));
}
}

View File

@ -32,20 +32,25 @@ import org.elasticsearch.action.admin.indices.upgrade.UpgradeIT;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.MultiDataPathUpgrader;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.gateway.MetaDataStateFormat;
import org.elasticsearch.index.engine.EngineConfig;
import org.elasticsearch.index.engine.Segment;
import org.elasticsearch.index.mapper.string.StringFieldMapperPositionIncrementGapTests;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.shard.MergePolicyConfig;
import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
@ -423,4 +428,62 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
UpgradeIT.assertUpgraded(client(), indexName);
}
private Path getNodeDir(String indexFile) throws IOException {
Path unzipDir = createTempDir();
Path unzipDataDir = unzipDir.resolve("data");
// decompress the index
Path backwardsIndex = getBwcIndicesPath().resolve(indexFile);
try (InputStream stream = Files.newInputStream(backwardsIndex)) {
TestUtil.unzip(stream, unzipDir);
}
// check that the unzipped data directory exists and contains exactly one cluster
assertTrue(Files.exists(unzipDataDir));
Path[] list = FileSystemUtils.files(unzipDataDir);
if (list.length != 1) {
throw new IllegalStateException("Backwards index must contain exactly one cluster");
}
// the bwc script packs the indices under this path
return list[0].resolve("nodes/0/");
}
public void testOldClusterStates() throws Exception {
// dangling indices do not load the global state, only the per-index states
// so we make sure we can read them separately
MetaDataStateFormat<MetaData> globalFormat = new MetaDataStateFormat<MetaData>(XContentType.JSON, "global-") {
@Override
public void toXContent(XContentBuilder builder, MetaData state) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public MetaData fromXContent(XContentParser parser) throws IOException {
return MetaData.Builder.fromXContent(parser);
}
};
MetaDataStateFormat<IndexMetaData> indexFormat = new MetaDataStateFormat<IndexMetaData>(XContentType.JSON, "state-") {
@Override
public void toXContent(XContentBuilder builder, IndexMetaData state) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public IndexMetaData fromXContent(XContentParser parser) throws IOException {
return IndexMetaData.Builder.fromXContent(parser);
}
};
Collections.shuffle(indexes, random());
for (String indexFile : indexes) {
String indexName = indexFile.replace(".zip", "").toLowerCase(Locale.ROOT).replace("unsupported-", "index-");
Path nodeDir = getNodeDir(indexFile);
logger.info("Parsing cluster state files from index [" + indexName + "]");
assertNotNull(globalFormat.loadLatestState(logger, nodeDir)); // no exception
Path indexDir = nodeDir.resolve("indices").resolve(indexName);
assertNotNull(indexFormat.loadLatestState(logger, indexDir)); // no exception
}
}
}

View File

@ -27,6 +27,7 @@ import org.elasticsearch.cluster.service.InternalClusterService;
import org.elasticsearch.cluster.service.PendingClusterTask;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.inject.Inject;
@ -51,9 +52,12 @@ import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
@ -61,6 +65,7 @@ import java.util.concurrent.atomic.AtomicInteger;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
@ -796,7 +801,92 @@ public class ClusterServiceIT extends ESIntegTestCase {
assertTrue(published.get());
}
public void testClusterStateBatchedUpdates() throws InterruptedException {
// test that for a single thread, tasks are executed in the order
// that they are submitted
public void testClusterStateUpdateTasksAreExecutedInOrder() throws BrokenBarrierException, InterruptedException {
Settings settings = settingsBuilder()
.put("discovery.type", "local")
.build();
internalCluster().startNode(settings);
ClusterService clusterService = internalCluster().getInstance(ClusterService.class);
class TaskExecutor implements ClusterStateTaskExecutor<Integer> {
List<Integer> tasks = new ArrayList<>();
@Override
public BatchResult<Integer> execute(ClusterState currentState, List<Integer> tasks) throws Exception {
this.tasks.addAll(tasks);
return BatchResult.<Integer>builder().successes(tasks).build(ClusterState.builder(currentState).build());
}
@Override
public boolean runOnlyOnMaster() {
return false;
}
}
int numberOfThreads = randomIntBetween(2, 8);
TaskExecutor[] executors = new TaskExecutor[numberOfThreads];
for (int i = 0; i < numberOfThreads; i++) {
executors[i] = new TaskExecutor();
}
int tasksSubmittedPerThread = randomIntBetween(2, 1024);
CopyOnWriteArrayList<Tuple<String, Throwable>> failures = new CopyOnWriteArrayList<>();
CountDownLatch updateLatch = new CountDownLatch(numberOfThreads * tasksSubmittedPerThread);
ClusterStateTaskListener listener = new ClusterStateTaskListener() {
@Override
public void onFailure(String source, Throwable t) {
logger.error("unexpected failure: [{}]", t, source);
failures.add(new Tuple<>(source, t));
updateLatch.countDown();
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
updateLatch.countDown();
}
};
CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);
for (int i = 0; i < numberOfThreads; i++) {
final int index = i;
Thread thread = new Thread(() -> {
try {
barrier.await();
for (int j = 0; j < tasksSubmittedPerThread; j++) {
clusterService.submitStateUpdateTask("[" + index + "][" + j + "]", j, ClusterStateTaskConfig.build(randomFrom(Priority.values())), executors[index], listener);
}
barrier.await();
} catch (InterruptedException | BrokenBarrierException e) {
throw new AssertionError(e);
}
});
thread.start();
}
// wait for all threads to be ready
barrier.await();
// wait for all threads to finish
barrier.await();
updateLatch.await();
assertThat(failures, empty());
for (int i = 0; i < numberOfThreads; i++) {
assertEquals(tasksSubmittedPerThread, executors[i].tasks.size());
for (int j = 0; j < tasksSubmittedPerThread; j++) {
assertNotNull(executors[i].tasks.get(j));
assertEquals("cluster state update task executed out of order", j, (int)executors[i].tasks.get(j));
}
}
}
public void testClusterStateBatchedUpdates() throws BrokenBarrierException, InterruptedException {
Settings settings = settingsBuilder()
.put("discovery.type", "local")
.build();
@ -884,19 +974,12 @@ public class ClusterServiceIT extends ESIntegTestCase {
counts.merge(executor, 1, (previous, one) -> previous + one);
}
CountDownLatch startGate = new CountDownLatch(1);
CountDownLatch endGate = new CountDownLatch(numberOfThreads);
AtomicBoolean interrupted = new AtomicBoolean();
CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);
for (int i = 0; i < numberOfThreads; i++) {
final int index = i;
Thread thread = new Thread(() -> {
try {
try {
startGate.await();
} catch (InterruptedException e) {
interrupted.set(true);
return;
}
barrier.await();
for (int j = 0; j < tasksSubmittedPerThread; j++) {
ClusterStateTaskExecutor<Task> executor = assignments.get(index * tasksSubmittedPerThread + j);
clusterService.submitStateUpdateTask(
@ -906,16 +989,18 @@ public class ClusterServiceIT extends ESIntegTestCase {
executor,
listener);
}
} finally {
endGate.countDown();
barrier.await();
} catch (BrokenBarrierException | InterruptedException e) {
throw new AssertionError(e);
}
});
thread.start();
}
startGate.countDown();
endGate.await();
assertFalse(interrupted.get());
// wait for all threads to be ready
barrier.await();
// wait for all threads to finish
barrier.await();
// wait until all the cluster state updates have been processed
updateLatch.await();

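The recurring test change in this commit swaps the startGate/endGate CountDownLatch pair for a single CyclicBarrier with 1 + numberOfThreads parties; both the workers and the coordinating thread await twice, once so everyone starts together and once so everyone finishes together, and any stray InterruptedException or BrokenBarrierException surfaces as an AssertionError instead of being silently swallowed. A self-contained sketch of the pattern:

import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;

public class BarrierGateExample {
    public static void main(String[] args) throws Exception {
        final int numberOfThreads = 4;
        // one extra party for the coordinating (main) thread
        final CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);
        for (int i = 0; i < numberOfThreads; i++) {
            new Thread(() -> {
                try {
                    barrier.await(); // wait until every worker is ready
                    // ... do the actual work here ...
                    barrier.await(); // signal that this worker is done
                } catch (BrokenBarrierException | InterruptedException e) {
                    throw new AssertionError(e);
                }
            }).start();
        }
        barrier.await(); // release all workers at once
        barrier.await(); // block until all workers have finished
    }
}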
View File

@ -39,7 +39,6 @@ import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
@ -49,7 +48,6 @@ import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
import org.elasticsearch.test.ESIntegTestCase;
import java.util.Collections;
@ -492,9 +490,6 @@ public class ClusterStateDiffIT extends ESIntegTestCase {
builder.settings(settingsBuilder);
builder.numberOfShards(randomIntBetween(1, 10)).numberOfReplicas(randomInt(10));
int aliasCount = randomInt(10);
if (randomBoolean()) {
builder.putCustom(IndexWarmersMetaData.TYPE, randomWarmers());
}
for (int i = 0; i < aliasCount; i++) {
builder.putAlias(randomAlias());
}
@ -504,7 +499,7 @@ public class ClusterStateDiffIT extends ESIntegTestCase {
@Override
public IndexMetaData randomChange(IndexMetaData part) {
IndexMetaData.Builder builder = IndexMetaData.builder(part);
switch (randomIntBetween(0, 3)) {
switch (randomIntBetween(0, 2)) {
case 0:
builder.settings(Settings.builder().put(part.getSettings()).put(randomSettings(Settings.EMPTY)));
break;
@ -518,9 +513,6 @@ public class ClusterStateDiffIT extends ESIntegTestCase {
case 2:
builder.settings(Settings.builder().put(part.getSettings()).put(IndexMetaData.SETTING_INDEX_UUID, Strings.randomBase64UUID()));
break;
case 3:
builder.putCustom(IndexWarmersMetaData.TYPE, randomWarmers());
break;
default:
throw new IllegalArgumentException("Shouldn't be here");
}
@ -529,23 +521,6 @@ public class ClusterStateDiffIT extends ESIntegTestCase {
});
}
/**
* Generates a random warmer
*/
private IndexWarmersMetaData randomWarmers() {
if (randomBoolean()) {
return new IndexWarmersMetaData(
new IndexWarmersMetaData.Entry(
randomName("warm"),
new String[]{randomName("type")},
randomBoolean(),
new IndexWarmersMetaData.SearchSource(new BytesArray(randomAsciiOfLength(1000))))
);
} else {
return new IndexWarmersMetaData();
}
}
/**
* Randomly adds, deletes or updates index templates in the metadata
*/
@ -576,9 +551,6 @@ public class ClusterStateDiffIT extends ESIntegTestCase {
for (int i = 0; i < aliasCount; i++) {
builder.putAlias(randomAlias());
}
if (randomBoolean()) {
builder.putCustom(IndexWarmersMetaData.TYPE, randomWarmers());
}
return builder.build();
}

View File

@ -19,7 +19,6 @@
package org.elasticsearch.cluster.ack;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
@ -27,9 +26,6 @@ import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse;
import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse;
import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse;
import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.AliasMetaData;
@ -42,12 +38,9 @@ import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import java.util.List;
import java.util.concurrent.TimeUnit;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
@ -88,83 +81,6 @@ public class AckIT extends ESIntegTestCase {
assertThat(updateSettingsResponse.isAcknowledged(), equalTo(false));
}
public void testPutWarmerAcknowledgement() {
createIndex("test");
// make sure one shard is started so the search during put warmer will not fail
index("test", "type", "1", "f", 1);
assertAcked(client().admin().indices().preparePutWarmer("custom_warmer")
.setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery())));
for (Client client : clients()) {
GetWarmersResponse getWarmersResponse = client.admin().indices().prepareGetWarmers().setLocal(true).get();
assertThat(getWarmersResponse.warmers().size(), equalTo(1));
ObjectObjectCursor<String, List<IndexWarmersMetaData.Entry>> entry = getWarmersResponse.warmers().iterator().next();
assertThat(entry.key, equalTo("test"));
assertThat(entry.value.size(), equalTo(1));
assertThat(entry.value.get(0).name(), equalTo("custom_warmer"));
}
}
public void testPutWarmerNoAcknowledgement() throws InterruptedException {
createIndex("test");
// make sure one shard is started so the search during put warmer will not fail
index("test", "type", "1", "f", 1);
PutWarmerResponse putWarmerResponse = client().admin().indices().preparePutWarmer("custom_warmer").setTimeout("0s")
.setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery()))
.get();
assertThat(putWarmerResponse.isAcknowledged(), equalTo(false));
/* Since we don't wait for the ack here, we have to wait until the master has executed the search request;
 * otherwise the test infra might have already deleted the index and the search request would fail on all shards,
 * causing the test to fail too. We simply wait until the warmer has been installed and also clean it up afterwards. */
assertTrue(awaitBusy(() -> {
for (Client client : clients()) {
GetWarmersResponse getWarmersResponse = client.admin().indices().prepareGetWarmers().setLocal(true).get();
if (getWarmersResponse.warmers().size() != 1) {
return false;
}
}
return true;
}));
assertAcked(client().admin().indices().prepareDeleteWarmer().setIndices("test").setNames("custom_warmer"));
}
public void testDeleteWarmerAcknowledgement() {
createIndex("test");
index("test", "type", "1", "f", 1);
assertAcked(client().admin().indices().preparePutWarmer("custom_warmer")
.setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery())));
assertAcked(client().admin().indices().prepareDeleteWarmer().setIndices("test").setNames("custom_warmer"));
for (Client client : clients()) {
GetWarmersResponse getWarmersResponse = client.admin().indices().prepareGetWarmers().setLocal(true).get();
assertThat(getWarmersResponse.warmers().size(), equalTo(0));
}
}
public void testDeleteWarmerNoAcknowledgement() throws InterruptedException {
createIndex("test");
index("test", "type", "1", "f", 1);
assertAcked(client().admin().indices().preparePutWarmer("custom_warmer")
.setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery())));
DeleteWarmerResponse deleteWarmerResponse = client().admin().indices().prepareDeleteWarmer().setIndices("test").setNames("custom_warmer").setTimeout("0s").get();
assertFalse(deleteWarmerResponse.isAcknowledged());
assertTrue(awaitBusy(() -> {
for (Client client : clients()) {
GetWarmersResponse getWarmersResponse = client.admin().indices().prepareGetWarmers().setLocal(true).get();
if (getWarmersResponse.warmers().size() > 0) {
return false;
}
}
return true;
}));
}
public void testClusterRerouteAcknowledgement() throws InterruptedException {
assertAcked(prepareCreate("test").setSettings(Settings.builder()
.put(indexSettings())

View File

@ -381,8 +381,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase {
assertThat(primary, notNullValue());
String fromId = primary.currentNodeId();
String toId = r.relocatingNodeId();
logger.error("From: " + fromId + " with Version: " + routingNodes.node(fromId).node().version() + " to: " + toId + " with Version: " + routingNodes.node(toId).node().version());
logger.error(routingNodes.prettyPrint());
logger.trace("From: " + fromId + " with Version: " + routingNodes.node(fromId).node().version() + " to: " + toId + " with Version: " + routingNodes.node(toId).node().version());
assertTrue(routingNodes.node(toId).node().version().onOrAfter(routingNodes.node(fromId).node().version()));
}
}

View File

@ -31,7 +31,10 @@ import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
@ -42,6 +45,8 @@ import java.util.concurrent.atomic.AtomicReferenceArray;
import java.util.stream.Collectors;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.is;
public class CacheTests extends ESTestCase {
private int numberOfEntries;
@ -483,7 +488,7 @@ public class CacheTests extends ESTestCase {
return value;
});
} catch (ExecutionException e) {
fail(e.getMessage());
throw new AssertionError(e);
}
}
for (int i = 0; i < numberOfEntries; i++) {
@ -491,25 +496,21 @@ public class CacheTests extends ESTestCase {
}
}
public void testComputeIfAbsentCallsOnce() throws InterruptedException {
public void testComputeIfAbsentCallsOnce() throws BrokenBarrierException, InterruptedException {
int numberOfThreads = randomIntBetween(2, 32);
final Cache<Integer, String> cache = CacheBuilder.<Integer, String>builder().build();
AtomicReferenceArray flags = new AtomicReferenceArray(numberOfEntries);
for (int j = 0; j < numberOfEntries; j++) {
flags.set(j, false);
}
CountDownLatch startGate = new CountDownLatch(1);
CountDownLatch endGate = new CountDownLatch(numberOfThreads);
AtomicBoolean interrupted = new AtomicBoolean();
CopyOnWriteArrayList<ExecutionException> failures = new CopyOnWriteArrayList<>();
CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);
for (int i = 0; i < numberOfThreads; i++) {
Thread thread = new Thread(() -> {
try {
try {
startGate.await();
} catch (InterruptedException e) {
interrupted.set(true);
return;
}
barrier.await();
for (int j = 0; j < numberOfEntries; j++) {
try {
cache.computeIfAbsent(j, key -> {
@ -517,18 +518,24 @@ public class CacheTests extends ESTestCase {
return Integer.toString(key);
});
} catch (ExecutionException e) {
throw new RuntimeException(e);
failures.add(e);
break;
}
}
} finally {
endGate.countDown();
barrier.await();
} catch (BrokenBarrierException | InterruptedException e) {
throw new AssertionError(e);
}
});
thread.start();
}
startGate.countDown();
endGate.await();
assertFalse(interrupted.get());
// wait for all threads to be ready
barrier.await();
// wait for all threads to finish
barrier.await();
assertThat(failures, is(empty()));
}
public void testComputeIfAbsentThrowsExceptionIfLoaderReturnsANullValue() {
@ -541,7 +548,7 @@ public class CacheTests extends ESTestCase {
}
}
public void testDependentKeyDeadlock() throws InterruptedException {
public void testDependentKeyDeadlock() throws BrokenBarrierException, InterruptedException {
class Key {
private final int key;
@ -568,18 +575,19 @@ public class CacheTests extends ESTestCase {
int numberOfThreads = randomIntBetween(2, 32);
final Cache<Key, Integer> cache = CacheBuilder.<Key, Integer>builder().build();
CountDownLatch startGate = new CountDownLatch(1);
CopyOnWriteArrayList<ExecutionException> failures = new CopyOnWriteArrayList<>();
CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);
CountDownLatch deadlockLatch = new CountDownLatch(numberOfThreads);
AtomicBoolean interrupted = new AtomicBoolean();
List<Thread> threads = new ArrayList<>();
for (int i = 0; i < numberOfThreads; i++) {
Thread thread = new Thread(() -> {
try {
try {
startGate.await();
} catch (InterruptedException e) {
interrupted.set(true);
return;
barrier.await();
} catch (BrokenBarrierException | InterruptedException e) {
throw new AssertionError(e);
}
Random random = new Random(random().nextLong());
for (int j = 0; j < numberOfEntries; j++) {
@ -594,7 +602,8 @@ public class CacheTests extends ESTestCase {
}
});
} catch (ExecutionException e) {
fail(e.getMessage());
failures.add(e);
break;
}
}
} finally {
@ -631,7 +640,7 @@ public class CacheTests extends ESTestCase {
}, 1, 1, TimeUnit.SECONDS);
// everything is set up, release the hounds
startGate.countDown();
barrier.await();
// wait for either deadlock to be detected or the threads to terminate
deadlockLatch.await();
@ -639,24 +648,21 @@ public class CacheTests extends ESTestCase {
// shutdown the watchdog service
scheduler.shutdown();
assertThat(failures, is(empty()));
assertFalse("deadlock", deadlock.get());
}
public void testCachePollution() throws InterruptedException {
public void testCachePollution() throws BrokenBarrierException, InterruptedException {
int numberOfThreads = randomIntBetween(2, 32);
final Cache<Integer, String> cache = CacheBuilder.<Integer, String>builder().build();
CountDownLatch startGate = new CountDownLatch(1);
CountDownLatch endGate = new CountDownLatch(numberOfThreads);
AtomicBoolean interrupted = new AtomicBoolean();
CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);
for (int i = 0; i < numberOfThreads; i++) {
Thread thread = new Thread(() -> {
try {
try {
startGate.await();
} catch (InterruptedException e) {
interrupted.set(true);
return;
}
barrier.await();
Random random = new Random(random().nextLong());
for (int j = 0; j < numberOfEntries; j++) {
Integer key = random.nextInt(numberOfEntries);
@ -686,21 +692,23 @@ public class CacheTests extends ESTestCase {
cache.get(key);
}
}
} finally {
endGate.countDown();
barrier.await();
} catch (BrokenBarrierException | InterruptedException e) {
throw new AssertionError(e);
}
});
thread.start();
}
startGate.countDown();
endGate.await();
assertFalse(interrupted.get());
// wait for all threads to be ready
barrier.await();
// wait for all threads to finish
barrier.await();
}
// test that the cache is not corrupted under lots of concurrent modifications, even hitting the same key
// here be dragons: this test did catch one subtle bug during development; do not remove lightly
public void testTorture() throws InterruptedException {
public void testTorture() throws BrokenBarrierException, InterruptedException {
int numberOfThreads = randomIntBetween(2, 32);
final Cache<Integer, String> cache =
CacheBuilder.<Integer, String>builder()
@ -708,32 +716,28 @@ public class CacheTests extends ESTestCase {
.weigher((k, v) -> 2)
.build();
CountDownLatch startGate = new CountDownLatch(1);
CountDownLatch endGate = new CountDownLatch(numberOfThreads);
AtomicBoolean interrupted = new AtomicBoolean();
CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);
for (int i = 0; i < numberOfThreads; i++) {
Thread thread = new Thread(() -> {
try {
try {
startGate.await();
} catch (InterruptedException e) {
interrupted.set(true);
return;
}
barrier.await();
Random random = new Random(random().nextLong());
for (int j = 0; j < numberOfEntries; j++) {
Integer key = random.nextInt(numberOfEntries);
cache.put(key, Integer.toString(j));
}
} finally {
endGate.countDown();
barrier.await();
} catch (BrokenBarrierException | InterruptedException e) {
throw new AssertionError(e);
}
});
thread.start();
}
startGate.countDown();
endGate.await();
assertFalse(interrupted.get());
// wait for all threads to be ready
barrier.await();
// wait for all threads to finish
barrier.await();
cache.refresh();
assertEquals(500, cache.count());

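All of the CacheTests changes above exercise the same contract: computeIfAbsent invokes its loader at most once per key, no matter how many threads race on that key, and any loader failure surfaces as an ExecutionException. A minimal sketch of the contract (the wrapper method and counter are illustrative):

String loadOnce(Cache<Integer, String> cache, AtomicInteger loads) throws ExecutionException {
    return cache.computeIfAbsent(42, key -> {
        loads.incrementAndGet();      // runs at most once for key 42
        return Integer.toString(key); // cached for every later caller
    });
}

A second call with the same key returns the cached string without invoking the loader again.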
View File

@ -38,7 +38,6 @@ import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MMapDirectory;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.util.Version;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
@ -54,14 +53,6 @@ import java.util.concurrent.atomic.AtomicBoolean;
*
*/
public class LuceneTests extends ESTestCase {
/**
* simple test that ensures that we bump the version on Upgrade
*/
public void testVersion() {
// note: this is just a sanity check; the real test lives in Lucene, we merely point to it from here
assertEquals(Lucene.VERSION, Version.LATEST);
}
public void testWaitForIndex() throws Exception {
final MockDirectoryWrapper dir = newMockDirectory();

View File

@ -111,7 +111,7 @@ public class CodecTests extends ESTestCase {
SimilarityService similarityService = new SimilarityService(settings, Collections.emptyMap());
AnalysisService analysisService = new AnalysisRegistry(null, new Environment(nodeSettings)).build(settings);
MapperRegistry mapperRegistry = new MapperRegistry(Collections.emptyMap(), Collections.emptyMap());
MapperService service = new MapperService(settings, analysisService, similarityService, mapperRegistry);
MapperService service = new MapperService(settings, analysisService, similarityService, mapperRegistry, () -> null);
return new CodecService(service, ESLoggerFactory.getLogger("test"));
}

View File

@ -71,11 +71,9 @@ import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.codec.CodecService;
import org.elasticsearch.index.engine.Engine.Searcher;
import org.elasticsearch.index.indexing.ShardIndexingService;
import org.elasticsearch.index.mapper.ContentPath;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.DocumentMapperForType;
import org.elasticsearch.index.mapper.DocumentMapperParser;
import org.elasticsearch.index.mapper.Mapper.BuilderContext;
import org.elasticsearch.index.mapper.MapperBuilders;
import org.elasticsearch.index.mapper.MapperService;
@ -272,7 +270,7 @@ public class InternalEngineTests extends ESTestCase {
IndexWriterConfig iwc = newIndexWriterConfig();
TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE);
EngineConfig config = new EngineConfig(shardId, threadPool, new ShardIndexingService(shardId, INDEX_SETTINGS), indexSettings
EngineConfig config = new EngineConfig(shardId, threadPool, indexSettings
, null, store, createSnapshotDeletionPolicy(), mergePolicy, mergeSchedulerConfig,
iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), new Engine.EventListener() {
@Override
@ -1930,7 +1928,7 @@ public class InternalEngineTests extends ESTestCase {
AnalysisService analysisService = new AnalysisService(indexSettings, Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
MapperRegistry mapperRegistry = new IndicesModule().getMapperRegistry();
MapperService mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry);
MapperService mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry, () -> null);
DocumentMapper.Builder b = new DocumentMapper.Builder(rootBuilder, mapperService);
this.docMapper = b.build(mapperService);
}
@ -1977,7 +1975,7 @@ public class InternalEngineTests extends ESTestCase {
/* create a TranslogConfig that has been created with a different UUID */
TranslogConfig translogConfig = new TranslogConfig(shardId, translog.location(), config.getIndexSettings(), BigArrays.NON_RECYCLING_INSTANCE);
EngineConfig brokenConfig = new EngineConfig(shardId, threadPool, config.getIndexingService(), config.getIndexSettings()
EngineConfig brokenConfig = new EngineConfig(shardId, threadPool, config.getIndexSettings()
, null, store, createSnapshotDeletionPolicy(), newMergePolicy(), config.getMergeSchedulerConfig(),
config.getAnalyzer(), config.getSimilarity(), new CodecService(null, logger), config.getEventListener()
, config.getTranslogRecoveryPerformer(), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5));

View File

@ -48,7 +48,6 @@ import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.codec.CodecService;
import org.elasticsearch.index.indexing.ShardIndexingService;
import org.elasticsearch.index.mapper.Mapping;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.ParsedDocument;
@ -224,7 +223,7 @@ public class ShadowEngineTests extends ESTestCase {
public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergeSchedulerConfig mergeSchedulerConfig, MergePolicy mergePolicy) {
IndexWriterConfig iwc = newIndexWriterConfig();
TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE);
EngineConfig config = new EngineConfig(shardId, threadPool, new ShardIndexingService(shardId, indexSettings), indexSettings
EngineConfig config = new EngineConfig(shardId, threadPool, indexSettings
, null, store, createSnapshotDeletionPolicy(), mergePolicy, mergeSchedulerConfig,
iwc.getAnalyzer(), iwc.getSimilarity() , new CodecService(null, logger), new Engine.EventListener() {
@Override

View File

@ -55,7 +55,7 @@ public class SimpleExternalMappingTests extends ESSingleNodeTestCase {
Collections.singletonMap(ExternalMetadataMapper.CONTENT_TYPE, new ExternalMetadataMapper.TypeParser()));
DocumentMapperParser parser = new DocumentMapperParser(indexService.getIndexSettings(), indexService.mapperService(),
indexService.analysisService(), indexService.similarityService(), mapperRegistry);
indexService.analysisService(), indexService.similarityService(), mapperRegistry, indexService::getQueryShardContext);
DocumentMapper documentMapper = parser.parse("type", new CompressedXContent(
XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject(ExternalMetadataMapper.CONTENT_TYPE)
@ -101,7 +101,7 @@ public class SimpleExternalMappingTests extends ESSingleNodeTestCase {
MapperRegistry mapperRegistry = new MapperRegistry(mapperParsers, Collections.emptyMap());
DocumentMapperParser parser = new DocumentMapperParser(indexService.getIndexSettings(), indexService.mapperService(),
indexService.analysisService(), indexService.similarityService(), mapperRegistry);
indexService.analysisService(), indexService.similarityService(), mapperRegistry, indexService::getQueryShardContext);
DocumentMapper documentMapper = parser.parse("type", new CompressedXContent(
XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
@ -160,7 +160,7 @@ public class SimpleExternalMappingTests extends ESSingleNodeTestCase {
MapperRegistry mapperRegistry = new MapperRegistry(mapperParsers, Collections.emptyMap());
DocumentMapperParser parser = new DocumentMapperParser(indexService.getIndexSettings(), indexService.mapperService(),
indexService.analysisService(), indexService.similarityService(), mapperRegistry);
indexService.analysisService(), indexService.similarityService(), mapperRegistry, indexService::getQueryShardContext);
DocumentMapper documentMapper = parser.parse("type", new CompressedXContent(
XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")

View File

@ -236,9 +236,9 @@ public class FieldNamesFieldMapperTests extends ESSingleNodeTestCase {
IndicesModule indicesModule = new IndicesModule();
indicesModule.registerMetadataMapper("_dummy", new DummyMetadataFieldMapper.TypeParser());
final MapperRegistry mapperRegistry = indicesModule.getMapperRegistry();
MapperService mapperService = new MapperService(indexService.getIndexSettings(), indexService.analysisService(), indexService.similarityService(), mapperRegistry);
MapperService mapperService = new MapperService(indexService.getIndexSettings(), indexService.analysisService(), indexService.similarityService(), mapperRegistry, indexService::getQueryShardContext);
DocumentMapperParser parser = new DocumentMapperParser(indexService.getIndexSettings(), mapperService,
indexService.analysisService(), indexService.similarityService(), mapperRegistry);
indexService.analysisService(), indexService.similarityService(), mapperRegistry, indexService::getQueryShardContext);
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping));
ParsedDocument parsedDocument = mapper.parse("index", "type", "id", new BytesArray("{}"));

View File

@ -0,0 +1,287 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.percolator;
import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.queries.TermsQuery;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.test.ESTestCase;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasKey;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.nullValue;
import static org.hamcrest.Matchers.sameInstance;
public class ExtractQueryTermsServiceTests extends ESTestCase {
    public static final String QUERY_TERMS_FIELD = "extracted_terms";
    public static final String UNKNOWN_QUERY_FIELD = "unknown_query";
    public static final FieldType QUERY_TERMS_FIELD_TYPE = new FieldType();
static {
QUERY_TERMS_FIELD_TYPE.setTokenized(false);
QUERY_TERMS_FIELD_TYPE.setIndexOptions(IndexOptions.DOCS);
QUERY_TERMS_FIELD_TYPE.freeze();
}
public void testExtractQueryMetadata() {
BooleanQuery.Builder bq = new BooleanQuery.Builder();
TermQuery termQuery1 = new TermQuery(new Term("field1", "term1"));
bq.add(termQuery1, BooleanClause.Occur.SHOULD);
TermQuery termQuery2 = new TermQuery(new Term("field2", "term2"));
bq.add(termQuery2, BooleanClause.Occur.SHOULD);
ParseContext.Document document = new ParseContext.Document();
ExtractQueryTermsService.extractQueryTerms(bq.build(), document, QUERY_TERMS_FIELD, UNKNOWN_QUERY_FIELD, QUERY_TERMS_FIELD_TYPE);
Collections.sort(document.getFields(), (field1, field2) -> field1.binaryValue().compareTo(field2.binaryValue()));
assertThat(document.getFields().size(), equalTo(2));
assertThat(document.getFields().get(0).name(), equalTo(QUERY_TERMS_FIELD));
assertThat(document.getFields().get(0).binaryValue().utf8ToString(), equalTo("field1\u0000term1"));
assertThat(document.getFields().get(1).name(), equalTo(QUERY_TERMS_FIELD));
assertThat(document.getFields().get(1).binaryValue().utf8ToString(), equalTo("field2\u0000term2"));
}
public void testExtractQueryMetadata_unsupported() {
BooleanQuery.Builder bq = new BooleanQuery.Builder();
TermQuery termQuery1 = new TermQuery(new Term("field1", "term1"));
bq.add(termQuery1, BooleanClause.Occur.SHOULD);
TermQuery termQuery2 = new TermQuery(new Term("field2", "term2"));
bq.add(termQuery2, BooleanClause.Occur.SHOULD);
TermRangeQuery query = new TermRangeQuery("field1", new BytesRef("a"), new BytesRef("z"), true, true);
ParseContext.Document document = new ParseContext.Document();
ExtractQueryTermsService.extractQueryTerms(query, document, QUERY_TERMS_FIELD, UNKNOWN_QUERY_FIELD, QUERY_TERMS_FIELD_TYPE);
assertThat(document.getFields().size(), equalTo(1));
assertThat(document.getFields().get(0).name(), equalTo(UNKNOWN_QUERY_FIELD));
assertThat(document.getFields().get(0).binaryValue().utf8ToString(), equalTo(""));
}
public void testExtractQueryMetadata_termQuery() {
TermQuery termQuery = new TermQuery(new Term("_field", "_term"));
List<Term> terms = new ArrayList<>(ExtractQueryTermsService.extractQueryTerms(termQuery));
assertThat(terms.size(), equalTo(1));
assertThat(terms.get(0).field(), equalTo(termQuery.getTerm().field()));
assertThat(terms.get(0).bytes(), equalTo(termQuery.getTerm().bytes()));
}
public void testExtractQueryMetadata_phraseQuery() {
PhraseQuery phraseQuery = new PhraseQuery("_field", "_term1", "term2");
List<Term> terms = new ArrayList<>(ExtractQueryTermsService.extractQueryTerms(phraseQuery));
assertThat(terms.size(), equalTo(1));
assertThat(terms.get(0).field(), equalTo(phraseQuery.getTerms()[0].field()));
assertThat(terms.get(0).bytes(), equalTo(phraseQuery.getTerms()[0].bytes()));
}
public void testExtractQueryMetadata_booleanQuery() {
BooleanQuery.Builder builder = new BooleanQuery.Builder();
TermQuery termQuery1 = new TermQuery(new Term("_field", "_term"));
builder.add(termQuery1, BooleanClause.Occur.SHOULD);
PhraseQuery phraseQuery = new PhraseQuery("_field", "_term1", "term2");
builder.add(phraseQuery, BooleanClause.Occur.SHOULD);
BooleanQuery.Builder subBuilder = new BooleanQuery.Builder();
TermQuery termQuery2 = new TermQuery(new Term("_field1", "_term"));
subBuilder.add(termQuery2, BooleanClause.Occur.MUST);
TermQuery termQuery3 = new TermQuery(new Term("_field3", "_long_term"));
subBuilder.add(termQuery3, BooleanClause.Occur.MUST);
builder.add(subBuilder.build(), BooleanClause.Occur.SHOULD);
BooleanQuery booleanQuery = builder.build();
List<Term> terms = new ArrayList<>(ExtractQueryTermsService.extractQueryTerms(booleanQuery));
Collections.sort(terms);
assertThat(terms.size(), equalTo(3));
assertThat(terms.get(0).field(), equalTo(termQuery1.getTerm().field()));
assertThat(terms.get(0).bytes(), equalTo(termQuery1.getTerm().bytes()));
assertThat(terms.get(1).field(), equalTo(phraseQuery.getTerms()[0].field()));
assertThat(terms.get(1).bytes(), equalTo(phraseQuery.getTerms()[0].bytes()));
assertThat(terms.get(2).field(), equalTo(termQuery3.getTerm().field()));
assertThat(terms.get(2).bytes(), equalTo(termQuery3.getTerm().bytes()));
}
public void testExtractQueryMetadata_booleanQuery_onlyShould() {
BooleanQuery.Builder builder = new BooleanQuery.Builder();
TermQuery termQuery1 = new TermQuery(new Term("_field", "_term1"));
builder.add(termQuery1, BooleanClause.Occur.SHOULD);
TermQuery termQuery2 = new TermQuery(new Term("_field", "_term2"));
builder.add(termQuery2, BooleanClause.Occur.SHOULD);
BooleanQuery.Builder subBuilder = new BooleanQuery.Builder();
TermQuery termQuery3 = new TermQuery(new Term("_field1", "_term"));
subBuilder.add(termQuery3, BooleanClause.Occur.SHOULD);
TermQuery termQuery4 = new TermQuery(new Term("_field3", "_long_term"));
subBuilder.add(termQuery4, BooleanClause.Occur.SHOULD);
builder.add(subBuilder.build(), BooleanClause.Occur.SHOULD);
BooleanQuery booleanQuery = builder.build();
List<Term> terms = new ArrayList<>(ExtractQueryTermsService.extractQueryTerms(booleanQuery));
Collections.sort(terms);
assertThat(terms.size(), equalTo(4));
assertThat(terms.get(0).field(), equalTo(termQuery1.getTerm().field()));
assertThat(terms.get(0).bytes(), equalTo(termQuery1.getTerm().bytes()));
assertThat(terms.get(1).field(), equalTo(termQuery2.getTerm().field()));
assertThat(terms.get(1).bytes(), equalTo(termQuery2.getTerm().bytes()));
assertThat(terms.get(2).field(), equalTo(termQuery3.getTerm().field()));
assertThat(terms.get(2).bytes(), equalTo(termQuery3.getTerm().bytes()));
assertThat(terms.get(3).field(), equalTo(termQuery4.getTerm().field()));
assertThat(terms.get(3).bytes(), equalTo(termQuery4.getTerm().bytes()));
}
public void testExtractQueryMetadata_booleanQueryWithMustNot() {
BooleanQuery.Builder builder = new BooleanQuery.Builder();
TermQuery termQuery1 = new TermQuery(new Term("_field", "_term"));
builder.add(termQuery1, BooleanClause.Occur.MUST_NOT);
PhraseQuery phraseQuery = new PhraseQuery("_field", "_term1", "term2");
builder.add(phraseQuery, BooleanClause.Occur.SHOULD);
BooleanQuery booleanQuery = builder.build();
List<Term> terms = new ArrayList<>(ExtractQueryTermsService.extractQueryTerms(booleanQuery));
assertThat(terms.size(), equalTo(1));
assertThat(terms.get(0).field(), equalTo(phraseQuery.getTerms()[0].field()));
assertThat(terms.get(0).bytes(), equalTo(phraseQuery.getTerms()[0].bytes()));
}
public void testExtractQueryMetadata_constantScoreQuery() {
TermQuery termQuery1 = new TermQuery(new Term("_field", "_term"));
ConstantScoreQuery constantScoreQuery = new ConstantScoreQuery(termQuery1);
List<Term> terms = new ArrayList<>(ExtractQueryTermsService.extractQueryTerms(constantScoreQuery));
assertThat(terms.size(), equalTo(1));
assertThat(terms.get(0).field(), equalTo(termQuery1.getTerm().field()));
assertThat(terms.get(0).bytes(), equalTo(termQuery1.getTerm().bytes()));
}
public void testExtractQueryMetadata_boostQuery() {
TermQuery termQuery1 = new TermQuery(new Term("_field", "_term"));
BoostQuery constantScoreQuery = new BoostQuery(termQuery1, 1f);
List<Term> terms = new ArrayList<>(ExtractQueryTermsService.extractQueryTerms(constantScoreQuery));
assertThat(terms.size(), equalTo(1));
assertThat(terms.get(0).field(), equalTo(termQuery1.getTerm().field()));
assertThat(terms.get(0).bytes(), equalTo(termQuery1.getTerm().bytes()));
}
public void testExtractQueryMetadata_unsupportedQuery() {
TermRangeQuery termRangeQuery = new TermRangeQuery("_field", null, null, true, false);
try {
ExtractQueryTermsService.extractQueryTerms(termRangeQuery);
fail("UnsupportedQueryException expected");
} catch (ExtractQueryTermsService.UnsupportedQueryException e) {
assertThat(e.getUnsupportedQuery(), sameInstance(termRangeQuery));
}
TermQuery termQuery1 = new TermQuery(new Term("_field", "_term"));
        BooleanQuery.Builder builder = new BooleanQuery.Builder();
builder.add(termQuery1, BooleanClause.Occur.SHOULD);
builder.add(termRangeQuery, BooleanClause.Occur.SHOULD);
BooleanQuery bq = builder.build();
try {
ExtractQueryTermsService.extractQueryTerms(bq);
fail("UnsupportedQueryException expected");
} catch (ExtractQueryTermsService.UnsupportedQueryException e) {
assertThat(e.getUnsupportedQuery(), sameInstance(termRangeQuery));
}
}
public void testCreateQueryMetadataQuery() throws Exception {
MemoryIndex memoryIndex = new MemoryIndex(false);
memoryIndex.addField("field1", "the quick brown fox jumps over the lazy dog", new WhitespaceAnalyzer());
memoryIndex.addField("field2", "some more text", new WhitespaceAnalyzer());
memoryIndex.addField("_field3", "unhide me", new WhitespaceAnalyzer());
memoryIndex.addField("field4", "123", new WhitespaceAnalyzer());
IndexReader indexReader = memoryIndex.createSearcher().getIndexReader();
Query query = ExtractQueryTermsService.createQueryTermsQuery(indexReader, QUERY_TERMS_FIELD, UNKNOWN_QUERY_FIELD);
assertThat(query, instanceOf(TermsQuery.class));
        // there is no easy way to get at the terms inside a TermsQuery,
        // but if there are fewer than 16 terms it gets rewritten to a BooleanQuery, and then we can easily check the terms
BooleanQuery booleanQuery = (BooleanQuery) ((ConstantScoreQuery) query.rewrite(indexReader)).getQuery();
assertThat(booleanQuery.clauses().size(), equalTo(15));
assertClause(booleanQuery, 0, QUERY_TERMS_FIELD, "_field3\u0000me");
assertClause(booleanQuery, 1, QUERY_TERMS_FIELD, "_field3\u0000unhide");
assertClause(booleanQuery, 2, QUERY_TERMS_FIELD, "field1\u0000brown");
assertClause(booleanQuery, 3, QUERY_TERMS_FIELD, "field1\u0000dog");
assertClause(booleanQuery, 4, QUERY_TERMS_FIELD, "field1\u0000fox");
assertClause(booleanQuery, 5, QUERY_TERMS_FIELD, "field1\u0000jumps");
assertClause(booleanQuery, 6, QUERY_TERMS_FIELD, "field1\u0000lazy");
assertClause(booleanQuery, 7, QUERY_TERMS_FIELD, "field1\u0000over");
assertClause(booleanQuery, 8, QUERY_TERMS_FIELD, "field1\u0000quick");
assertClause(booleanQuery, 9, QUERY_TERMS_FIELD, "field1\u0000the");
assertClause(booleanQuery, 10, QUERY_TERMS_FIELD, "field2\u0000more");
assertClause(booleanQuery, 11, QUERY_TERMS_FIELD, "field2\u0000some");
assertClause(booleanQuery, 12, QUERY_TERMS_FIELD, "field2\u0000text");
assertClause(booleanQuery, 13, QUERY_TERMS_FIELD, "field4\u0000123");
assertClause(booleanQuery, 14, UNKNOWN_QUERY_FIELD, "");
}
public void testSelectTermsListWithHighestSumOfTermLength() {
Set<Term> terms1 = new HashSet<>();
int shortestTerms1Length = Integer.MAX_VALUE;
int sumTermLength = randomIntBetween(1, 128);
while (sumTermLength > 0) {
int length = randomInt(sumTermLength);
shortestTerms1Length = Math.min(shortestTerms1Length, length);
terms1.add(new Term("field", randomAsciiOfLength(length)));
sumTermLength -= length;
}
Set<Term> terms2 = new HashSet<>();
int shortestTerms2Length = Integer.MAX_VALUE;
sumTermLength = randomIntBetween(1, 128);
while (sumTermLength > 0) {
int length = randomInt(sumTermLength);
shortestTerms2Length = Math.min(shortestTerms2Length, length);
terms2.add(new Term("field", randomAsciiOfLength(length)));
sumTermLength -= length;
}
Set<Term> result = ExtractQueryTermsService.selectTermListWithTheLongestShortestTerm(terms1, terms2);
Set<Term> expected = shortestTerms1Length >= shortestTerms2Length ? terms1 : terms2;
assertThat(result, sameInstance(expected));
}
private void assertClause(BooleanQuery booleanQuery, int i, String expectedField, String expectedValue) {
assertThat(booleanQuery.clauses().get(i).getOccur(), equalTo(BooleanClause.Occur.SHOULD));
assertThat(((TermQuery) booleanQuery.clauses().get(i).getQuery()).getTerm().field(), equalTo(expectedField));
assertThat(((TermQuery) booleanQuery.clauses().get(i).getQuery()).getTerm().bytes().utf8ToString(), equalTo(expectedValue));
}
}
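
The assertions above lean on one encoding convention: the extracted field name and term text are joined with a NUL byte into a single indexed value. A self-contained sketch of that convention (the encode helper is hypothetical; the real logic lives in ExtractQueryTermsService):

import org.apache.lucene.index.Term;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;

public class TermEncodingSketch {
    // hypothetical helper: encodes a term as "<field>\u0000<text>", the shape the tests above assert on
    static BytesRef encode(Term term) {
        BytesRefBuilder builder = new BytesRefBuilder();
        builder.append(new BytesRef(term.field()));
        builder.append((byte) 0); // NUL separator between field name and term bytes
        builder.append(term.bytes());
        return builder.toBytesRef();
    }

    public static void main(String[] args) {
        BytesRef encoded = encode(new Term("field1", "term1"));
        // prints field1<NUL>term1, matching the "field1\u0000term1" expectations above
        System.out.println(encoded.utf8ToString().replace("\u0000", "<NUL>"));
    }
}
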

View File

@ -0,0 +1,95 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.percolator;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.percolator.PercolatorService;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.junit.Before;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
public class PercolatorFieldMapperTests extends ESSingleNodeTestCase {
private MapperService mapperService;
@Before
public void init() throws Exception {
IndexService indexService = createIndex("test", Settings.EMPTY);
mapperService = indexService.mapperService();
String mapper = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties").startObject("field").field("type", "string").endObject().endObject()
.endObject().endObject().string();
mapperService.merge("type", new CompressedXContent(mapper), true, true);
String percolatorMapper = XContentFactory.jsonBuilder().startObject().startObject(PercolatorService.TYPE_NAME)
.startObject("properties").startObject("query").field("type", "percolator").endObject().endObject()
.endObject().endObject().string();
mapperService.merge(PercolatorService.TYPE_NAME, new CompressedXContent(percolatorMapper), true, true);
}
public void testPercolatorFieldMapper() throws Exception {
ParsedDocument doc = mapperService.documentMapper(PercolatorService.TYPE_NAME).parse("test", PercolatorService.TYPE_NAME, "1", XContentFactory.jsonBuilder().startObject()
.field("query", termQuery("field", "value"))
.endObject().bytes());
assertThat(doc.rootDoc().getFields(PercolatorFieldMapper.EXTRACTED_TERMS_FULL_FIELD_NAME).length, equalTo(1));
assertThat(doc.rootDoc().getFields(PercolatorFieldMapper.EXTRACTED_TERMS_FULL_FIELD_NAME)[0].binaryValue().utf8ToString(), equalTo("field\0value"));
}
public void testPercolatorFieldMapper_noQuery() throws Exception {
ParsedDocument doc = mapperService.documentMapper(PercolatorService.TYPE_NAME).parse("test", PercolatorService.TYPE_NAME, "1", XContentFactory.jsonBuilder().startObject()
.endObject().bytes());
assertThat(doc.rootDoc().getFields(PercolatorFieldMapper.EXTRACTED_TERMS_FULL_FIELD_NAME).length, equalTo(0));
try {
mapperService.documentMapper(PercolatorService.TYPE_NAME).parse("test", PercolatorService.TYPE_NAME, "1", XContentFactory.jsonBuilder().startObject()
.nullField("query")
.endObject().bytes());
} catch (MapperParsingException e) {
assertThat(e.getDetailedMessage(), containsString("query malformed, must start with start_object"));
}
}
public void testAllowNoAdditionalSettings() throws Exception {
IndexService indexService = createIndex("test1", Settings.EMPTY);
MapperService mapperService = indexService.mapperService();
String percolatorMapper = XContentFactory.jsonBuilder().startObject().startObject(PercolatorService.TYPE_NAME)
.startObject("properties").startObject("query").field("type", "percolator").field("index", "no").endObject().endObject()
.endObject().endObject().string();
try {
mapperService.merge(PercolatorService.TYPE_NAME, new CompressedXContent(percolatorMapper), true, true);
fail("MapperParsingException expected");
} catch (MapperParsingException e) {
assertThat(e.getMessage(), equalTo("Mapping definition for [query] has unsupported parameters: [index : no]"));
}
}
}
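
For reference, the percolator document parsed in testPercolatorFieldMapper is plain JSON carrying the query DSL under a "query" field. A minimal sketch that builds such a document with the same XContentFactory API the test uses (field name and value are illustrative):

import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

public class PercolatorDocSketch {
    public static void main(String[] args) throws Exception {
        // the stored percolator document is a JSON object whose "query" field holds a query DSL object
        XContentBuilder doc = XContentFactory.jsonBuilder().startObject()
                .startObject("query")
                    .startObject("term").field("field", "value").endObject()
                .endObject()
            .endObject();
        System.out.println(doc.string()); // {"query":{"term":{"field":"value"}}}
    }
}
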

View File

@ -255,7 +255,7 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
ScriptService scriptService = injector.getInstance(ScriptService.class);
SimilarityService similarityService = new SimilarityService(idxSettings, Collections.emptyMap());
MapperRegistry mapperRegistry = injector.getInstance(MapperRegistry.class);
MapperService mapperService = new MapperService(idxSettings, analysisService, similarityService, mapperRegistry);
MapperService mapperService = new MapperService(idxSettings, analysisService, similarityService, mapperRegistry, () -> queryShardContext);
indexFieldDataService = new IndexFieldDataService(idxSettings, injector.getInstance(IndicesFieldDataCache.class), injector.getInstance(CircuitBreakerService.class), mapperService);
BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(idxSettings, new IndicesWarmer(idxSettings.getNodeSettings(), null), new BitsetFilterCache.Listener() {
@Override

View File

@ -32,6 +32,7 @@ import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
@ -55,6 +56,7 @@ import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.Charset;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.Files;
import java.nio.file.InvalidPathException;
import java.nio.file.Path;
@ -136,8 +138,8 @@ public class TranslogTests extends ESTestCase {
private TranslogConfig getTranslogConfig(Path path) {
Settings build = Settings.settingsBuilder()
.put(IndexMetaData.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT)
.build();
.put(IndexMetaData.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT)
.build();
ByteSizeValue bufferSize = randomBoolean() ? TranslogConfig.DEFAULT_BUFFER_SIZE : new ByteSizeValue(10 + randomInt(128 * 1024), ByteSizeUnit.BYTES);
return new TranslogConfig(shardId, path, IndexSettingsModule.newIndexSettings(shardId.index(), build), BigArrays.NON_RECYCLING_INSTANCE, bufferSize);
}
@ -335,9 +337,9 @@ public class TranslogTests extends ESTestCase {
assertEquals(6, copy.estimatedNumberOfOperations());
assertEquals(431, copy.getTranslogSizeInBytes());
assertEquals("\"translog\"{\n" +
" \"operations\" : 6,\n" +
" \"size_in_bytes\" : 431\n" +
"}", copy.toString().trim());
" \"operations\" : 6,\n" +
" \"size_in_bytes\" : 431\n" +
"}", copy.toString().trim());
try {
new TranslogStats(1, -1);
@ -634,7 +636,9 @@ public class TranslogTests extends ESTestCase {
assertFileIsPresent(translog, 1);
}
/** Tests that concurrent readers and writes maintain view and snapshot semantics */
/**
* Tests that concurrent readers and writes maintain view and snapshot semantics
*/
public void testConcurrentWriteViewsAndSnapshot() throws Throwable {
final Thread[] writers = new Thread[randomIntBetween(1, 10)];
final Thread[] readers = new Thread[randomIntBetween(1, 10)];
@ -833,7 +837,7 @@ public class TranslogTests extends ESTestCase {
int count = 0;
for (int op = 0; op < translogOperations; op++) {
locations.add(translog.add(new Translog.Index("test", "" + op, Integer.toString(++count).getBytes(Charset.forName("UTF-8")))));
if (rarely() && translogOperations > op+1) {
if (rarely() && translogOperations > op + 1) {
translog.commit();
}
}
@ -912,7 +916,7 @@ public class TranslogTests extends ESTestCase {
final TranslogReader reader = randomBoolean() ? writer : translog.openReader(writer.path(), Checkpoint.read(translog.location().resolve(Translog.CHECKPOINT_FILE_NAME)));
for (int i = 0; i < numOps; i++) {
ByteBuffer buffer = ByteBuffer.allocate(4);
reader.readBytes(buffer, reader.getFirstOperationOffset() + 4*i);
reader.readBytes(buffer, reader.getFirstOperationOffset() + 4 * i);
buffer.flip();
final int value = buffer.getInt();
assertEquals(i, value);
@ -951,9 +955,9 @@ public class TranslogTests extends ESTestCase {
for (int op = 0; op < translogOperations; op++) {
locations.add(translog.add(new Translog.Index("test", "" + op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
final boolean commit = commitOften ? frequently() : rarely();
if (commit && op < translogOperations-1) {
if (commit && op < translogOperations - 1) {
translog.commit();
minUncommittedOp = op+1;
minUncommittedOp = op + 1;
translogGeneration = translog.getGeneration();
}
}
@ -987,7 +991,7 @@ public class TranslogTests extends ESTestCase {
public void testRecoveryUncommitted() throws IOException {
List<Translog.Location> locations = new ArrayList<>();
int translogOperations = randomIntBetween(10, 100);
final int prepareOp = randomIntBetween(0, translogOperations-1);
final int prepareOp = randomIntBetween(0, translogOperations - 1);
Translog.TranslogGeneration translogGeneration = null;
final boolean sync = randomBoolean();
for (int op = 0; op < translogOperations; op++) {
@ -1040,7 +1044,7 @@ public class TranslogTests extends ESTestCase {
public void testRecoveryUncommittedFileExists() throws IOException {
List<Translog.Location> locations = new ArrayList<>();
int translogOperations = randomIntBetween(10, 100);
final int prepareOp = randomIntBetween(0, translogOperations-1);
final int prepareOp = randomIntBetween(0, translogOperations - 1);
Translog.TranslogGeneration translogGeneration = null;
final boolean sync = randomBoolean();
for (int op = 0; op < translogOperations; op++) {
@ -1094,7 +1098,7 @@ public class TranslogTests extends ESTestCase {
}
}
public void testRecoveryUncommittedCorryptedCheckpoint() throws IOException {
public void testRecoveryUncommittedCorruptedCheckpoint() throws IOException {
List<Translog.Location> locations = new ArrayList<>();
int translogOperations = 100;
final int prepareOp = 44;
@ -1116,10 +1120,10 @@ public class TranslogTests extends ESTestCase {
config.setTranslogGeneration(translogGeneration);
Path ckp = config.getTranslogPath().resolve(Translog.CHECKPOINT_FILE_NAME);
Checkpoint read = Checkpoint.read(ckp);
Checkpoint corrupted = new Checkpoint(0,0,0);
Checkpoint corrupted = new Checkpoint(0, 0, 0);
Checkpoint.write(config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)), corrupted, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
try (Translog translog = new Translog(config)) {
fail("corrupted");
fail("corrupted");
} catch (IllegalStateException ex) {
assertEquals(ex.getMessage(), "Checkpoint file translog-2.ckp already exists but has corrupted content expected: Checkpoint{offset=2683, numOps=55, translogFileGeneration= 2} but got: Checkpoint{offset=0, numOps=0, translogFileGeneration= 0}");
}
@ -1157,7 +1161,7 @@ public class TranslogTests extends ESTestCase {
List<Translog.Location> locations = new ArrayList<>();
List<Translog.Location> locations2 = new ArrayList<>();
int translogOperations = randomIntBetween(10, 100);
try(Translog translog2 = create(createTempDir())) {
try (Translog translog2 = create(createTempDir())) {
for (int op = 0; op < translogOperations; op++) {
locations.add(translog.add(new Translog.Index("test", "" + op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
locations2.add(translog2.add(new Translog.Index("test", "" + op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
@ -1196,7 +1200,7 @@ public class TranslogTests extends ESTestCase {
Translog.TranslogGeneration translogGeneration = translog.getGeneration();
translog.close();
config.setTranslogGeneration(new Translog.TranslogGeneration(randomRealisticUnicodeOfCodepointLengthBetween(1, translogGeneration.translogUUID.length()),translogGeneration.translogFileGeneration));
config.setTranslogGeneration(new Translog.TranslogGeneration(randomRealisticUnicodeOfCodepointLengthBetween(1, translogGeneration.translogUUID.length()), translogGeneration.translogFileGeneration));
try {
new Translog(config);
fail("translog doesn't belong to this UUID");
@ -1283,12 +1287,12 @@ public class TranslogTests extends ESTestCase {
case CREATE:
case INDEX:
op = new Translog.Index("test", threadId + "_" + opCount,
randomUnicodeOfLengthBetween(1, 20 * 1024).getBytes("UTF-8"));
randomUnicodeOfLengthBetween(1, 20 * 1024).getBytes("UTF-8"));
break;
case DELETE:
op = new Translog.Delete(new Term("_uid", threadId + "_" + opCount),
1 + randomInt(100000),
randomFrom(VersionType.values()));
1 + randomInt(100000),
randomFrom(VersionType.values()));
break;
default:
throw new ElasticsearchException("not supported op type");
@ -1307,19 +1311,20 @@ public class TranslogTests extends ESTestCase {
return translog.add(op);
}
protected void afterAdd() throws IOException {}
protected void afterAdd() throws IOException {
}
}
public void testFailFlush() throws IOException {
Path tempDir = createTempDir();
final AtomicBoolean fail = new AtomicBoolean();
final FailSwitch fail = new FailSwitch();
TranslogConfig config = getTranslogConfig(tempDir);
Translog translog = getFailableTranslog(fail, config);
List<Translog.Location> locations = new ArrayList<>();
int opsSynced = 0;
boolean failed = false;
while(failed == false) {
while (failed == false) {
try {
locations.add(translog.add(new Translog.Index("test", "" + opsSynced, Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8")))));
translog.sync();
@ -1331,10 +1336,14 @@ public class TranslogTests extends ESTestCase {
failed = true;
assertFalse(translog.isOpen());
assertEquals("__FAKE__ no space left on device", ex.getMessage());
}
fail.set(randomBoolean());
}
if (randomBoolean()) {
fail.failAlways();
} else {
fail.failNever();
}
}
fail.set(false);
fail.failNever();
if (randomBoolean()) {
try {
locations.add(translog.add(new Translog.Index("test", "" + opsSynced, Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8")))));
@ -1370,7 +1379,7 @@ public class TranslogTests extends ESTestCase {
assertFalse(translog.isOpen());
translog.close(); // we are closed
config.setTranslogGeneration(translogGeneration);
try (Translog tlog = new Translog(config)){
try (Translog tlog = new Translog(config)) {
assertEquals("lastCommitted must be 1 less than current", translogGeneration.translogFileGeneration + 1, tlog.currentFileGeneration());
assertFalse(tlog.syncNeeded());
@ -1393,7 +1402,7 @@ public class TranslogTests extends ESTestCase {
for (int opsAdded = 0; opsAdded < numOps; opsAdded++) {
locations.add(translog.add(new Translog.Index("test", "" + opsAdded, lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8")))));
try (Translog.Snapshot snapshot = translog.newSnapshot()) {
assertEquals(opsAdded+1, snapshot.estimatedTotalOperations());
assertEquals(opsAdded + 1, snapshot.estimatedTotalOperations());
for (int i = 0; i < opsAdded; i++) {
assertEquals("expected operation" + i + " to be in the current translog but wasn't", translog.currentFileGeneration(), locations.get(i).generation);
Translog.Operation next = snapshot.next();
@ -1405,13 +1414,13 @@ public class TranslogTests extends ESTestCase {
public void testTragicEventCanBeAnyException() throws IOException {
Path tempDir = createTempDir();
final AtomicBoolean fail = new AtomicBoolean();
final FailSwitch fail = new FailSwitch();
TranslogConfig config = getTranslogConfig(tempDir);
assumeFalse("this won't work if we sync on any op",config.isSyncOnEachOperation());
assumeFalse("this won't work if we sync on any op", config.isSyncOnEachOperation());
Translog translog = getFailableTranslog(fail, config, false, true);
        LineFileDocs lineFileDocs = new LineFileDocs(random()); // writes pretty big docs so we cross buffer borders regularly
translog.add(new Translog.Index("test", "1", lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8"))));
fail.set(true);
fail.failAlways();
try {
Translog.Location location = translog.add(new Translog.Index("test", "2", lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8"))));
if (randomBoolean()) {
@ -1427,12 +1436,12 @@ public class TranslogTests extends ESTestCase {
assertTrue(ex.getCause() instanceof UnknownException);
}
assertFalse(translog.isOpen());
assertTrue(translog.getTragicException() instanceof UnknownException);
assertTrue(translog.getTragicException() instanceof UnknownException);
}
public void testFatalIOExceptionsWhileWritingConcurrently() throws IOException, InterruptedException {
Path tempDir = createTempDir();
final AtomicBoolean fail = new AtomicBoolean(false);
final FailSwitch fail = new FailSwitch();
TranslogConfig config = getTranslogConfig(tempDir);
Translog translog = getFailableTranslog(fail, config);
@ -1469,7 +1478,7 @@ public class TranslogTests extends ESTestCase {
// this holds a reference to the current tlog channel such that it's not closed
        // if we hit a tragic event. this is important to ensure that asserts inside Translog#add don't trip,
        // otherwise our assertions here are sometimes off by one.
fail.set(true);
fail.failAlways();
for (int i = 0; i < threadCount; i++) {
threads[i].join();
}
@ -1520,11 +1529,41 @@ public class TranslogTests extends ESTestCase {
}
}
}
private Translog getFailableTranslog(final AtomicBoolean fail, final TranslogConfig config) throws IOException {
private Translog getFailableTranslog(FailSwitch fail, final TranslogConfig config) throws IOException {
return getFailableTranslog(fail, config, randomBoolean(), false);
}
    private Translog getFailableTranslog(final AtomicBoolean fail, final TranslogConfig config, final boolean partialWrites, final boolean throwUnknownException) throws IOException {
private static class FailSwitch {
private volatile int failRate;
private volatile boolean onceFailedFailAlways = false;
public boolean fail() {
boolean fail = randomIntBetween(1, 100) <= failRate;
if (fail && onceFailedFailAlways) {
failAlways();
}
return fail;
}
public void failNever() {
failRate = 0;
}
public void failAlways() {
failRate = 100;
}
public void failRandomly() {
failRate = randomIntBetween(1, 100);
}
public void onceFailedFailAlways() {
onceFailedFailAlways = true;
}
}
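
FailSwitch generalizes the old AtomicBoolean flag: failures can be switched off, forced, or injected with a given probability, optionally latching to always-fail after the first hit. A standalone rendition of the same contract, substituting plain java.util.Random for the test framework's randomIntBetween(1, 100):

import java.util.Random;

public class FailSwitchSketch {
    private static final Random RANDOM = new Random();
    private volatile int failRate;
    private volatile boolean onceFailedFailAlways = false;

    public boolean fail() {
        boolean fail = RANDOM.nextInt(100) + 1 <= failRate; // uniform draw in [1, 100]
        if (fail && onceFailedFailAlways) {
            failAlways(); // latch: after the first injected failure, keep failing
        }
        return fail;
    }

    public void failNever() { failRate = 0; }
    public void failAlways() { failRate = 100; }
    public void failRandomly() { failRate = RANDOM.nextInt(100) + 1; }
    public void onceFailedFailAlways() { onceFailedFailAlways = true; }

    public static void main(String[] args) {
        FailSwitchSketch fail = new FailSwitchSketch();
        fail.failAlways();
        System.out.println(fail.fail()); // true: every call fails at rate 100
        fail.failNever();
        System.out.println(fail.fail()); // false: failures disabled again
    }
}
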
    private Translog getFailableTranslog(final FailSwitch fail, final TranslogConfig config, final boolean partialWrites, final boolean throwUnknownException) throws IOException {
return new Translog(config) {
@Override
TranslogWriter.ChannelFactory getChannelFactory() {
@ -1534,23 +1573,56 @@ public class TranslogTests extends ESTestCase {
@Override
public FileChannel open(Path file) throws IOException {
FileChannel channel = factory.open(file);
                        return new ThrowingFileChannel(fail, partialWrites, throwUnknownException, channel);
boolean success = false;
try {
                            ThrowingFileChannel throwingFileChannel = new ThrowingFileChannel(fail, partialWrites, throwUnknownException, channel);
success = true;
return throwingFileChannel;
} finally {
if (success == false) {
IOUtils.closeWhileHandlingException(channel);
}
}
}
};
}
@Override
protected boolean assertBytesAtLocation(Location location, BytesReference expectedBytes) throws IOException {
                return true; // we don't want to fail in the assert
}
};
}
public static class ThrowingFileChannel extends FilterFileChannel {
private final AtomicBoolean fail;
private final FailSwitch fail;
private final boolean partialWrite;
private final boolean throwUnknownException;
public ThrowingFileChannel(AtomicBoolean fail, boolean partialWrite, boolean throwUnknownException, FileChannel delegate) {
public ThrowingFileChannel(FailSwitch fail, boolean partialWrite, boolean throwUnknownException, FileChannel delegate) throws MockDirectoryWrapper.FakeIOException {
super(delegate);
this.fail = fail;
this.partialWrite = partialWrite;
this.throwUnknownException = throwUnknownException;
if (fail.fail()) {
throw new MockDirectoryWrapper.FakeIOException();
}
}
@Override
public int read(ByteBuffer dst) throws IOException {
if (fail.fail()) {
throw new MockDirectoryWrapper.FakeIOException();
}
return super.read(dst);
}
@Override
public long read(ByteBuffer[] dsts, int offset, int length) throws IOException {
if (fail.fail()) {
throw new MockDirectoryWrapper.FakeIOException();
}
return super.read(dsts, offset, length);
}
@Override
@ -1565,7 +1637,7 @@ public class TranslogTests extends ESTestCase {
public int write(ByteBuffer src) throws IOException {
if (fail.get()) {
if (fail.fail()) {
if (partialWrite) {
if (src.hasRemaining()) {
final int pos = src.position();
@ -1585,6 +1657,22 @@ public class TranslogTests extends ESTestCase {
}
return super.write(src);
}
@Override
public void force(boolean metaData) throws IOException {
if (fail.fail()) {
throw new MockDirectoryWrapper.FakeIOException();
}
super.force(metaData);
}
@Override
public long position() throws IOException {
if (fail.fail()) {
throw new MockDirectoryWrapper.FakeIOException();
}
return super.position();
}
}
private static final class UnknownException extends RuntimeException {
@ -1613,4 +1701,171 @@ public class TranslogTests extends ESTestCase {
// all is well
}
}
public void testRecoverWithUnbackedNextGen() throws IOException {
translog.add(new Translog.Index("test", "" + 0, Integer.toString(0).getBytes(Charset.forName("UTF-8"))));
Translog.TranslogGeneration translogGeneration = translog.getGeneration();
translog.close();
TranslogConfig config = translog.getConfig();
Path ckp = config.getTranslogPath().resolve(Translog.CHECKPOINT_FILE_NAME);
Checkpoint read = Checkpoint.read(ckp);
Files.copy(ckp, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)));
Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 1) + ".tlog"));
config.setTranslogGeneration(translogGeneration);
try (Translog tlog = new Translog(config)) {
assertNotNull(translogGeneration);
assertFalse(tlog.syncNeeded());
try (Translog.Snapshot snapshot = tlog.newSnapshot()) {
for (int i = 0; i < 1; i++) {
Translog.Operation next = snapshot.next();
assertNotNull("operation " + i + " must be non-null", next);
assertEquals("payload missmatch", i, Integer.parseInt(next.getSource().source.toUtf8()));
}
}
tlog.add(new Translog.Index("test", "" + 1, Integer.toString(1).getBytes(Charset.forName("UTF-8"))));
}
try (Translog tlog = new Translog(config)) {
assertNotNull(translogGeneration);
assertFalse(tlog.syncNeeded());
try (Translog.Snapshot snapshot = tlog.newSnapshot()) {
for (int i = 0; i < 2; i++) {
Translog.Operation next = snapshot.next();
assertNotNull("operation " + i + " must be non-null", next);
assertEquals("payload missmatch", i, Integer.parseInt(next.getSource().source.toUtf8()));
}
}
}
}
public void testRecoverWithUnbackedNextGenInIllegalState() throws IOException {
translog.add(new Translog.Index("test", "" + 0, Integer.toString(0).getBytes(Charset.forName("UTF-8"))));
Translog.TranslogGeneration translogGeneration = translog.getGeneration();
translog.close();
TranslogConfig config = translog.getConfig();
Path ckp = config.getTranslogPath().resolve(Translog.CHECKPOINT_FILE_NAME);
Checkpoint read = Checkpoint.read(ckp);
// don't copy the new file
Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 1) + ".tlog"));
config.setTranslogGeneration(translogGeneration);
try {
Translog tlog = new Translog(config);
fail("file already exists?");
} catch (TranslogException ex) {
// all is well
assertEquals(ex.getMessage(), "failed to create new translog file");
assertEquals(ex.getCause().getClass(), FileAlreadyExistsException.class);
}
}
public void testRecoverWithUnbackedNextGenAndFutureFile() throws IOException {
translog.add(new Translog.Index("test", "" + 0, Integer.toString(0).getBytes(Charset.forName("UTF-8"))));
Translog.TranslogGeneration translogGeneration = translog.getGeneration();
translog.close();
TranslogConfig config = translog.getConfig();
Path ckp = config.getTranslogPath().resolve(Translog.CHECKPOINT_FILE_NAME);
Checkpoint read = Checkpoint.read(ckp);
Files.copy(ckp, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)));
Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 1) + ".tlog"));
// we add N+1 and N+2 to ensure we only delete the N+1 file and never jump ahead and wipe without the right condition
Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 2) + ".tlog"));
config.setTranslogGeneration(translogGeneration);
try (Translog tlog = new Translog(config)) {
assertNotNull(translogGeneration);
assertFalse(tlog.syncNeeded());
try (Translog.Snapshot snapshot = tlog.newSnapshot()) {
for (int i = 0; i < 1; i++) {
Translog.Operation next = snapshot.next();
assertNotNull("operation " + i + " must be non-null", next);
assertEquals("payload missmatch", i, Integer.parseInt(next.getSource().source.toUtf8()));
}
}
tlog.add(new Translog.Index("test", "" + 1, Integer.toString(1).getBytes(Charset.forName("UTF-8"))));
}
try {
Translog tlog = new Translog(config);
fail("file already exists?");
} catch (TranslogException ex) {
// all is well
assertEquals(ex.getMessage(), "failed to create new translog file");
assertEquals(ex.getCause().getClass(), FileAlreadyExistsException.class);
}
}
/**
* This test adds operations to the translog which might randomly throw an IOException. The only thing this test verifies is
* that we can, after we hit an exception, open and recover the translog successfully and retrieve all successfully synced operations
* from the transaction log.
*/
public void testWithRandomException() throws IOException {
final int runs = randomIntBetween(5, 10);
for (int run = 0; run < runs; run++) {
Path tempDir = createTempDir();
final FailSwitch fail = new FailSwitch();
fail.failRandomly();
TranslogConfig config = getTranslogConfig(tempDir);
final int numOps = randomIntBetween(100, 200);
List<String> syncedDocs = new ArrayList<>();
List<String> unsynced = new ArrayList<>();
if (randomBoolean()) {
fail.onceFailedFailAlways();
}
try {
final Translog failableTLog = getFailableTranslog(fail, config, randomBoolean(), false);
try {
                    LineFileDocs lineFileDocs = new LineFileDocs(random()); // writes pretty big docs so we cross buffer borders regularly
for (int opsAdded = 0; opsAdded < numOps; opsAdded++) {
String doc = lineFileDocs.nextDoc().toString();
failableTLog.add(new Translog.Index("test", "" + opsAdded, doc.getBytes(Charset.forName("UTF-8"))));
unsynced.add(doc);
if (randomBoolean()) {
failableTLog.sync();
syncedDocs.addAll(unsynced);
unsynced.clear();
}
if (randomFloat() < 0.1) {
                            failableTLog.sync(); // we have to sync here first; otherwise, if the commit fails, we don't know whether the sync succeeded
syncedDocs.addAll(unsynced);
unsynced.clear();
if (randomBoolean()) {
failableTLog.prepareCommit();
}
failableTLog.commit();
syncedDocs.clear();
}
}
} catch (TranslogException | MockDirectoryWrapper.FakeIOException ex) {
// fair enough
} catch (IOException ex) {
assertEquals(ex.getMessage(), "__FAKE__ no space left on device");
} finally {
config.setTranslogGeneration(failableTLog.getGeneration());
IOUtils.closeWhileHandlingException(failableTLog);
}
} catch (TranslogException | MockDirectoryWrapper.FakeIOException ex) {
// failed - that's ok, we didn't even create it
}
// now randomly open this failing tlog again just to make sure we can also recover from failing during recovery
if (randomBoolean()) {
try {
IOUtils.close(getFailableTranslog(fail, config, randomBoolean(), false));
} catch (TranslogException | MockDirectoryWrapper.FakeIOException ex) {
// failed - that's ok, we didn't even create it
}
}
try (Translog translog = new Translog(config)) {
try (Translog.Snapshot snapshot = translog.newSnapshot()) {
assertEquals(syncedDocs.size(), snapshot.estimatedTotalOperations());
for (int i = 0; i < syncedDocs.size(); i++) {
                    Translog.Operation next = snapshot.next();
                    assertNotNull("operation " + i + " must be non-null", next);
                    assertEquals(syncedDocs.get(i), next.getSource().source.toUtf8());
}
}
}
}
}
}
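
The bookkeeping in testWithRandomException encodes the translog durability contract: an operation is only guaranteed to survive recovery once sync() has returned for it. A compact, runnable restatement of that tracking logic (list names follow the test; the translog calls themselves are elided):

import java.util.ArrayList;
import java.util.List;

public class SyncTrackingSketch {
    public static void main(String[] args) {
        List<String> syncedDocs = new ArrayList<>(); // guaranteed recoverable
        List<String> unsynced = new ArrayList<>();   // may or may not survive a crash
        for (int i = 0; i < 6; i++) {
            String doc = "doc-" + i;
            unsynced.add(doc); // added to the translog, but not yet durable
            if (i % 2 == 1) {
                // translog.sync() would go here; once it returns, everything pending is durable
                syncedDocs.addAll(unsynced);
                unsynced.clear();
            }
        }
        // after a simulated crash, recovery must replay at least syncedDocs, in order
        System.out.println("durable: " + syncedDocs + ", at risk: " + unsynced);
    }
}
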

View File

@ -36,7 +36,6 @@ import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequestBui
import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder;
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder;
import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequestBuilder;
import org.elasticsearch.action.percolate.MultiPercolateRequestBuilder;
import org.elasticsearch.action.percolate.PercolateRequestBuilder;
import org.elasticsearch.action.percolate.PercolateSourceBuilder;
@ -49,10 +48,7 @@ import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.suggest.SuggestBuilders;
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
import org.elasticsearch.test.ESIntegTestCase;
import static org.elasticsearch.action.percolate.PercolateSourceBuilder.docBuilder;
@ -86,7 +82,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
verify(getAliases("test1", "test2"), true);
verify(getFieldMapping("test1", "test2"), true);
verify(getMapping("test1", "test2"), true);
verify(getWarmer("test1", "test2"), true);
verify(getSettings("test1", "test2"), true);
IndicesOptions options = IndicesOptions.strictExpandOpen();
@ -107,7 +102,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
verify(getAliases("test1", "test2").setIndicesOptions(options), true);
verify(getFieldMapping("test1", "test2").setIndicesOptions(options), true);
verify(getMapping("test1", "test2").setIndicesOptions(options), true);
verify(getWarmer("test1", "test2").setIndicesOptions(options), true);
verify(getSettings("test1", "test2").setIndicesOptions(options), true);
options = IndicesOptions.lenientExpandOpen();
@ -128,7 +122,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
verify(getAliases("test1", "test2").setIndicesOptions(options), false);
verify(getFieldMapping("test1", "test2").setIndicesOptions(options), false);
verify(getMapping("test1", "test2").setIndicesOptions(options), false);
verify(getWarmer("test1", "test2").setIndicesOptions(options), false);
verify(getSettings("test1", "test2").setIndicesOptions(options), false);
options = IndicesOptions.strictExpandOpen();
@ -151,7 +144,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
verify(getAliases("test1", "test2").setIndicesOptions(options), false);
verify(getFieldMapping("test1", "test2").setIndicesOptions(options), false);
verify(getMapping("test1", "test2").setIndicesOptions(options), false);
verify(getWarmer("test1", "test2").setIndicesOptions(options), false);
verify(getSettings("test1", "test2").setIndicesOptions(options), false);
}
@ -182,7 +174,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
verify(getAliases("test1").setIndicesOptions(options), true);
verify(getFieldMapping("test1").setIndicesOptions(options), true);
verify(getMapping("test1").setIndicesOptions(options), true);
verify(getWarmer("test1").setIndicesOptions(options), true);
verify(getSettings("test1").setIndicesOptions(options), true);
options = IndicesOptions.fromOptions(true, options.allowNoIndices(), options.expandWildcardsOpen(), options.expandWildcardsClosed(), options);
@ -203,7 +194,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
verify(getAliases("test1").setIndicesOptions(options), false);
verify(getFieldMapping("test1").setIndicesOptions(options), false);
verify(getMapping("test1").setIndicesOptions(options), false);
verify(getWarmer("test1").setIndicesOptions(options), false);
verify(getSettings("test1").setIndicesOptions(options), false);
assertAcked(client().admin().indices().prepareOpen("test1"));
@ -227,7 +217,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
verify(getAliases("test1").setIndicesOptions(options), false);
verify(getFieldMapping("test1").setIndicesOptions(options), false);
verify(getMapping("test1").setIndicesOptions(options), false);
verify(getWarmer("test1").setIndicesOptions(options), false);
verify(getSettings("test1").setIndicesOptions(options), false);
}
@ -249,7 +238,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
verify(getAliases("test1").setIndicesOptions(options), true);
verify(getFieldMapping("test1").setIndicesOptions(options), true);
verify(getMapping("test1").setIndicesOptions(options), true);
verify(getWarmer("test1").setIndicesOptions(options), true);
verify(getSettings("test1").setIndicesOptions(options), true);
options = IndicesOptions.fromOptions(true, options.allowNoIndices(), options.expandWildcardsOpen(), options.expandWildcardsClosed(), options);
@ -269,7 +257,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
verify(getAliases("test1").setIndicesOptions(options), false);
verify(getFieldMapping("test1").setIndicesOptions(options), false);
verify(getMapping("test1").setIndicesOptions(options), false);
verify(getWarmer("test1").setIndicesOptions(options), false);
verify(getSettings("test1").setIndicesOptions(options), false);
assertAcked(prepareCreate("test1"));
@ -292,7 +279,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
verify(getAliases("test1").setIndicesOptions(options), false);
verify(getFieldMapping("test1").setIndicesOptions(options), false);
verify(getMapping("test1").setIndicesOptions(options), false);
verify(getWarmer("test1").setIndicesOptions(options), false);
verify(getSettings("test1").setIndicesOptions(options), false);
}
@ -346,7 +332,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
verify(getAliases(indices), false);
verify(getFieldMapping(indices), false);
verify(getMapping(indices), false);
verify(getWarmer(indices), false);
verify(getSettings(indices), false);
// Now force allow_no_indices=true
@ -368,7 +353,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
verify(getAliases(indices).setIndicesOptions(options), false);
verify(getFieldMapping(indices).setIndicesOptions(options), false);
verify(getMapping(indices).setIndicesOptions(options), false);
verify(getWarmer(indices).setIndicesOptions(options), false);
verify(getSettings(indices).setIndicesOptions(options), false);
assertAcked(prepareCreate("foobar"));
@ -393,7 +377,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
verify(getAliases(indices), false);
verify(getFieldMapping(indices), false);
verify(getMapping(indices), false);
verify(getWarmer(indices), false);
verify(getSettings(indices).setIndicesOptions(options), false);
// Verify defaults for wildcards, with two wildcard expression and one existing index
@ -415,7 +398,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
verify(getAliases(indices), false);
verify(getFieldMapping(indices), false);
verify(getMapping(indices), false);
verify(getWarmer(indices), false);
verify(getSettings(indices).setIndicesOptions(options), false);
// Now force allow_no_indices=true
@ -437,7 +419,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
verify(getAliases(indices).setIndicesOptions(options), false);
verify(getFieldMapping(indices).setIndicesOptions(options), false);
verify(getMapping(indices).setIndicesOptions(options), false);
verify(getWarmer(indices).setIndicesOptions(options), false);
verify(getSettings(indices).setIndicesOptions(options), false);
}
@ -581,34 +562,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
assertThat(client().admin().indices().prepareExists("barbaz").get().isExists(), equalTo(false));
}
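// Puts a warmer on a single concrete index and checks it is stored and retrievable by name.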
public void testPutWarmer() throws Exception {
createIndex("foobar");
ensureYellow();
verify(client().admin().indices().preparePutWarmer("warmer1").setSearchRequest(client().prepareSearch().setIndices("foobar").setQuery(QueryBuilders.matchAllQuery())), false);
assertThat(client().admin().indices().prepareGetWarmers("foobar").setWarmers("warmer1").get().getWarmers().size(), equalTo(1));
}
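// Puts warmers via a wildcard and via an empty indices list; only matching indices (or all of them) should receive the warmer.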
public void testPutWarmerWildcard() throws Exception {
createIndex("foo", "foobar", "bar", "barbaz");
ensureYellow();
verify(client().admin().indices().preparePutWarmer("warmer1").setSearchRequest(client().prepareSearch().setIndices("foo*").setQuery(QueryBuilders.matchAllQuery())), false);
assertThat(client().admin().indices().prepareGetWarmers("foo").setWarmers("warmer1").get().getWarmers().size(), equalTo(1));
assertThat(client().admin().indices().prepareGetWarmers("foobar").setWarmers("warmer1").get().getWarmers().size(), equalTo(1));
assertThat(client().admin().indices().prepareGetWarmers("bar").setWarmers("warmer1").get().getWarmers().size(), equalTo(0));
assertThat(client().admin().indices().prepareGetWarmers("barbaz").setWarmers("warmer1").get().getWarmers().size(), equalTo(0));
verify(client().admin().indices().preparePutWarmer("warmer2").setSearchRequest(client().prepareSearch().setIndices().setQuery(QueryBuilders.matchAllQuery())), false);
assertThat(client().admin().indices().prepareGetWarmers("foo").setWarmers("warmer2").get().getWarmers().size(), equalTo(1));
assertThat(client().admin().indices().prepareGetWarmers("foobar").setWarmers("warmer2").get().getWarmers().size(), equalTo(1));
assertThat(client().admin().indices().prepareGetWarmers("bar").setWarmers("warmer2").get().getWarmers().size(), equalTo(1));
assertThat(client().admin().indices().prepareGetWarmers("barbaz").setWarmers("warmer2").get().getWarmers().size(), equalTo(1));
}
public void testPutAlias() throws Exception {
createIndex("foobar");
ensureYellow();
@ -635,46 +588,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
}
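// Deleting a warmer on a non-existent index should fail, while deleting it on the owning index removes it.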
public void testDeleteWarmer() throws Exception {
SearchSourceBuilder source = new SearchSourceBuilder();
source.query(QueryBuilders.matchAllQuery());
IndexWarmersMetaData.Entry entry = new IndexWarmersMetaData.Entry("test1", new String[] { "typ1" }, false, new IndexWarmersMetaData.SearchSource(source));
assertAcked(prepareCreate("foobar").addCustom(new IndexWarmersMetaData(entry)));
ensureYellow();
verify(client().admin().indices().prepareDeleteWarmer().setIndices("foo").setNames("test1"), true);
assertThat(client().admin().indices().prepareGetWarmers("foobar").setWarmers("test1").get().getWarmers().size(), equalTo(1));
verify(client().admin().indices().prepareDeleteWarmer().setIndices("foobar").setNames("test1"), false);
assertThat(client().admin().indices().prepareGetWarmers("foobar").setWarmers("test1").get().getWarmers().size(), equalTo(0));
}
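// Wildcard deletes should only touch matching indices; once those indices are gone, "_all" clears the rest.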
public void testDeleteWarmerWildcard() throws Exception {
verify(client().admin().indices().prepareDeleteWarmer().setIndices("_all").setNames("test1"), true);
SearchSourceBuilder source = new SearchSourceBuilder();
source.query(QueryBuilders.matchAllQuery());
IndexWarmersMetaData.Entry entry = new IndexWarmersMetaData.Entry("test1", new String[] { "type1" }, false, new IndexWarmersMetaData.SearchSource(source));
assertAcked(prepareCreate("foo").addCustom(new IndexWarmersMetaData(entry)));
assertAcked(prepareCreate("foobar").addCustom(new IndexWarmersMetaData(entry)));
assertAcked(prepareCreate("bar").addCustom(new IndexWarmersMetaData(entry)));
assertAcked(prepareCreate("barbaz").addCustom(new IndexWarmersMetaData(entry)));
ensureYellow();
verify(client().admin().indices().prepareDeleteWarmer().setIndices("foo*").setNames("test1"), false);
assertThat(client().admin().indices().prepareGetWarmers("foo").setWarmers("test1").get().getWarmers().size(), equalTo(0));
assertThat(client().admin().indices().prepareGetWarmers("foobar").setWarmers("test1").get().getWarmers().size(), equalTo(0));
assertThat(client().admin().indices().prepareGetWarmers("bar").setWarmers("test1").get().getWarmers().size(), equalTo(1));
assertThat(client().admin().indices().prepareGetWarmers("barbaz").setWarmers("test1").get().getWarmers().size(), equalTo(1));
assertAcked(client().admin().indices().prepareDelete("foo*"));
verify(client().admin().indices().prepareDeleteWarmer().setIndices("foo*").setNames("test1"), true);
verify(client().admin().indices().prepareDeleteWarmer().setIndices("_all").setNames("test1"), false);
assertThat(client().admin().indices().prepareGetWarmers("bar").setWarmers("test1").get().getWarmers().size(), equalTo(0));
assertThat(client().admin().indices().prepareGetWarmers("barbaz").setWarmers("test1").get().getWarmers().size(), equalTo(0));
}
public void testPutMapping() throws Exception {
verify(client().admin().indices().preparePutMapping("foo").setType("type1").setSource("field", "type=string"), true);
verify(client().admin().indices().preparePutMapping("_all").setType("type1").setSource("field", "type=string"), true);
@ -816,10 +729,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
return client().admin().indices().prepareGetMappings(indices);
}
private static GetWarmersRequestBuilder getWarmer(String... indices) {
return client().admin().indices().prepareGetWarmers(indices);
}
private static GetSettingsRequestBuilder getSettings(String... indices) {
return client().admin().indices().prepareGetSettings(indices);
}

View File

@ -1,143 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.indices.warmer;
import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.elasticsearch.test.ESIntegTestCase.Scope;
import org.elasticsearch.test.InternalTestCluster.RestartCallback;
import org.hamcrest.Matchers;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.equalTo;
/**
*/
@ClusterScope(numDataNodes = 0, scope = Scope.TEST)
public class GatewayIndicesWarmerIT extends ESIntegTestCase {
private final ESLogger logger = Loggers.getLogger(GatewayIndicesWarmerIT.class);
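// Checks that warmers in index metadata and in templates survive a full node restart, including deletions.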
public void testStatePersistence() throws Exception {
logger.info("--> starting 1 nodes");
internalCluster().startNode();
logger.info("--> putting two templates");
createIndex("test");
ensureYellow();
assertAcked(client().admin().indices().preparePutWarmer("warmer_1")
.setSearchRequest(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "value1"))));
assertAcked(client().admin().indices().preparePutWarmer("warmer_2")
.setSearchRequest(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "value2"))));
logger.info("--> put template with warmer");
client().admin().indices().preparePutTemplate("template_1")
.setSource("{\n" +
" \"template\" : \"xxx\",\n" +
" \"warmers\" : {\n" +
" \"warmer_1\" : {\n" +
" \"types\" : [],\n" +
" \"source\" : {\n" +
" \"query\" : {\n" +
" \"match_all\" : {}\n" +
" }\n" +
" }\n" +
" }\n" +
" }\n" +
"}")
.execute().actionGet();
logger.info("--> verify warmers are registered in cluster state");
ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
IndexWarmersMetaData warmersMetaData = clusterState.metaData().index("test").custom(IndexWarmersMetaData.TYPE);
assertThat(warmersMetaData, Matchers.notNullValue());
assertThat(warmersMetaData.entries().size(), equalTo(2));
IndexWarmersMetaData templateWarmers = clusterState.metaData().templates().get("template_1").custom(IndexWarmersMetaData.TYPE);
assertThat(templateWarmers, Matchers.notNullValue());
assertThat(templateWarmers.entries().size(), equalTo(1));
logger.info("--> restarting the node");
internalCluster().fullRestart(new RestartCallback() {
@Override
public Settings onNodeStopped(String nodeName) throws Exception {
return Settings.EMPTY;
}
});
ensureYellow();
logger.info("--> verify warmers are recovered");
clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
IndexWarmersMetaData recoveredWarmersMetaData = clusterState.metaData().index("test").custom(IndexWarmersMetaData.TYPE);
assertThat(recoveredWarmersMetaData.entries().size(), equalTo(warmersMetaData.entries().size()));
for (int i = 0; i < warmersMetaData.entries().size(); i++) {
assertThat(recoveredWarmersMetaData.entries().get(i).name(), equalTo(warmersMetaData.entries().get(i).name()));
assertThat(recoveredWarmersMetaData.entries().get(i).source(), equalTo(warmersMetaData.entries().get(i).source()));
}
logger.info("--> verify warmers in template are recovered");
IndexWarmersMetaData recoveredTemplateWarmers = clusterState.metaData().templates().get("template_1").custom(IndexWarmersMetaData.TYPE);
assertThat(recoveredTemplateWarmers.entries().size(), equalTo(templateWarmers.entries().size()));
for (int i = 0; i < templateWarmers.entries().size(); i++) {
assertThat(recoveredTemplateWarmers.entries().get(i).name(), equalTo(templateWarmers.entries().get(i).name()));
assertThat(recoveredTemplateWarmers.entries().get(i).source(), equalTo(templateWarmers.entries().get(i).source()));
}
logger.info("--> delete warmer warmer_1");
DeleteWarmerResponse deleteWarmerResponse = client().admin().indices().prepareDeleteWarmer().setIndices("test").setNames("warmer_1").execute().actionGet();
assertThat(deleteWarmerResponse.isAcknowledged(), equalTo(true));
logger.info("--> verify warmers (delete) are registered in cluster state");
clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
warmersMetaData = clusterState.metaData().index("test").custom(IndexWarmersMetaData.TYPE);
assertThat(warmersMetaData, Matchers.notNullValue());
assertThat(warmersMetaData.entries().size(), equalTo(1));
logger.info("--> restarting the node");
internalCluster().fullRestart(new RestartCallback() {
@Override
public Settings onNodeStopped(String nodeName) throws Exception {
return Settings.EMPTY;
}
});
ensureYellow();
logger.info("--> verify warmers are recovered");
clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
recoveredWarmersMetaData = clusterState.metaData().index("test").custom(IndexWarmersMetaData.TYPE);
assertThat(recoveredWarmersMetaData.entries().size(), equalTo(warmersMetaData.entries().size()));
for (int i = 0; i < warmersMetaData.entries().size(); i++) {
assertThat(recoveredWarmersMetaData.entries().get(i).name(), equalTo(warmersMetaData.entries().get(i).name()));
assertThat(recoveredWarmersMetaData.entries().get(i).source(), equalTo(warmersMetaData.entries().get(i).source()));
}
}
}

View File

@ -1,159 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.indices.warmer;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import java.util.Arrays;
import java.util.List;
import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_METADATA_BLOCK;
import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_READ_BLOCK;
import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_READ_ONLY_BLOCK;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_METADATA;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_READ;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_WRITE;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY;
import static org.elasticsearch.cluster.metadata.MetaData.CLUSTER_READ_ONLY_BLOCK;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
import static org.hamcrest.Matchers.equalTo;
@ClusterScope(scope = ESIntegTestCase.Scope.TEST)
public class IndicesWarmerBlocksIT extends ESIntegTestCase {
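// Put warmer requests should be rejected under read, metadata, index read-only, and cluster read-only blocks, but allowed under a write block.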
public void testPutWarmerWithBlocks() {
createIndex("test-blocks");
ensureGreen("test-blocks");
// Index reads are blocked, the warmer can't be registered
try {
enableIndexBlock("test-blocks", SETTING_BLOCKS_READ);
assertBlocked(client().admin().indices().preparePutWarmer("warmer_blocked")
.setSearchRequest(client().prepareSearch("test-*").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())), INDEX_READ_BLOCK);
} finally {
disableIndexBlock("test-blocks", SETTING_BLOCKS_READ);
}
// Index writes are blocked, the warmer can be registered
try {
enableIndexBlock("test-blocks", SETTING_BLOCKS_WRITE);
assertAcked(client().admin().indices().preparePutWarmer("warmer_acked")
.setSearchRequest(client().prepareSearch("test-blocks").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())));
} finally {
disableIndexBlock("test-blocks", SETTING_BLOCKS_WRITE);
}
// Index metadata changes are blocked, the warmer can't be registered
try {
enableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA);
assertBlocked(client().admin().indices().preparePutWarmer("warmer_blocked")
.setSearchRequest(client().prepareSearch("test-*").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())), INDEX_METADATA_BLOCK);
} finally {
disableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA);
}
// Index is read-only, the warmer can't be registered
try {
enableIndexBlock("test-blocks", SETTING_READ_ONLY);
assertBlocked(client().admin().indices().preparePutWarmer("warmer_blocked")
.setSearchRequest(client().prepareSearch("test-*").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())), INDEX_READ_ONLY_BLOCK);
} finally {
disableIndexBlock("test-blocks", SETTING_READ_ONLY);
}
// Adding a new warmer is not possible when the cluster is read-only
try {
setClusterReadOnly(true);
assertBlocked(client().admin().indices().preparePutWarmer("warmer_blocked")
.setSearchRequest(client().prepareSearch("test-blocks").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())), CLUSTER_READ_ONLY_BLOCK);
} finally {
setClusterReadOnly(false);
}
}
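// Get warmer requests should succeed under read, write, and read-only blocks and only fail when metadata is blocked.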
public void testGetWarmerWithBlocks() {
createIndex("test-blocks");
ensureGreen("test-blocks");
assertAcked(client().admin().indices().preparePutWarmer("warmer_block")
.setSearchRequest(client().prepareSearch("test-blocks").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())));
// Request is not blocked
for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) {
try {
enableIndexBlock("test-blocks", blockSetting);
GetWarmersResponse response = client().admin().indices().prepareGetWarmers("test-blocks").get();
assertThat(response.warmers().size(), equalTo(1));
ObjectObjectCursor<String, List<IndexWarmersMetaData.Entry>> entry = response.warmers().iterator().next();
assertThat(entry.key, equalTo("test-blocks"));
assertThat(entry.value.size(), equalTo(1));
assertThat(entry.value.iterator().next().name(), equalTo("warmer_block"));
} finally {
disableIndexBlock("test-blocks", blockSetting);
}
}
// Request is blocked
try {
enableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA);
assertBlocked(client().admin().indices().prepareGetWarmers("test-blocks"), INDEX_METADATA_BLOCK);
} finally {
disableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA);
}
}
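// Delete warmer requests change metadata, so read/write blocks are tolerated while read-only and metadata blocks reject them.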
public void testDeleteWarmerWithBlocks() {
createIndex("test-blocks");
ensureGreen("test-blocks");
// Request is not blocked
for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) {
try {
assertAcked(client().admin().indices().preparePutWarmer("warmer_block")
.setSearchRequest(client().prepareSearch("test-blocks").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())));
enableIndexBlock("test-blocks", blockSetting);
assertAcked(client().admin().indices().prepareDeleteWarmer().setIndices("test-blocks").setNames("warmer_block"));
} finally {
disableIndexBlock("test-blocks", blockSetting);
}
}
// Request is blocked
for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) {
try {
assertAcked(client().admin().indices().preparePutWarmer("warmer_block")
.setSearchRequest(client().prepareSearch("test-blocks").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())));
enableIndexBlock("test-blocks", blockSetting);
assertBlocked(client().admin().indices().prepareDeleteWarmer().setIndices("test-blocks").setNames("warmer_block"));
} finally {
disableIndexBlock("test-blocks", blockSetting);
}
}
}
}

View File

@ -1,287 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.indices.warmer;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse;
import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse;
import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerResponse;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
import org.elasticsearch.search.warmer.IndexWarmerMissingException;
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
import org.elasticsearch.test.ESIntegTestCase;
import org.hamcrest.Matchers;
import java.util.List;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.is;
public class SimpleIndicesWarmerIT extends ESIntegTestCase {
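// Basic put/get round trip, including index, warmer-name, and type wildcards.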
public void testSimpleWarmers() {
createIndex("test");
ensureGreen();
PutWarmerResponse putWarmerResponse = client().admin().indices().preparePutWarmer("warmer_1")
.setSearchRequest(client().prepareSearch("test").setTypes("a1").setQuery(QueryBuilders.termQuery("field", "value1")))
.execute().actionGet();
assertThat(putWarmerResponse.isAcknowledged(), equalTo(true));
putWarmerResponse = client().admin().indices().preparePutWarmer("warmer_2")
.setSearchRequest(client().prepareSearch("test").setTypes("a2").setQuery(QueryBuilders.termQuery("field", "value2")))
.execute().actionGet();
assertThat(putWarmerResponse.isAcknowledged(), equalTo(true));
client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
client().prepareIndex("test", "type1", "2").setSource("field", "value2").setRefresh(true).execute().actionGet();
GetWarmersResponse getWarmersResponse = client().admin().indices().prepareGetWarmers("tes*")
.execute().actionGet();
assertThat(getWarmersResponse.getWarmers().size(), equalTo(1));
assertThat(getWarmersResponse.getWarmers().get("test").size(), equalTo(2));
assertThat(getWarmersResponse.getWarmers().get("test").get(0).name(), equalTo("warmer_1"));
assertThat(getWarmersResponse.getWarmers().get("test").get(1).name(), equalTo("warmer_2"));
getWarmersResponse = client().admin().indices().prepareGetWarmers("test").addWarmers("warmer_*")
.execute().actionGet();
assertThat(getWarmersResponse.getWarmers().size(), equalTo(1));
assertThat(getWarmersResponse.getWarmers().get("test").size(), equalTo(2));
assertThat(getWarmersResponse.getWarmers().get("test").get(0).name(), equalTo("warmer_1"));
assertThat(getWarmersResponse.getWarmers().get("test").get(1).name(), equalTo("warmer_2"));
getWarmersResponse = client().admin().indices().prepareGetWarmers("test").addWarmers("warmer_1")
.execute().actionGet();
assertThat(getWarmersResponse.getWarmers().size(), equalTo(1));
assertThat(getWarmersResponse.getWarmers().get("test").size(), equalTo(1));
assertThat(getWarmersResponse.getWarmers().get("test").get(0).name(), equalTo("warmer_1"));
getWarmersResponse = client().admin().indices().prepareGetWarmers("test").addWarmers("warmer_2")
.execute().actionGet();
assertThat(getWarmersResponse.getWarmers().size(), equalTo(1));
assertThat(getWarmersResponse.getWarmers().get("test").size(), equalTo(1));
assertThat(getWarmersResponse.getWarmers().get("test").get(0).name(), equalTo("warmer_2"));
getWarmersResponse = client().admin().indices().prepareGetWarmers("test").addTypes("a*").addWarmers("warmer_2")
.execute().actionGet();
assertThat(getWarmersResponse.getWarmers().size(), equalTo(1));
assertThat(getWarmersResponse.getWarmers().get("test").size(), equalTo(1));
assertThat(getWarmersResponse.getWarmers().get("test").get(0).name(), equalTo("warmer_2"));
getWarmersResponse = client().admin().indices().prepareGetWarmers("test").addTypes("a1").addWarmers("warmer_2")
.execute().actionGet();
assertThat(getWarmersResponse.getWarmers().size(), equalTo(0));
}
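// A warmer declared in an index template should show up in the metadata of indices created from it.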
public void testTemplateWarmer() {
client().admin().indices().preparePutTemplate("template_1")
.setSource("{\n" +
" \"template\" : \"*\",\n" +
" \"warmers\" : {\n" +
" \"warmer_1\" : {\n" +
" \"types\" : [],\n" +
" \"source\" : {\n" +
" \"query\" : {\n" +
" \"match_all\" : {}\n" +
" }\n" +
" }\n" +
" }\n" +
" }\n" +
"}")
.execute().actionGet();
createIndex("test");
ensureGreen();
ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
IndexWarmersMetaData warmersMetaData = clusterState.metaData().index("test").custom(IndexWarmersMetaData.TYPE);
assertThat(warmersMetaData, Matchers.notNullValue());
assertThat(warmersMetaData.entries().size(), equalTo(1));
client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
client().prepareIndex("test", "type1", "2").setSource("field", "value2").setRefresh(true).execute().actionGet();
}
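// Warmers supplied inline in the create-index source should end up in the index metadata as well.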
public void testCreateIndexWarmer() {
assertAcked(prepareCreate("test")
.setSource("{\n" +
" \"warmers\" : {\n" +
" \"warmer_1\" : {\n" +
" \"types\" : [],\n" +
" \"source\" : {\n" +
" \"query\" : {\n" +
" \"match_all\" : {}\n" +
" }\n" +
" }\n" +
" }\n" +
" }\n" +
"}"));
ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
IndexWarmersMetaData warmersMetaData = clusterState.metaData().index("test").custom(IndexWarmersMetaData.TYPE);
assertThat(warmersMetaData, Matchers.notNullValue());
assertThat(warmersMetaData.entries().size(), equalTo(1));
client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
client().prepareIndex("test", "type1", "2").setSource("field", "value2").setRefresh(true).execute().actionGet();
}
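// Deleting a warmer that was never registered should throw IndexWarmerMissingException naming the missing warmer.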
public void testDeleteNonExistentIndexWarmer() {
createIndex("test");
try {
client().admin().indices().prepareDeleteWarmer().setIndices("test").setNames("foo").execute().actionGet();
fail("warmer foo should not exist");
} catch (IndexWarmerMissingException ex) {
assertThat(ex.names()[0], equalTo("foo"));
}
}
// issue 8991
public void testDeleteAllIndexWarmerDoesNotThrowWhenNoWarmers() {
createIndex("test");
DeleteWarmerResponse deleteWarmerResponse = client().admin().indices().prepareDeleteWarmer()
.setIndices("test").setNames("_all").execute().actionGet();
assertThat(deleteWarmerResponse.isAcknowledged(), equalTo(true));
deleteWarmerResponse = client().admin().indices().prepareDeleteWarmer()
.setIndices("test").setNames("foo", "_all", "bar").execute().actionGet();
assertThat(deleteWarmerResponse.isAcknowledged(), equalTo(true));
}
public void testDeleteIndexWarmerTest() {
createIndex("test");
ensureGreen();
PutWarmerResponse putWarmerResponse = client().admin().indices().preparePutWarmer("custom_warmer")
.setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery()))
.get();
assertThat(putWarmerResponse.isAcknowledged(), equalTo(true));
GetWarmersResponse getWarmersResponse = client().admin().indices().prepareGetWarmers("test").get();
assertThat(getWarmersResponse.warmers().size(), equalTo(1));
ObjectObjectCursor<String, List<IndexWarmersMetaData.Entry>> entry = getWarmersResponse.warmers().iterator().next();
assertThat(entry.key, equalTo("test"));
assertThat(entry.value.size(), equalTo(1));
assertThat(entry.value.iterator().next().name(), equalTo("custom_warmer"));
DeleteWarmerResponse deleteWarmerResponse = client().admin().indices().prepareDeleteWarmer().setIndices("test").setNames("custom_warmer").get();
assertThat(deleteWarmerResponse.isAcknowledged(), equalTo(true));
getWarmersResponse = client().admin().indices().prepareGetWarmers("test").get();
assertThat(getWarmersResponse.warmers().size(), equalTo(0));
}
// issue 3246
public void testEnsureThatIndexWarmersCanBeChangedOnRuntime() throws Exception {
createIndex("test");
ensureGreen();
PutWarmerResponse putWarmerResponse = client().admin().indices().preparePutWarmer("custom_warmer")
.setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery()))
.execute().actionGet();
assertThat(putWarmerResponse.isAcknowledged(), equalTo(true));
client().prepareIndex("test", "test", "1").setSource("foo", "bar").setRefresh(true).execute().actionGet();
logger.info("--> Disabling warmers execution");
client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.warmer.enabled", false)).execute().actionGet();
long warmerRunsAfterDisabling = getWarmerRuns();
assertThat(warmerRunsAfterDisabling, greaterThanOrEqualTo(1L));
client().prepareIndex("test", "test", "2").setSource("foo2", "bar2").setRefresh(true).execute().actionGet();
assertThat(getWarmerRuns(), equalTo(warmerRunsAfterDisabling));
}
public void testGettingAllWarmersUsingAllAndWildcardsShouldWork() throws Exception {
createIndex("test");
ensureGreen();
PutWarmerResponse putWarmerResponse = client().admin().indices().preparePutWarmer("custom_warmer")
.setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery()))
.execute().actionGet();
assertThat(putWarmerResponse.isAcknowledged(), equalTo(true));
PutWarmerResponse anotherPutWarmerResponse = client().admin().indices().preparePutWarmer("second_custom_warmer")
.setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery()))
.execute().actionGet();
assertThat(anotherPutWarmerResponse.isAcknowledged(), equalTo(true));
GetWarmersResponse getWarmersResponse = client().admin().indices().prepareGetWarmers("*").addWarmers("*").get();
assertThat(getWarmersResponse.warmers().size(), is(1));
getWarmersResponse = client().admin().indices().prepareGetWarmers("_all").addWarmers("_all").get();
assertThat(getWarmersResponse.warmers().size(), is(1));
getWarmersResponse = client().admin().indices().prepareGetWarmers("t*").addWarmers("c*").get();
assertThat(getWarmersResponse.warmers().size(), is(1));
getWarmersResponse = client().admin().indices().prepareGetWarmers("test").addWarmers("custom_warmer", "second_custom_warmer").get();
assertThat(getWarmersResponse.warmers().size(), is(1));
}
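// Reads the total number of warmer executions for the "test" index from the stats API.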
private long getWarmerRuns() {
IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats("test").clear().setWarmer(true).execute().actionGet();
return indicesStatsResponse.getIndex("test").getPrimaries().warmer.total();
}
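// A warmer's explicit request-cache flag should override a disabled index default; with no flag the warmer falls back to the index-level setting.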
public void testQueryCacheOnWarmer() {
createIndex("test");
ensureGreen();
assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED, false)));
logger.info("register warmer with no query cache, validate no cache is used");
assertAcked(client().admin().indices().preparePutWarmer("warmer_1")
.setSearchRequest(client().prepareSearch("test").setTypes("a1").setQuery(QueryBuilders.matchAllQuery()))
.get());
client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
assertThat(client().admin().indices().prepareStats("test").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), equalTo(0L));
logger.info("register warmer with query cache, validate caching happened");
assertAcked(client().admin().indices().preparePutWarmer("warmer_1")
.setSearchRequest(client().prepareSearch("test").setTypes("a1").setQuery(QueryBuilders.matchAllQuery()).setRequestCache(true))
.get());
// index again, to make sure it gets refreshed
client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
assertThat(client().admin().indices().prepareStats("test").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), greaterThan(0L));
client().admin().indices().prepareClearCache().setRequestCache(true).get(); // clean the cache
assertThat(client().admin().indices().prepareStats("test").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), equalTo(0L));
logger.info("enable default query caching on the index level, and test that no flag on warmer still caches");
assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED, true)));
assertAcked(client().admin().indices().preparePutWarmer("warmer_1")
.setSearchRequest(client().prepareSearch("test").setTypes("a1").setQuery(QueryBuilders.matchAllQuery()))
.get());
// index again, to make sure it gets refreshed
client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
assertThat(client().admin().indices().prepareStats("test").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), greaterThan(0L));
}
}

View File

@ -21,6 +21,7 @@ package org.elasticsearch.percolator;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.percolate.PercolateResponse;
import org.elasticsearch.action.percolate.PercolateSourceBuilder;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.xcontent.XContentBuilder;
@ -187,18 +188,21 @@ public class ConcurrentPercolatorIT extends ESIntegTestCase {
case 0:
response = client().prepareIndex("index", PercolatorService.TYPE_NAME, id)
.setSource(onlyField1)
.setRefresh(true)
.execute().actionGet();
type1.incrementAndGet();
break;
case 1:
response = client().prepareIndex("index", PercolatorService.TYPE_NAME, id)
.setSource(onlyField2)
.setRefresh(true)
.execute().actionGet();
type2.incrementAndGet();
break;
case 2:
response = client().prepareIndex("index", PercolatorService.TYPE_NAME, id)
.setSource(field1And2)
.setRefresh(true)
.execute().actionGet();
type3.incrementAndGet();
break;
@ -247,7 +251,7 @@ public class ConcurrentPercolatorIT extends ESIntegTestCase {
.setSource(onlyField1Doc).execute().actionGet();
assertNoFailures(response);
assertThat(response.getSuccessfulShards(), equalTo(response.getTotalShards()));
assertThat(response.getMatches().length, greaterThanOrEqualTo(atLeastExpected));
assertThat(response.getCount(), greaterThanOrEqualTo((long) atLeastExpected));
break;
case 1:
atLeastExpected = type2.get();
@ -255,7 +259,7 @@ public class ConcurrentPercolatorIT extends ESIntegTestCase {
.setSource(onlyField2Doc).execute().actionGet();
assertNoFailures(response);
assertThat(response.getSuccessfulShards(), equalTo(response.getTotalShards()));
assertThat(response.getMatches().length, greaterThanOrEqualTo(atLeastExpected));
assertThat(response.getCount(), greaterThanOrEqualTo((long) atLeastExpected));
break;
case 2:
atLeastExpected = type3.get();
@ -263,7 +267,7 @@ public class ConcurrentPercolatorIT extends ESIntegTestCase {
.setSource(field1AndField2Doc).execute().actionGet();
assertNoFailures(response);
assertThat(response.getSuccessfulShards(), equalTo(response.getTotalShards()));
assertThat(response.getMatches().length, greaterThanOrEqualTo(atLeastExpected));
assertThat(response.getCount(), greaterThanOrEqualTo((long) atLeastExpected));
break;
}
}
@ -327,6 +331,7 @@ public class ConcurrentPercolatorIT extends ESIntegTestCase {
} while (!liveIds.remove(id));
DeleteResponse response = client().prepareDelete("index", PercolatorService.TYPE_NAME, id)
.setRefresh(true)
.execute().actionGet();
assertThat(response.getId(), equalTo(id));
assertThat("doc[" + id + "] should have been deleted, but isn't", response.isFound(), equalTo(true));
@ -334,6 +339,7 @@ public class ConcurrentPercolatorIT extends ESIntegTestCase {
String id = Integer.toString(idGen.getAndIncrement());
IndexResponse response = client().prepareIndex("index", PercolatorService.TYPE_NAME, id)
.setSource(doc)
.setRefresh(true)
.execute().actionGet();
liveIds.add(id);
assertThat(response.isCreated(), equalTo(true)); // We only add new docs
@ -357,9 +363,9 @@ public class ConcurrentPercolatorIT extends ESIntegTestCase {
indexThreads[i].start();
}
XContentBuilder percolateDoc = XContentFactory.jsonBuilder().startObject().startObject("doc")
String percolateDoc = XContentFactory.jsonBuilder().startObject()
.field("field1", "value")
.endObject().endObject();
.endObject().string();
for (int counter = 0; counter < numberPercolateOperation; counter++) {
Thread.sleep(5);
semaphore.acquire(numIndexThreads);
@ -369,7 +375,9 @@ public class ConcurrentPercolatorIT extends ESIntegTestCase {
}
int atLeastExpected = liveIds.size();
PercolateResponse response = client().preparePercolate().setIndices("index").setDocumentType("type")
.setSource(percolateDoc).execute().actionGet();
.setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc(percolateDoc))
.setSize(atLeastExpected)
.get();
assertThat(response.getShardFailures(), emptyArray());
assertThat(response.getSuccessfulShards(), equalTo(response.getTotalShards()));
assertThat(response.getMatches().length, equalTo(atLeastExpected));

Some files were not shown because too many files have changed in this diff.