Merge branch 'feature/ingest' into enhancement/move_to_core
This commit is contained in:
commit
1ea690e814
|
@ -102,8 +102,8 @@ if (isEclipse) {
|
|||
}
|
||||
}
|
||||
|
||||
compileJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-fallthrough,-overrides,-rawtypes,-serial,-try,-unchecked"
|
||||
compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-fallthrough,-overrides,-rawtypes,-serial,-try,-unchecked"
|
||||
compileJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-serial,-try,-unchecked"
|
||||
compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-serial,-try,-unchecked"
|
||||
|
||||
forbiddenPatterns {
|
||||
exclude '**/*.json'
|
||||
|
|
|
@ -566,7 +566,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
|
|||
REFRESH_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.RefreshFailedEngineException.class, org.elasticsearch.index.engine.RefreshFailedEngineException::new, 90),
|
||||
AGGREGATION_INITIALIZATION_EXCEPTION(org.elasticsearch.search.aggregations.AggregationInitializationException.class, org.elasticsearch.search.aggregations.AggregationInitializationException::new, 91),
|
||||
DELAY_RECOVERY_EXCEPTION(org.elasticsearch.indices.recovery.DelayRecoveryException.class, org.elasticsearch.indices.recovery.DelayRecoveryException::new, 92),
|
||||
INDEX_WARMER_MISSING_EXCEPTION(org.elasticsearch.search.warmer.IndexWarmerMissingException.class, org.elasticsearch.search.warmer.IndexWarmerMissingException::new, 93),
|
||||
// 93 used to be for IndexWarmerMissingException
|
||||
NO_NODE_AVAILABLE_EXCEPTION(org.elasticsearch.client.transport.NoNodeAvailableException.class, org.elasticsearch.client.transport.NoNodeAvailableException::new, 94),
|
||||
INVALID_SNAPSHOT_NAME_EXCEPTION(org.elasticsearch.snapshots.InvalidSnapshotNameException.class, org.elasticsearch.snapshots.InvalidSnapshotNameException::new, 96),
|
||||
ILLEGAL_INDEX_SHARD_STATE_EXCEPTION(org.elasticsearch.index.shard.IllegalIndexShardStateException.class, org.elasticsearch.index.shard.IllegalIndexShardStateException::new, 97),
|
||||
|
|
|
@ -25,7 +25,6 @@ import org.elasticsearch.common.SuppressForbidden;
|
|||
import org.elasticsearch.common.inject.AbstractModule;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.lucene.Lucene;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.monitor.jvm.JvmInfo;
|
||||
|
||||
|
@ -286,7 +285,8 @@ public class Version {
|
|||
public static final Version CURRENT = V_3_0_0;
|
||||
|
||||
static {
|
||||
assert CURRENT.luceneVersion.equals(Lucene.VERSION) : "Version must be upgraded to [" + Lucene.VERSION + "] is still set to [" + CURRENT.luceneVersion + "]";
|
||||
assert CURRENT.luceneVersion.equals(org.apache.lucene.util.Version.LATEST) : "Version must be upgraded to ["
|
||||
+ org.apache.lucene.util.Version.LATEST + "] is still set to [" + CURRENT.luceneVersion + "]";
|
||||
}
|
||||
|
||||
public static Version readVersion(StreamInput in) throws IOException {
|
||||
|
@ -457,7 +457,6 @@ public class Version {
|
|||
return V_0_90_0_RC1;
|
||||
case V_0_90_0_Beta1_ID:
|
||||
return V_0_90_0_Beta1;
|
||||
|
||||
case V_0_20_7_ID:
|
||||
return V_0_20_7;
|
||||
case V_0_20_6_ID:
|
||||
|
@ -476,7 +475,6 @@ public class Version {
|
|||
return V_0_20_0;
|
||||
case V_0_20_0_RC1_ID:
|
||||
return V_0_20_0_RC1;
|
||||
|
||||
case V_0_19_0_RC1_ID:
|
||||
return V_0_19_0_RC1;
|
||||
case V_0_19_0_RC2_ID:
|
||||
|
@ -511,7 +509,6 @@ public class Version {
|
|||
return V_0_19_12;
|
||||
case V_0_19_13_ID:
|
||||
return V_0_19_13;
|
||||
|
||||
case V_0_18_0_ID:
|
||||
return V_0_18_0;
|
||||
case V_0_18_1_ID:
|
||||
|
@ -530,9 +527,8 @@ public class Version {
|
|||
return V_0_18_7;
|
||||
case V_0_18_8_ID:
|
||||
return V_0_18_8;
|
||||
|
||||
default:
|
||||
return new Version(id, false, Lucene.VERSION);
|
||||
return new Version(id, false, org.apache.lucene.util.Version.LATEST);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -127,12 +127,6 @@ import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeAction;
|
|||
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeSettingsAction;
|
||||
import org.elasticsearch.action.admin.indices.validate.query.TransportValidateQueryAction;
|
||||
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction;
|
||||
import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerAction;
|
||||
import org.elasticsearch.action.admin.indices.warmer.delete.TransportDeleteWarmerAction;
|
||||
import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersAction;
|
||||
import org.elasticsearch.action.admin.indices.warmer.get.TransportGetWarmersAction;
|
||||
import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerAction;
|
||||
import org.elasticsearch.action.admin.indices.warmer.put.TransportPutWarmerAction;
|
||||
import org.elasticsearch.action.bulk.BulkAction;
|
||||
import org.elasticsearch.action.bulk.TransportBulkAction;
|
||||
import org.elasticsearch.action.bulk.TransportShardBulkAction;
|
||||
|
@ -323,9 +317,6 @@ public class ActionModule extends AbstractModule {
|
|||
registerAction(UpgradeStatusAction.INSTANCE, TransportUpgradeStatusAction.class);
|
||||
registerAction(UpgradeSettingsAction.INSTANCE, TransportUpgradeSettingsAction.class);
|
||||
registerAction(ClearIndicesCacheAction.INSTANCE, TransportClearIndicesCacheAction.class);
|
||||
registerAction(PutWarmerAction.INSTANCE, TransportPutWarmerAction.class);
|
||||
registerAction(DeleteWarmerAction.INSTANCE, TransportDeleteWarmerAction.class);
|
||||
registerAction(GetWarmersAction.INSTANCE, TransportGetWarmersAction.class);
|
||||
registerAction(GetAliasesAction.INSTANCE, TransportGetAliasesAction.class);
|
||||
registerAction(AliasesExistAction.INSTANCE, TransportAliasesExistAction.class);
|
||||
registerAction(GetSettingsAction.INSTANCE, TransportGetSettingsAction.class);
|
||||
|
|
|
@ -35,7 +35,7 @@ public interface AliasesRequest extends IndicesRequest.Replaceable {
|
|||
/**
|
||||
* Sets the array of aliases that the action relates to
|
||||
*/
|
||||
AliasesRequest aliases(String[] aliases);
|
||||
AliasesRequest aliases(String... aliases);
|
||||
|
||||
/**
|
||||
* Returns true if wildcards expressions among aliases should be resolved, false otherwise
|
||||
|
|
|
@ -41,9 +41,9 @@ public interface IndicesRequest {
|
|||
IndicesOptions indicesOptions();
|
||||
|
||||
static interface Replaceable extends IndicesRequest {
|
||||
/*
|
||||
* Sets the array of indices that the action relates to
|
||||
/**
|
||||
* Sets the indices that the action relates to.
|
||||
*/
|
||||
IndicesRequest indices(String[] indices);
|
||||
IndicesRequest indices(String... indices);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -61,7 +61,7 @@ public class ClusterHealthRequest extends MasterNodeReadRequest<ClusterHealthReq
|
|||
}
|
||||
|
||||
@Override
|
||||
public ClusterHealthRequest indices(String[] indices) {
|
||||
public ClusterHealthRequest indices(String... indices) {
|
||||
this.indices = indices;
|
||||
return this;
|
||||
}
|
||||
|
|
|
@ -51,7 +51,7 @@ public class IndicesExistsRequest extends MasterNodeReadRequest<IndicesExistsReq
|
|||
}
|
||||
|
||||
@Override
|
||||
public IndicesExistsRequest indices(String[] indices) {
|
||||
public IndicesExistsRequest indices(String... indices) {
|
||||
this.indices = indices;
|
||||
return this;
|
||||
}
|
||||
|
|
|
@ -52,7 +52,7 @@ public class TypesExistsRequest extends MasterNodeReadRequest<TypesExistsRequest
|
|||
}
|
||||
|
||||
@Override
|
||||
public TypesExistsRequest indices(String[] indices) {
|
||||
public TypesExistsRequest indices(String... indices) {
|
||||
this.indices = indices;
|
||||
return this;
|
||||
}
|
||||
|
|
|
@ -37,8 +37,7 @@ public class GetIndexRequest extends ClusterInfoRequest<GetIndexRequest> {
|
|||
public static enum Feature {
|
||||
ALIASES((byte) 0, "_aliases", "_alias"),
|
||||
MAPPINGS((byte) 1, "_mappings", "_mapping"),
|
||||
SETTINGS((byte) 2, "_settings"),
|
||||
WARMERS((byte) 3, "_warmers", "_warmer");
|
||||
SETTINGS((byte) 2, "_settings");
|
||||
|
||||
private static final Feature[] FEATURES = new Feature[Feature.values().length];
|
||||
|
||||
|
@ -97,7 +96,7 @@ public class GetIndexRequest extends ClusterInfoRequest<GetIndexRequest> {
|
|||
}
|
||||
}
|
||||
|
||||
private static final Feature[] DEFAULT_FEATURES = new Feature[] { Feature.ALIASES, Feature.MAPPINGS, Feature.SETTINGS, Feature.WARMERS };
|
||||
private static final Feature[] DEFAULT_FEATURES = new Feature[] { Feature.ALIASES, Feature.MAPPINGS, Feature.SETTINGS };
|
||||
private Feature[] features = DEFAULT_FEATURES;
|
||||
private boolean humanReadable = false;
|
||||
|
||||
|
|
|
@ -27,7 +27,6 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
|
|||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
|
@ -39,19 +38,15 @@ import java.util.List;
|
|||
*/
|
||||
public class GetIndexResponse extends ActionResponse {
|
||||
|
||||
private ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> warmers = ImmutableOpenMap.of();
|
||||
private ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings = ImmutableOpenMap.of();
|
||||
private ImmutableOpenMap<String, List<AliasMetaData>> aliases = ImmutableOpenMap.of();
|
||||
private ImmutableOpenMap<String, Settings> settings = ImmutableOpenMap.of();
|
||||
private String[] indices;
|
||||
|
||||
GetIndexResponse(String[] indices, ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> warmers,
|
||||
GetIndexResponse(String[] indices,
|
||||
ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings,
|
||||
ImmutableOpenMap<String, List<AliasMetaData>> aliases, ImmutableOpenMap<String, Settings> settings) {
|
||||
this.indices = indices;
|
||||
if (warmers != null) {
|
||||
this.warmers = warmers;
|
||||
}
|
||||
if (mappings != null) {
|
||||
this.mappings = mappings;
|
||||
}
|
||||
|
@ -74,14 +69,6 @@ public class GetIndexResponse extends ActionResponse {
|
|||
return indices();
|
||||
}
|
||||
|
||||
public ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> warmers() {
|
||||
return warmers;
|
||||
}
|
||||
|
||||
public ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> getWarmers() {
|
||||
return warmers();
|
||||
}
|
||||
|
||||
public ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings() {
|
||||
return mappings;
|
||||
}
|
||||
|
@ -110,23 +97,6 @@ public class GetIndexResponse extends ActionResponse {
|
|||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
this.indices = in.readStringArray();
|
||||
int warmersSize = in.readVInt();
|
||||
ImmutableOpenMap.Builder<String, List<IndexWarmersMetaData.Entry>> warmersMapBuilder = ImmutableOpenMap.builder();
|
||||
for (int i = 0; i < warmersSize; i++) {
|
||||
String key = in.readString();
|
||||
int valueSize = in.readVInt();
|
||||
List<IndexWarmersMetaData.Entry> warmerEntryBuilder = new ArrayList<>();
|
||||
for (int j = 0; j < valueSize; j++) {
|
||||
warmerEntryBuilder.add(new IndexWarmersMetaData.Entry(
|
||||
in.readString(),
|
||||
in.readStringArray(),
|
||||
in.readOptionalBoolean(),
|
||||
in.readBoolean() ? new IndexWarmersMetaData.SearchSource(in) : null)
|
||||
);
|
||||
}
|
||||
warmersMapBuilder.put(key, Collections.unmodifiableList(warmerEntryBuilder));
|
||||
}
|
||||
warmers = warmersMapBuilder.build();
|
||||
int mappingsSize = in.readVInt();
|
||||
ImmutableOpenMap.Builder<String, ImmutableOpenMap<String, MappingMetaData>> mappingsMapBuilder = ImmutableOpenMap.builder();
|
||||
for (int i = 0; i < mappingsSize; i++) {
|
||||
|
@ -164,21 +134,6 @@ public class GetIndexResponse extends ActionResponse {
|
|||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
out.writeStringArray(indices);
|
||||
out.writeVInt(warmers.size());
|
||||
for (ObjectObjectCursor<String, List<IndexWarmersMetaData.Entry>> indexEntry : warmers) {
|
||||
out.writeString(indexEntry.key);
|
||||
out.writeVInt(indexEntry.value.size());
|
||||
for (IndexWarmersMetaData.Entry warmerEntry : indexEntry.value) {
|
||||
out.writeString(warmerEntry.name());
|
||||
out.writeStringArray(warmerEntry.types());
|
||||
out.writeOptionalBoolean(warmerEntry.requestCache());
|
||||
boolean hasSource = warmerEntry.source() != null;
|
||||
out.writeBoolean(hasSource);
|
||||
if (hasSource) {
|
||||
warmerEntry.source().writeTo(out);
|
||||
}
|
||||
}
|
||||
}
|
||||
out.writeVInt(mappings.size());
|
||||
for (ObjectObjectCursor<String, ImmutableOpenMap<String, MappingMetaData>> indexEntry : mappings) {
|
||||
out.writeString(indexEntry.key);
|
||||
|
|
|
@ -36,7 +36,6 @@ import org.elasticsearch.common.Strings;
|
|||
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.search.warmer.IndexWarmersMetaData.Entry;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
|
@ -72,7 +71,6 @@ public class TransportGetIndexAction extends TransportClusterInfoAction<GetIndex
|
|||
@Override
|
||||
protected void doMasterOperation(final GetIndexRequest request, String[] concreteIndices, final ClusterState state,
|
||||
final ActionListener<GetIndexResponse> listener) {
|
||||
ImmutableOpenMap<String, List<Entry>> warmersResult = ImmutableOpenMap.of();
|
||||
ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappingsResult = ImmutableOpenMap.of();
|
||||
ImmutableOpenMap<String, List<AliasMetaData>> aliasesResult = ImmutableOpenMap.of();
|
||||
ImmutableOpenMap<String, Settings> settings = ImmutableOpenMap.of();
|
||||
|
@ -80,15 +78,8 @@ public class TransportGetIndexAction extends TransportClusterInfoAction<GetIndex
|
|||
boolean doneAliases = false;
|
||||
boolean doneMappings = false;
|
||||
boolean doneSettings = false;
|
||||
boolean doneWarmers = false;
|
||||
for (Feature feature : features) {
|
||||
switch (feature) {
|
||||
case WARMERS:
|
||||
if (!doneWarmers) {
|
||||
warmersResult = state.metaData().findWarmers(concreteIndices, request.types(), Strings.EMPTY_ARRAY);
|
||||
doneWarmers = true;
|
||||
}
|
||||
break;
|
||||
case MAPPINGS:
|
||||
if (!doneMappings) {
|
||||
mappingsResult = state.metaData().findMappings(concreteIndices, request.types());
|
||||
|
@ -120,6 +111,6 @@ public class TransportGetIndexAction extends TransportClusterInfoAction<GetIndex
|
|||
throw new IllegalStateException("feature [" + feature + "] is not valid");
|
||||
}
|
||||
}
|
||||
listener.onResponse(new GetIndexResponse(concreteIndices, warmersResult, mappingsResult, aliasesResult, settings));
|
||||
listener.onResponse(new GetIndexResponse(concreteIndices, mappingsResult, aliasesResult, settings));
|
||||
}
|
||||
}
|
||||
|
|
|
@ -20,6 +20,7 @@
|
|||
package org.elasticsearch.action.admin.indices.mapping.put;
|
||||
|
||||
import com.carrotsearch.hppc.ObjectHashSet;
|
||||
|
||||
import org.elasticsearch.ElasticsearchGenerationException;
|
||||
import org.elasticsearch.action.ActionRequestValidationException;
|
||||
import org.elasticsearch.action.IndicesRequest;
|
||||
|
@ -96,7 +97,7 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
|
|||
* Sets the indices this put mapping operation will execute on.
|
||||
*/
|
||||
@Override
|
||||
public PutMappingRequest indices(String[] indices) {
|
||||
public PutMappingRequest indices(String... indices) {
|
||||
this.indices = indices;
|
||||
return this;
|
||||
}
|
||||
|
|
|
@ -1,46 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.indices.warmer.delete;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
/**
|
||||
* Action for the admin/warmers/delete API.
|
||||
*/
|
||||
public class DeleteWarmerAction extends Action<DeleteWarmerRequest, DeleteWarmerResponse, DeleteWarmerRequestBuilder> {
|
||||
|
||||
public static final DeleteWarmerAction INSTANCE = new DeleteWarmerAction();
|
||||
public static final String NAME = "indices:admin/warmers/delete";
|
||||
|
||||
private DeleteWarmerAction() {
|
||||
super(NAME);
|
||||
}
|
||||
|
||||
@Override
|
||||
public DeleteWarmerResponse newResponse() {
|
||||
return new DeleteWarmerResponse();
|
||||
}
|
||||
|
||||
@Override
|
||||
public DeleteWarmerRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new DeleteWarmerRequestBuilder(client, this);
|
||||
}
|
||||
}
|
|
@ -1,147 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.action.admin.indices.warmer.delete;
|
||||
|
||||
import org.elasticsearch.action.ActionRequestValidationException;
|
||||
import org.elasticsearch.action.IndicesRequest;
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.action.support.master.AcknowledgedRequest;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.util.CollectionUtils;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import static org.elasticsearch.action.ValidateActions.addValidationError;
|
||||
|
||||
/**
|
||||
* A request that deletes a index warmer (name, {@link org.elasticsearch.action.search.SearchRequest})
|
||||
* tuple from the clusters metadata.
|
||||
*/
|
||||
public class DeleteWarmerRequest extends AcknowledgedRequest<DeleteWarmerRequest> implements IndicesRequest.Replaceable {
|
||||
|
||||
private String[] names = Strings.EMPTY_ARRAY;
|
||||
private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, false);
|
||||
private String[] indices = Strings.EMPTY_ARRAY;
|
||||
|
||||
public DeleteWarmerRequest() {
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructs a new delete warmer request for the specified name.
|
||||
*
|
||||
* @param names the name (or wildcard expression) of the warmer to match, null to delete all.
|
||||
*/
|
||||
public DeleteWarmerRequest(String... names) {
|
||||
names(names);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ActionRequestValidationException validate() {
|
||||
ActionRequestValidationException validationException = null;
|
||||
if (CollectionUtils.isEmpty(names)) {
|
||||
validationException = addValidationError("warmer names are missing", validationException);
|
||||
} else {
|
||||
validationException = checkForEmptyString(validationException, names);
|
||||
}
|
||||
if (CollectionUtils.isEmpty(indices)) {
|
||||
validationException = addValidationError("indices are missing", validationException);
|
||||
} else {
|
||||
validationException = checkForEmptyString(validationException, indices);
|
||||
}
|
||||
return validationException;
|
||||
}
|
||||
|
||||
private ActionRequestValidationException checkForEmptyString(ActionRequestValidationException validationException, String[] strings) {
|
||||
boolean containsEmptyString = false;
|
||||
for (String string : strings) {
|
||||
if (!Strings.hasText(string)) {
|
||||
containsEmptyString = true;
|
||||
}
|
||||
}
|
||||
if (containsEmptyString) {
|
||||
validationException = addValidationError("types must not contain empty strings", validationException);
|
||||
}
|
||||
return validationException;
|
||||
}
|
||||
|
||||
/**
|
||||
* The name to delete.
|
||||
*/
|
||||
@Nullable
|
||||
public String[] names() {
|
||||
return names;
|
||||
}
|
||||
|
||||
/**
|
||||
* The name (or wildcard expression) of the index warmer to delete, or null
|
||||
* to delete all warmers.
|
||||
*/
|
||||
public DeleteWarmerRequest names(@Nullable String... names) {
|
||||
this.names = names;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the indices this put mapping operation will execute on.
|
||||
*/
|
||||
@Override
|
||||
public DeleteWarmerRequest indices(String... indices) {
|
||||
this.indices = indices;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* The indices the mappings will be put.
|
||||
*/
|
||||
@Override
|
||||
public String[] indices() {
|
||||
return indices;
|
||||
}
|
||||
|
||||
@Override
|
||||
public IndicesOptions indicesOptions() {
|
||||
return indicesOptions;
|
||||
}
|
||||
|
||||
public DeleteWarmerRequest indicesOptions(IndicesOptions indicesOptions) {
|
||||
this.indicesOptions = indicesOptions;
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
names = in.readStringArray();
|
||||
indices = in.readStringArray();
|
||||
indicesOptions = IndicesOptions.readIndicesOptions(in);
|
||||
readTimeout(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
out.writeStringArrayNullable(names);
|
||||
out.writeStringArrayNullable(indices);
|
||||
indicesOptions.writeIndicesOptions(out);
|
||||
writeTimeout(out);
|
||||
}
|
||||
}
|
|
@ -1,60 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.indices.warmer.delete;
|
||||
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
/**
|
||||
* A builder for the {@link DeleteWarmerRequest}
|
||||
*
|
||||
* @see DeleteWarmerRequest for details
|
||||
*/
|
||||
public class DeleteWarmerRequestBuilder extends AcknowledgedRequestBuilder<DeleteWarmerRequest, DeleteWarmerResponse, DeleteWarmerRequestBuilder> {
|
||||
|
||||
public DeleteWarmerRequestBuilder(ElasticsearchClient client, DeleteWarmerAction action) {
|
||||
super(client, action, new DeleteWarmerRequest());
|
||||
}
|
||||
|
||||
public DeleteWarmerRequestBuilder setIndices(String... indices) {
|
||||
request.indices(indices);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* The name (or wildcard expression) of the index warmer to delete, or null
|
||||
* to delete all warmers.
|
||||
*/
|
||||
public DeleteWarmerRequestBuilder setNames(String... names) {
|
||||
request.names(names);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Specifies what type of requested indices to ignore and wildcard indices expressions.
|
||||
* <p>
|
||||
* For example indices that don't exist.
|
||||
*/
|
||||
public DeleteWarmerRequestBuilder setIndicesOptions(IndicesOptions options) {
|
||||
request.indicesOptions(options);
|
||||
return this;
|
||||
}
|
||||
}
|
|
@ -1,51 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.action.admin.indices.warmer.delete;
|
||||
|
||||
import org.elasticsearch.action.support.master.AcknowledgedResponse;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
/**
|
||||
* An acknowledged response of delete warmer operation.
|
||||
*/
|
||||
public class DeleteWarmerResponse extends AcknowledgedResponse {
|
||||
|
||||
DeleteWarmerResponse() {
|
||||
super();
|
||||
}
|
||||
|
||||
DeleteWarmerResponse(boolean acknowledged) {
|
||||
super(acknowledged);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
readAcknowledged(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
writeAcknowledged(out);
|
||||
}
|
||||
}
|
|
@ -1,163 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.action.admin.indices.warmer.delete;
|
||||
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
|
||||
import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.block.ClusterBlockException;
|
||||
import org.elasticsearch.cluster.block.ClusterBlockLevel;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.regex.Regex;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.IndexNotFoundException;
|
||||
import org.elasticsearch.search.warmer.IndexWarmerMissingException;
|
||||
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
|
||||
/**
 * Internal Actions executed on the master deleting the warmer from the cluster state metadata.
 *
 * Note: this is an internal API and should not be used / called by any client code.
 */
public class TransportDeleteWarmerAction extends TransportMasterNodeAction<DeleteWarmerRequest, DeleteWarmerResponse> {

    @Inject
    public TransportDeleteWarmerAction(Settings settings, TransportService transportService, ClusterService clusterService,
                                       ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, DeleteWarmerAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, DeleteWarmerRequest::new);
    }

    @Override
    protected String executor() {
        // we go async right away
        return ThreadPool.Names.SAME;
    }

    @Override
    protected DeleteWarmerResponse newResponse() {
        return new DeleteWarmerResponse();
    }

    @Override
    protected ClusterBlockException checkBlock(DeleteWarmerRequest request, ClusterState state) {
        // deleting a warmer mutates index metadata, so check the metadata-write block
        return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndices(state, request));
    }

    @Override
    protected void masterOperation(final DeleteWarmerRequest request, final ClusterState state, final ActionListener<DeleteWarmerResponse> listener) {
        final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
        clusterService.submitStateUpdateTask("delete_warmer [" + Arrays.toString(request.names()) + "]", new AckedClusterStateUpdateTask<DeleteWarmerResponse>(request, listener) {

            @Override
            protected DeleteWarmerResponse newResponse(boolean acknowledged) {
                return new DeleteWarmerResponse(acknowledged);
            }

            @Override
            public void onFailure(String source, Throwable t) {
                logger.debug("failed to delete warmer [{}] on indices [{}]", t, Arrays.toString(request.names()), concreteIndices);
                super.onFailure(source, t);
            }

            @Override
            public ClusterState execute(ClusterState currentState) {
                MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());

                boolean globalFoundAtLeastOne = false;
                boolean deleteAll = false;
                // detect whether the request asks for a wildcard ("_all") delete
                for (int i=0; i<request.names().length; i++){
                    if (request.names()[i].equals(MetaData.ALL)) {
                        deleteAll = true;
                        break;
                    }
                }

                for (String index : concreteIndices) {
                    IndexMetaData indexMetaData = currentState.metaData().index(index);
                    if (indexMetaData == null) {
                        throw new IndexNotFoundException(index);
                    }
                    IndexWarmersMetaData warmers = indexMetaData.custom(IndexWarmersMetaData.TYPE);
                    if (warmers != null) {
                        // keep only the warmers whose name matches none of the requested names/patterns
                        List<IndexWarmersMetaData.Entry> entries = new ArrayList<>();
                        for (IndexWarmersMetaData.Entry entry : warmers.entries()) {
                            boolean keepWarmer = true;
                            for (String warmer : request.names()) {
                                if (Regex.simpleMatch(warmer, entry.name()) || warmer.equals(MetaData.ALL)) {
                                    globalFoundAtLeastOne = true;
                                    keepWarmer = false;
                                    // don't add it...
                                    break;
                                }
                            }
                            if (keepWarmer) {
                                entries.add(entry);
                            }
                        }
                        // a change, update it...
                        if (entries.size() != warmers.entries().size()) {
                            warmers = new IndexWarmersMetaData(entries.toArray(new IndexWarmersMetaData.Entry[entries.size()]));
                            IndexMetaData.Builder indexBuilder = IndexMetaData.builder(indexMetaData).putCustom(IndexWarmersMetaData.TYPE, warmers);
                            mdBuilder.put(indexBuilder);
                        }
                    }
                }

                // an explicitly named warmer that matched nothing is an error;
                // a wildcard "_all" delete is allowed to match nothing
                if (globalFoundAtLeastOne == false && deleteAll == false) {
                    throw new IndexWarmerMissingException(request.names());
                }

                // second pass purely for info-level logging of what was matched;
                // note it inspects currentState (pre-removal), so matched entries are still visible
                if (logger.isInfoEnabled()) {
                    for (String index : concreteIndices) {
                        IndexMetaData indexMetaData = currentState.metaData().index(index);
                        if (indexMetaData == null) {
                            throw new IndexNotFoundException(index);
                        }
                        IndexWarmersMetaData warmers = indexMetaData.custom(IndexWarmersMetaData.TYPE);
                        if (warmers != null) {
                            for (IndexWarmersMetaData.Entry entry : warmers.entries()) {
                                for (String warmer : request.names()) {
                                    if (Regex.simpleMatch(warmer, entry.name()) || warmer.equals(MetaData.ALL)) {
                                        logger.info("[{}] delete warmer [{}]", index, entry.name());
                                    }
                                }
                            }
                        } else if(deleteAll){
                            logger.debug("no warmers to delete on index [{}]", index);
                        }
                    }
                }

                return ClusterState.builder(currentState).metaData(mdBuilder).build();
            }
        });
    }
}
|
|
@ -1,46 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.indices.warmer.get;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
/**
|
||||
* Action for the admin/warmers/get API.
|
||||
*/
|
||||
public class GetWarmersAction extends Action<GetWarmersRequest, GetWarmersResponse, GetWarmersRequestBuilder> {
|
||||
|
||||
public static final GetWarmersAction INSTANCE = new GetWarmersAction();
|
||||
public static final String NAME = "indices:admin/warmers/get";
|
||||
|
||||
private GetWarmersAction() {
|
||||
super(NAME);
|
||||
}
|
||||
|
||||
@Override
|
||||
public GetWarmersRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new GetWarmersRequestBuilder(client, this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public GetWarmersResponse newResponse() {
|
||||
return new GetWarmersResponse();
|
||||
}
|
||||
}
|
|
@ -1,64 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.indices.warmer.get;
|
||||
|
||||
import org.elasticsearch.action.ActionRequestValidationException;
|
||||
import org.elasticsearch.action.support.master.info.ClusterInfoRequest;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
/**
|
||||
* A {@link ClusterInfoRequest} that fetches {@link org.elasticsearch.search.warmer.IndexWarmersMetaData} for
|
||||
* a list or all existing index warmers in the cluster-state
|
||||
*/
|
||||
public class GetWarmersRequest extends ClusterInfoRequest<GetWarmersRequest> {
|
||||
|
||||
private String[] warmers = Strings.EMPTY_ARRAY;
|
||||
|
||||
public GetWarmersRequest warmers(String[] warmers) {
|
||||
this.warmers = warmers;
|
||||
return this;
|
||||
}
|
||||
|
||||
public String[] warmers() {
|
||||
return warmers;
|
||||
}
|
||||
|
||||
@Override
|
||||
public ActionRequestValidationException validate() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
warmers = in.readStringArray();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
out.writeStringArray(warmers);
|
||||
}
|
||||
|
||||
}
|
|
@ -1,46 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.indices.warmer.get;
|
||||
|
||||
import org.elasticsearch.action.support.master.info.ClusterInfoRequestBuilder;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
import org.elasticsearch.common.util.ArrayUtils;
|
||||
|
||||
/**
|
||||
* Builder for {@link GetWarmersRequest}
|
||||
*
|
||||
* @see GetWarmersRequest for details
|
||||
*/
|
||||
public class GetWarmersRequestBuilder extends ClusterInfoRequestBuilder<GetWarmersRequest, GetWarmersResponse, GetWarmersRequestBuilder> {
|
||||
|
||||
public GetWarmersRequestBuilder(ElasticsearchClient client, GetWarmersAction action, String... indices) {
|
||||
super(client, action, new GetWarmersRequest().indices(indices));
|
||||
}
|
||||
|
||||
public GetWarmersRequestBuilder setWarmers(String... warmers) {
|
||||
request.warmers(warmers);
|
||||
return this;
|
||||
}
|
||||
|
||||
public GetWarmersRequestBuilder addWarmers(String... warmers) {
|
||||
request.warmers(ArrayUtils.concat(request.warmers(), warmers));
|
||||
return this;
|
||||
}
|
||||
}
|
|
@ -1,107 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.indices.warmer.get;
|
||||
|
||||
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
|
||||
import org.elasticsearch.action.ActionResponse;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* Holds a warmer-name to a list of {@link IndexWarmersMetaData} mapping for each warmer specified
|
||||
* in the {@link GetWarmersRequest}. This information is fetched from the current master since the metadata
|
||||
* is contained inside the cluster-state
|
||||
*/
|
||||
public class GetWarmersResponse extends ActionResponse {
|
||||
|
||||
private ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> warmers = ImmutableOpenMap.of();
|
||||
|
||||
GetWarmersResponse(ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> warmers) {
|
||||
this.warmers = warmers;
|
||||
}
|
||||
|
||||
GetWarmersResponse() {
|
||||
}
|
||||
|
||||
public ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> warmers() {
|
||||
return warmers;
|
||||
}
|
||||
|
||||
public ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> getWarmers() {
|
||||
return warmers();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
int size = in.readVInt();
|
||||
ImmutableOpenMap.Builder<String, List<IndexWarmersMetaData.Entry>> indexMapBuilder = ImmutableOpenMap.builder();
|
||||
for (int i = 0; i < size; i++) {
|
||||
String key = in.readString();
|
||||
int valueSize = in.readVInt();
|
||||
List<IndexWarmersMetaData.Entry> warmerEntryBuilder = new ArrayList<>();
|
||||
for (int j = 0; j < valueSize; j++) {
|
||||
String name = in.readString();
|
||||
String[] types = in.readStringArray();
|
||||
IndexWarmersMetaData.SearchSource source = null;
|
||||
if (in.readBoolean()) {
|
||||
source = new IndexWarmersMetaData.SearchSource(in);
|
||||
}
|
||||
Boolean queryCache = null;
|
||||
queryCache = in.readOptionalBoolean();
|
||||
warmerEntryBuilder.add(new IndexWarmersMetaData.Entry(
|
||||
name,
|
||||
types,
|
||||
queryCache,
|
||||
source)
|
||||
);
|
||||
}
|
||||
indexMapBuilder.put(key, Collections.unmodifiableList(warmerEntryBuilder));
|
||||
}
|
||||
warmers = indexMapBuilder.build();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
out.writeVInt(warmers.size());
|
||||
for (ObjectObjectCursor<String, List<IndexWarmersMetaData.Entry>> indexEntry : warmers) {
|
||||
out.writeString(indexEntry.key);
|
||||
out.writeVInt(indexEntry.value.size());
|
||||
for (IndexWarmersMetaData.Entry warmerEntry : indexEntry.value) {
|
||||
out.writeString(warmerEntry.name());
|
||||
out.writeStringArray(warmerEntry.types());
|
||||
boolean hasWarmerSource = warmerEntry != null;
|
||||
out.writeBoolean(hasWarmerSource);
|
||||
if (hasWarmerSource) {
|
||||
warmerEntry.source().writeTo(out);
|
||||
}
|
||||
out.writeOptionalBoolean(warmerEntry.requestCache());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,75 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.indices.warmer.get;
|
||||
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.master.info.TransportClusterInfoAction;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.block.ClusterBlockException;
|
||||
import org.elasticsearch.cluster.block.ClusterBlockLevel;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* Internal Actions executed on the master fetching the warmer from the cluster state metadata.
|
||||
*
|
||||
* Note: this is an internal API and should not be used / called by any client code.
|
||||
*/
|
||||
public class TransportGetWarmersAction extends TransportClusterInfoAction<GetWarmersRequest, GetWarmersResponse> {
|
||||
|
||||
@Inject
|
||||
public TransportGetWarmersAction(Settings settings, TransportService transportService, ClusterService clusterService,
|
||||
ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
|
||||
super(settings, GetWarmersAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, GetWarmersRequest::new);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected String executor() {
|
||||
// very lightweight operation, no need to fork
|
||||
return ThreadPool.Names.SAME;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected ClusterBlockException checkBlock(GetWarmersRequest request, ClusterState state) {
|
||||
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, request));
|
||||
}
|
||||
|
||||
@Override
|
||||
protected GetWarmersResponse newResponse() {
|
||||
return new GetWarmersResponse();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doMasterOperation(final GetWarmersRequest request, String[] concreteIndices, final ClusterState state, final ActionListener<GetWarmersResponse> listener) {
|
||||
ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> result = state.metaData().findWarmers(
|
||||
concreteIndices, request.types(), request.warmers()
|
||||
);
|
||||
listener.onResponse(new GetWarmersResponse(result));
|
||||
}
|
||||
}
|
|
@ -1,30 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Index / Search Warmer Administrative Actions
|
||||
* <p>
|
||||
* Index warming allows to run registered search requests to warm up the index before it is available for search.
|
||||
* With the near real time aspect of search, cold data (segments) will be warmed up before they become available for
|
||||
* search. This includes things such as the query cache, filesystem cache, and loading field data for fields.
|
||||
* </p>
|
||||
*
|
||||
* See the reference guide for more detailed information about the Indices / Search Warmer
|
||||
*/
|
||||
package org.elasticsearch.action.admin.indices.warmer;
|
|
@ -1,153 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.indices.warmer.put;
|
||||
|
||||
import org.elasticsearch.action.ActionRequestValidationException;
|
||||
import org.elasticsearch.action.IndicesRequest;
|
||||
import org.elasticsearch.action.search.SearchRequest;
|
||||
import org.elasticsearch.action.search.SearchRequestBuilder;
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.action.support.master.AcknowledgedRequest;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import static org.elasticsearch.action.ValidateActions.addValidationError;
|
||||
|
||||
/**
 * A request that associates a {@link SearchRequest} with a name in the cluster that is
 * in-turn used to warm up indices before they are available for search.
 *
 * Note: neither the search request nor the name may be <code>null</code>;
 * {@link #validate()} rejects a request that is missing either one.
 */
public class PutWarmerRequest extends AcknowledgedRequest<PutWarmerRequest> implements IndicesRequest.Replaceable {

    // the name the warmer is registered under in the cluster state
    private String name;

    // the search request executed to warm the index; also carries the target indices
    private SearchRequest searchRequest;

    public PutWarmerRequest() {
    }

    /**
     * Constructs a new warmer.
     *
     * @param name The name of the warmer.
     */
    public PutWarmerRequest(String name) {
        this.name = name;
    }

    /**
     * Sets the name of the warmer.
     */
    public PutWarmerRequest name(String name) {
        this.name = name;
        return this;
    }

    public String name() {
        return this.name;
    }

    /**
     * Sets the search request to warm.
     */
    public PutWarmerRequest searchRequest(SearchRequest searchRequest) {
        this.searchRequest = searchRequest;
        return this;
    }

    /**
     * Sets the search request to warm.
     */
    public PutWarmerRequest searchRequest(SearchRequestBuilder searchRequest) {
        this.searchRequest = searchRequest.request();
        return this;
    }

    public SearchRequest searchRequest() {
        return this.searchRequest;
    }

    @Override
    public ActionRequestValidationException validate() {
        ActionRequestValidationException validationException = null;
        if (searchRequest == null) {
            validationException = addValidationError("search request is missing", validationException);
        } else {
            // delegate to the wrapped search request's own validation when one is present
            validationException = searchRequest.validate();
        }
        if (name == null) {
            validationException = addValidationError("name is missing", validationException);
        }
        return validationException;
    }

    @Override
    public String[] indices() {
        // the target indices live on the wrapped search request
        if (searchRequest == null) {
            throw new IllegalStateException("unable to retrieve indices, search request is null");
        }
        return searchRequest.indices();
    }

    @Override
    public IndicesRequest indices(String[] indices) {
        if (searchRequest == null) {
            throw new IllegalStateException("unable to set indices, search request is null");
        }
        searchRequest.indices(indices);
        return this;
    }

    @Override
    public IndicesOptions indicesOptions() {
        if (searchRequest == null) {
            throw new IllegalStateException("unable to retrieve indices options, search request is null");
        }
        return searchRequest.indicesOptions();
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        name = in.readString();
        // a boolean flag precedes the (optional) search request on the wire
        if (in.readBoolean()) {
            searchRequest = new SearchRequest();
            searchRequest.readFrom(in);
        }
        readTimeout(in);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeString(name);
        // keep the boolean-then-payload order in sync with readFrom
        if (searchRequest == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            searchRequest.writeTo(out);
        }
        writeTimeout(out);
    }
}
|
|
@ -1,72 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.indices.warmer.put;
|
||||
|
||||
import org.elasticsearch.action.search.SearchRequest;
|
||||
import org.elasticsearch.action.search.SearchRequestBuilder;
|
||||
import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
/**
|
||||
* Builder for {@link PutWarmerRequest}
|
||||
*
|
||||
* @see PutWarmerRequest for details
|
||||
*/
|
||||
public class PutWarmerRequestBuilder extends AcknowledgedRequestBuilder<PutWarmerRequest, PutWarmerResponse, PutWarmerRequestBuilder> {
|
||||
|
||||
/**
|
||||
* Creates a new {@link PutWarmerRequestBuilder} with a given name.
|
||||
*/
|
||||
public PutWarmerRequestBuilder(ElasticsearchClient client, PutWarmerAction action, String name) {
|
||||
super(client, action, new PutWarmerRequest().name(name));
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new {@link PutWarmerRequestBuilder}
|
||||
* Note: {@link #setName(String)} must be called with a non-null value before this request is executed.
|
||||
*/
|
||||
public PutWarmerRequestBuilder(ElasticsearchClient client, PutWarmerAction action) {
|
||||
super(client, action, new PutWarmerRequest());
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the name of the warmer.
|
||||
*/
|
||||
public PutWarmerRequestBuilder setName(String name) {
|
||||
request.name(name);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the search request to use to warm the index when applicable.
|
||||
*/
|
||||
public PutWarmerRequestBuilder setSearchRequest(SearchRequest searchRequest) {
|
||||
request.searchRequest(searchRequest);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the search request to use to warm the index when applicable.
|
||||
*/
|
||||
public PutWarmerRequestBuilder setSearchRequest(SearchRequestBuilder searchRequest) {
|
||||
request.searchRequest(searchRequest);
|
||||
return this;
|
||||
}
|
||||
}
|
|
@ -1,52 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.indices.warmer.put;
|
||||
|
||||
import org.elasticsearch.action.support.master.AcknowledgedResponse;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
/**
|
||||
* An acknowledged response of put warmer operation.
|
||||
*/
|
||||
public class PutWarmerResponse extends AcknowledgedResponse {
|
||||
|
||||
PutWarmerResponse() {
|
||||
super();
|
||||
}
|
||||
|
||||
PutWarmerResponse(boolean acknowledged) {
|
||||
super(acknowledged);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
readAcknowledged(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
writeAcknowledged(out);
|
||||
}
|
||||
}
|
|
@ -1,167 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.indices.warmer.put;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.search.SearchRequest;
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.action.search.TransportSearchAction;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
|
||||
import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.block.ClusterBlockException;
|
||||
import org.elasticsearch.cluster.block.ClusterBlockLevel;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.IndexNotFoundException;
|
||||
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* Internal Actions executed on the master associating a warmer with a name in the cluster state metadata.
|
||||
*
|
||||
* Note: this is an internal API and should not be used / called by any client code.
|
||||
*/
|
||||
public class TransportPutWarmerAction extends TransportMasterNodeAction<PutWarmerRequest, PutWarmerResponse> {
|
||||
|
||||
private final TransportSearchAction searchAction;
|
||||
|
||||
@Inject
|
||||
public TransportPutWarmerAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
|
||||
TransportSearchAction searchAction, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
|
||||
super(settings, PutWarmerAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, PutWarmerRequest::new);
|
||||
this.searchAction = searchAction;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected String executor() {
|
||||
return ThreadPool.Names.SAME;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected PutWarmerResponse newResponse() {
|
||||
return new PutWarmerResponse();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected ClusterBlockException checkBlock(PutWarmerRequest request, ClusterState state) {
|
||||
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
|
||||
ClusterBlockException status = state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, concreteIndices);
|
||||
if (status != null) {
|
||||
return status;
|
||||
}
|
||||
// PutWarmer executes a SearchQuery before adding the new warmer to the cluster state,
|
||||
// so we need to check the same block as TransportSearchTypeAction here
|
||||
return state.blocks().indicesBlockedException(ClusterBlockLevel.READ, concreteIndices);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void masterOperation(final PutWarmerRequest request, final ClusterState state, final ActionListener<PutWarmerResponse> listener) {
|
||||
// first execute the search request, see that its ok...
|
||||
SearchRequest searchRequest = new SearchRequest(request.searchRequest(), request);
|
||||
searchAction.execute(searchRequest, new ActionListener<SearchResponse>() {
|
||||
@Override
|
||||
public void onResponse(SearchResponse searchResponse) {
|
||||
if (searchResponse.getFailedShards() > 0) {
|
||||
listener.onFailure(new ElasticsearchException("search failed with failed shards: " + Arrays.toString(searchResponse.getShardFailures())));
|
||||
return;
|
||||
}
|
||||
|
||||
clusterService.submitStateUpdateTask("put_warmer [" + request.name() + "]", new AckedClusterStateUpdateTask<PutWarmerResponse>(request, listener) {
|
||||
|
||||
@Override
|
||||
protected PutWarmerResponse newResponse(boolean acknowledged) {
|
||||
return new PutWarmerResponse(acknowledged);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(String source, Throwable t) {
|
||||
logger.debug("failed to put warmer [{}] on indices [{}]", t, request.name(), request.searchRequest().indices());
|
||||
super.onFailure(source, t);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) {
|
||||
MetaData metaData = currentState.metaData();
|
||||
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(currentState, request.searchRequest().indicesOptions(), request.searchRequest().indices());
|
||||
|
||||
IndexWarmersMetaData.SearchSource source = null;
|
||||
if (request.searchRequest().source() != null) {
|
||||
source = new IndexWarmersMetaData.SearchSource(request.searchRequest().source());
|
||||
}
|
||||
|
||||
// now replace it on the metadata
|
||||
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
|
||||
|
||||
for (String index : concreteIndices) {
|
||||
IndexMetaData indexMetaData = metaData.index(index);
|
||||
if (indexMetaData == null) {
|
||||
throw new IndexNotFoundException(index);
|
||||
}
|
||||
IndexWarmersMetaData warmers = indexMetaData.custom(IndexWarmersMetaData.TYPE);
|
||||
if (warmers == null) {
|
||||
logger.info("[{}] putting warmer [{}]", index, request.name());
|
||||
warmers = new IndexWarmersMetaData(new IndexWarmersMetaData.Entry(request.name(), request.searchRequest().types(), request.searchRequest().requestCache(), source));
|
||||
} else {
|
||||
boolean found = false;
|
||||
List<IndexWarmersMetaData.Entry> entries = new ArrayList<>(warmers.entries().size() + 1);
|
||||
for (IndexWarmersMetaData.Entry entry : warmers.entries()) {
|
||||
if (entry.name().equals(request.name())) {
|
||||
found = true;
|
||||
entries.add(new IndexWarmersMetaData.Entry(request.name(), request.searchRequest().types(), request.searchRequest().requestCache(), source));
|
||||
} else {
|
||||
entries.add(entry);
|
||||
}
|
||||
}
|
||||
if (!found) {
|
||||
logger.info("[{}] put warmer [{}]", index, request.name());
|
||||
entries.add(new IndexWarmersMetaData.Entry(request.name(), request.searchRequest().types(), request.searchRequest().requestCache(), source));
|
||||
} else {
|
||||
logger.info("[{}] update warmer [{}]", index, request.name());
|
||||
}
|
||||
warmers = new IndexWarmersMetaData(entries.toArray(new IndexWarmersMetaData.Entry[entries.size()]));
|
||||
}
|
||||
IndexMetaData.Builder indexBuilder = IndexMetaData.builder(indexMetaData).putCustom(IndexWarmersMetaData.TYPE, warmers);
|
||||
mdBuilder.put(indexBuilder);
|
||||
}
|
||||
|
||||
return ClusterState.builder(currentState).metaData(mdBuilder).build();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable e) {
|
||||
listener.onFailure(e);
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
|
@ -26,10 +26,10 @@ import org.elasticsearch.action.ActionRunnable;
|
|||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.HandledTransportAction;
|
||||
import org.elasticsearch.action.support.ThreadedActionListener;
|
||||
import org.elasticsearch.cluster.ClusterChangedEvent;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.ClusterStateObserver;
|
||||
import org.elasticsearch.cluster.MasterNodeChangePredicate;
|
||||
import org.elasticsearch.cluster.NotMasterException;
|
||||
import org.elasticsearch.cluster.block.ClusterBlockException;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
|
@ -51,20 +51,6 @@ import java.util.function.Supplier;
|
|||
* A base class for operations that needs to be performed on the master node.
|
||||
*/
|
||||
public abstract class TransportMasterNodeAction<Request extends MasterNodeRequest, Response extends ActionResponse> extends HandledTransportAction<Request, Response> {
|
||||
private static final ClusterStateObserver.ChangePredicate masterNodeChangedPredicate = new ClusterStateObserver.ChangePredicate() {
|
||||
@Override
|
||||
public boolean apply(ClusterState previousState, ClusterState.ClusterStateStatus previousStatus,
|
||||
ClusterState newState, ClusterState.ClusterStateStatus newStatus) {
|
||||
// The condition !newState.nodes().masterNodeId().equals(previousState.nodes().masterNodeId()) is not sufficient as the same master node might get reelected after a disruption.
|
||||
return newState.nodes().masterNodeId() != null && newState != previousState;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean apply(ClusterChangedEvent event) {
|
||||
return event.nodesDelta().masterNodeChanged();
|
||||
}
|
||||
};
|
||||
|
||||
protected final TransportService transportService;
|
||||
protected final ClusterService clusterService;
|
||||
|
||||
|
@ -164,7 +150,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
|
|||
if (t instanceof Discovery.FailedToCommitClusterStateException
|
||||
|| (t instanceof NotMasterException)) {
|
||||
logger.debug("master could not publish cluster state or stepped down before publishing action [{}], scheduling a retry", t, actionName);
|
||||
retry(t, masterNodeChangedPredicate);
|
||||
retry(t, MasterNodeChangePredicate.INSTANCE);
|
||||
} else {
|
||||
listener.onFailure(t);
|
||||
}
|
||||
|
@ -180,7 +166,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
|
|||
} else {
|
||||
if (nodes.masterNode() == null) {
|
||||
logger.debug("no known master node, scheduling a retry");
|
||||
retry(null, masterNodeChangedPredicate);
|
||||
retry(null, MasterNodeChangePredicate.INSTANCE);
|
||||
} else {
|
||||
transportService.sendRequest(nodes.masterNode(), actionName, request, new ActionListenerResponseHandler<Response>(listener) {
|
||||
@Override
|
||||
|
@ -195,7 +181,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
|
|||
// we want to retry here a bit to see if a new master is elected
|
||||
logger.debug("connection exception while trying to forward request with action name [{}] to master node [{}], scheduling a retry. Error: [{}]",
|
||||
actionName, nodes.masterNode(), exp.getDetailedMessage());
|
||||
retry(cause, masterNodeChangedPredicate);
|
||||
retry(cause, MasterNodeChangePredicate.INSTANCE);
|
||||
} else {
|
||||
listener.onFailure(exp);
|
||||
}
|
||||
|
|
|
@ -844,11 +844,11 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
|
|||
// we never execute replication operation locally as primary operation has already completed locally
|
||||
// hence, we ignore any local shard for replication
|
||||
if (nodes.localNodeId().equals(shard.currentNodeId()) == false) {
|
||||
performOnReplica(shard, shard.currentNodeId());
|
||||
performOnReplica(shard);
|
||||
}
|
||||
// send operation to relocating shard
|
||||
if (shard.relocating()) {
|
||||
performOnReplica(shard, shard.relocatingNodeId());
|
||||
performOnReplica(shard.buildTargetRelocatingShard());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -856,9 +856,10 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
|
|||
/**
|
||||
* send replica operation to target node
|
||||
*/
|
||||
void performOnReplica(final ShardRouting shard, final String nodeId) {
|
||||
void performOnReplica(final ShardRouting shard) {
|
||||
// if we don't have that node, it means that it might have failed and will be created again, in
|
||||
// this case, we don't have to do the operation, and just let it failover
|
||||
String nodeId = shard.currentNodeId();
|
||||
if (!nodes.nodeExists(nodeId)) {
|
||||
logger.trace("failed to send action [{}] on replica [{}] for request [{}] due to unknown node [{}]", transportReplicaAction, shard.shardId(), replicaRequest, nodeId);
|
||||
onReplicaFailure(nodeId, null);
|
||||
|
|
|
@ -113,15 +113,6 @@ import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeResponse;
|
|||
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest;
|
||||
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse;
|
||||
import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerRequest;
|
||||
import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse;
|
||||
import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequest;
|
||||
import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse;
|
||||
import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerRequest;
|
||||
import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerResponse;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
|
||||
/**
|
||||
|
@ -771,51 +762,6 @@ public interface IndicesAdminClient extends ElasticsearchClient {
|
|||
*/
|
||||
ValidateQueryRequestBuilder prepareValidateQuery(String... indices);
|
||||
|
||||
/**
|
||||
* Puts an index search warmer to be applies when applicable.
|
||||
*/
|
||||
ActionFuture<PutWarmerResponse> putWarmer(PutWarmerRequest request);
|
||||
|
||||
/**
|
||||
* Puts an index search warmer to be applies when applicable.
|
||||
*/
|
||||
void putWarmer(PutWarmerRequest request, ActionListener<PutWarmerResponse> listener);
|
||||
|
||||
/**
|
||||
* Puts an index search warmer to be applies when applicable.
|
||||
*/
|
||||
PutWarmerRequestBuilder preparePutWarmer(String name);
|
||||
|
||||
/**
|
||||
* Deletes an index warmer.
|
||||
*/
|
||||
ActionFuture<DeleteWarmerResponse> deleteWarmer(DeleteWarmerRequest request);
|
||||
|
||||
/**
|
||||
* Deletes an index warmer.
|
||||
*/
|
||||
void deleteWarmer(DeleteWarmerRequest request, ActionListener<DeleteWarmerResponse> listener);
|
||||
|
||||
/**
|
||||
* Deletes an index warmer.
|
||||
*/
|
||||
DeleteWarmerRequestBuilder prepareDeleteWarmer();
|
||||
|
||||
/**
|
||||
* Returns a map of index warmers for the given get request.
|
||||
*/
|
||||
void getWarmers(GetWarmersRequest request, ActionListener<GetWarmersResponse> listener);
|
||||
|
||||
/**
|
||||
* Returns a map of index warmers for the given get request.
|
||||
*/
|
||||
ActionFuture<GetWarmersResponse> getWarmers(GetWarmersRequest request);
|
||||
|
||||
/**
|
||||
* Returns a new builder to fetch index warmer metadata for the given indices.
|
||||
*/
|
||||
GetWarmersRequestBuilder prepareGetWarmers(String... indices);
|
||||
|
||||
/**
|
||||
* Executed a per index settings get request and returns the settings for the indices specified.
|
||||
* Note: this is a per index request and will not include settings that are set on the cluster
|
||||
|
|
|
@ -232,18 +232,6 @@ import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction
|
|||
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest;
|
||||
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse;
|
||||
import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerAction;
|
||||
import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerRequest;
|
||||
import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse;
|
||||
import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersAction;
|
||||
import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequest;
|
||||
import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse;
|
||||
import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerAction;
|
||||
import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerRequest;
|
||||
import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerResponse;
|
||||
import org.elasticsearch.action.bulk.BulkAction;
|
||||
import org.elasticsearch.action.bulk.BulkRequest;
|
||||
import org.elasticsearch.action.bulk.BulkRequestBuilder;
|
||||
|
@ -1669,51 +1657,6 @@ public abstract class AbstractClient extends AbstractComponent implements Client
|
|||
return new ValidateQueryRequestBuilder(this, ValidateQueryAction.INSTANCE).setIndices(indices);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ActionFuture<PutWarmerResponse> putWarmer(PutWarmerRequest request) {
|
||||
return execute(PutWarmerAction.INSTANCE, request);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void putWarmer(PutWarmerRequest request, ActionListener<PutWarmerResponse> listener) {
|
||||
execute(PutWarmerAction.INSTANCE, request, listener);
|
||||
}
|
||||
|
||||
@Override
|
||||
public PutWarmerRequestBuilder preparePutWarmer(String name) {
|
||||
return new PutWarmerRequestBuilder(this, PutWarmerAction.INSTANCE, name);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ActionFuture<DeleteWarmerResponse> deleteWarmer(DeleteWarmerRequest request) {
|
||||
return execute(DeleteWarmerAction.INSTANCE, request);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void deleteWarmer(DeleteWarmerRequest request, ActionListener<DeleteWarmerResponse> listener) {
|
||||
execute(DeleteWarmerAction.INSTANCE, request, listener);
|
||||
}
|
||||
|
||||
@Override
|
||||
public DeleteWarmerRequestBuilder prepareDeleteWarmer() {
|
||||
return new DeleteWarmerRequestBuilder(this, DeleteWarmerAction.INSTANCE);
|
||||
}
|
||||
|
||||
@Override
|
||||
public GetWarmersRequestBuilder prepareGetWarmers(String... indices) {
|
||||
return new GetWarmersRequestBuilder(this, GetWarmersAction.INSTANCE, indices);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ActionFuture<GetWarmersResponse> getWarmers(GetWarmersRequest request) {
|
||||
return execute(GetWarmersAction.INSTANCE, request);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void getWarmers(GetWarmersRequest request, ActionListener<GetWarmersResponse> listener) {
|
||||
execute(GetWarmersAction.INSTANCE, request, listener);
|
||||
}
|
||||
|
||||
@Override
|
||||
public GetSettingsRequestBuilder prepareGetSettings(String... indices) {
|
||||
return new GetSettingsRequestBuilder(this, GetSettingsAction.INSTANCE, indices);
|
||||
|
|
|
@ -17,30 +17,24 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.indices.warmer.put;
|
||||
package org.elasticsearch.cluster;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
public enum MasterNodeChangePredicate implements ClusterStateObserver.ChangePredicate {
|
||||
INSTANCE;
|
||||
|
||||
/**
|
||||
* Action for the admin/warmers/put API.
|
||||
*/
|
||||
public class PutWarmerAction extends Action<PutWarmerRequest, PutWarmerResponse, PutWarmerRequestBuilder> {
|
||||
|
||||
public static final PutWarmerAction INSTANCE = new PutWarmerAction();
|
||||
public static final String NAME = "indices:admin/warmers/put";
|
||||
|
||||
private PutWarmerAction() {
|
||||
super(NAME);
|
||||
@Override
|
||||
public boolean apply(
|
||||
ClusterState previousState,
|
||||
ClusterState.ClusterStateStatus previousStatus,
|
||||
ClusterState newState,
|
||||
ClusterState.ClusterStateStatus newStatus) {
|
||||
// checking if the masterNodeId changed is insufficient as the
|
||||
// same master node might get re-elected after a disruption
|
||||
return newState.nodes().masterNodeId() != null && newState != previousState;
|
||||
}
|
||||
|
||||
@Override
|
||||
public PutWarmerResponse newResponse() {
|
||||
return new PutWarmerResponse();
|
||||
}
|
||||
|
||||
@Override
|
||||
public PutWarmerRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new PutWarmerRequestBuilder(client, this);
|
||||
public boolean apply(ClusterChangedEvent changedEvent) {
|
||||
return changedEvent.nodesDelta().masterNodeChanged();
|
||||
}
|
||||
}
|
|
@ -302,6 +302,10 @@ public class ShardStateAction extends AbstractComponent {
|
|||
this.failure = failure;
|
||||
}
|
||||
|
||||
public ShardRouting getShardRouting() {
|
||||
return shardRouting;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
|
|
|
@ -46,7 +46,6 @@ import org.elasticsearch.common.xcontent.XContentFactory;
|
|||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.index.mapper.MapperService;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
|
||||
import org.joda.time.DateTime;
|
||||
import org.joda.time.DateTimeZone;
|
||||
|
||||
|
@ -92,11 +91,6 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
|||
|
||||
public static Map<String, Custom> customPrototypes = new HashMap<>();
|
||||
|
||||
static {
|
||||
// register non plugin custom metadata
|
||||
registerPrototype(IndexWarmersMetaData.TYPE, IndexWarmersMetaData.PROTO);
|
||||
}
|
||||
|
||||
/**
|
||||
* Register a custom index meta data factory. Make sure to call it from a static block.
|
||||
*/
|
||||
|
@ -904,6 +898,12 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
|||
builder.putActiveAllocationIds(Integer.valueOf(shardId), allocationIds);
|
||||
}
|
||||
}
|
||||
} else if ("warmers".equals(currentFieldName)) {
|
||||
// TODO: do this in 4.0:
|
||||
// throw new IllegalArgumentException("Warmers are not supported anymore - are you upgrading from 1.x?");
|
||||
// ignore: warmers have been removed in 3.0 and are
|
||||
// simply ignored when upgrading from 2.x
|
||||
assert Version.CURRENT.major <= 3;
|
||||
} else {
|
||||
// check if its a custom index metadata
|
||||
Custom proto = lookupPrototype(currentFieldName);
|
||||
|
|
|
@ -55,12 +55,10 @@ import org.elasticsearch.index.store.IndexStoreConfig;
|
|||
import org.elasticsearch.indices.recovery.RecoverySettings;
|
||||
import org.elasticsearch.indices.ttl.IndicesTTLService;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.Comparator;
|
||||
import java.util.EnumSet;
|
||||
|
@ -71,7 +69,6 @@ import java.util.Map;
|
|||
import java.util.Set;
|
||||
import java.util.SortedMap;
|
||||
import java.util.TreeMap;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static java.util.Collections.unmodifiableSet;
|
||||
import static org.elasticsearch.common.settings.Settings.readSettingsFromStream;
|
||||
|
@ -365,49 +362,6 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
|
|||
return indexMapBuilder.build();
|
||||
}
|
||||
|
||||
public ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> findWarmers(String[] concreteIndices, final String[] types, final String[] uncheckedWarmers) {
|
||||
assert uncheckedWarmers != null;
|
||||
assert concreteIndices != null;
|
||||
if (concreteIndices.length == 0) {
|
||||
return ImmutableOpenMap.of();
|
||||
}
|
||||
// special _all check to behave the same like not specifying anything for the warmers (not for the indices)
|
||||
final String[] warmers = Strings.isAllOrWildcard(uncheckedWarmers) ? Strings.EMPTY_ARRAY : uncheckedWarmers;
|
||||
|
||||
ImmutableOpenMap.Builder<String, List<IndexWarmersMetaData.Entry>> mapBuilder = ImmutableOpenMap.builder();
|
||||
Iterable<String> intersection = HppcMaps.intersection(ObjectHashSet.from(concreteIndices), indices.keys());
|
||||
for (String index : intersection) {
|
||||
IndexMetaData indexMetaData = indices.get(index);
|
||||
IndexWarmersMetaData indexWarmersMetaData = indexMetaData.custom(IndexWarmersMetaData.TYPE);
|
||||
if (indexWarmersMetaData == null || indexWarmersMetaData.entries().isEmpty()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// TODO: make this a List so we don't have to copy below
|
||||
Collection<IndexWarmersMetaData.Entry> filteredWarmers =
|
||||
indexWarmersMetaData
|
||||
.entries()
|
||||
.stream()
|
||||
.filter(warmer -> {
|
||||
if (warmers.length != 0 && types.length != 0) {
|
||||
return Regex.simpleMatch(warmers, warmer.name()) && Regex.simpleMatch(types, warmer.types());
|
||||
} else if (warmers.length != 0) {
|
||||
return Regex.simpleMatch(warmers, warmer.name());
|
||||
} else if (types.length != 0) {
|
||||
return Regex.simpleMatch(types, warmer.types());
|
||||
} else {
|
||||
return true;
|
||||
}
|
||||
})
|
||||
.collect(Collectors.toCollection(ArrayList::new));
|
||||
|
||||
if (!filteredWarmers.isEmpty()) {
|
||||
mapBuilder.put(index, Collections.unmodifiableList(new ArrayList<>(filteredWarmers)));
|
||||
}
|
||||
}
|
||||
return mapBuilder.build();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns all the concrete indices.
|
||||
*/
|
||||
|
|
|
@ -59,6 +59,7 @@ public enum MurmurHash3 {
|
|||
* Note, this hashing function might be used to persist hashes, so if the way hashes are computed
|
||||
* changes for some reason, it needs to be addressed (like in BloomFilter and MurmurHashField).
|
||||
*/
|
||||
@SuppressWarnings("fallthrough") // Intentionally uses fallthrough to implement a well known hashing algorithm
|
||||
public static Hash128 hash128(byte[] key, int offset, int length, long seed, Hash128 hash) {
|
||||
long h1 = seed;
|
||||
long h2 = seed;
|
||||
|
|
|
@ -258,6 +258,12 @@ public final class FactoryProvider2<F> implements InvocationHandler, Provider<F>
|
|||
return o == this || o == factory;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
// This way both this and its factory hash to the same spot, making hashCode consistent.
|
||||
return factory.hashCode();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns true if {@code thrown} can be thrown by {@code invoked} without wrapping.
|
||||
*/
|
||||
|
|
|
@ -86,11 +86,6 @@ import java.util.Objects;
|
|||
*
|
||||
*/
|
||||
public class Lucene {
|
||||
|
||||
// TODO: remove VERSION, and have users use Version.LATEST.
|
||||
public static final Version VERSION = Version.LATEST;
|
||||
public static final Version ANALYZER_VERSION = VERSION;
|
||||
public static final Version QUERYPARSER_VERSION = VERSION;
|
||||
public static final String LATEST_DOC_VALUES_FORMAT = "Lucene54";
|
||||
public static final String LATEST_POSTINGS_FORMAT = "Lucene50";
|
||||
public static final String LATEST_CODEC = "Lucene54";
|
||||
|
@ -109,7 +104,6 @@ public class Lucene {
|
|||
|
||||
public static final TopDocs EMPTY_TOP_DOCS = new TopDocs(0, EMPTY_SCORE_DOCS, 0.0f);
|
||||
|
||||
@SuppressWarnings("deprecation")
|
||||
public static Version parseVersion(@Nullable String version, Version defaultVersion, ESLogger logger) {
|
||||
if (version == null) {
|
||||
return defaultVersion;
|
||||
|
|
|
@ -88,9 +88,6 @@ import org.elasticsearch.rest.action.admin.indices.template.put.RestPutIndexTemp
|
|||
import org.elasticsearch.rest.action.admin.indices.upgrade.RestUpgradeAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.validate.query.RestValidateQueryAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.validate.template.RestRenderSearchTemplateAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.warmer.delete.RestDeleteWarmerAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.warmer.get.RestGetWarmerAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.warmer.put.RestPutWarmerAction;
|
||||
import org.elasticsearch.rest.action.bulk.RestBulkAction;
|
||||
import org.elasticsearch.rest.action.cat.AbstractCatAction;
|
||||
import org.elasticsearch.rest.action.cat.RestAliasAction;
|
||||
|
@ -209,10 +206,6 @@ public class NetworkModule extends AbstractModule {
|
|||
RestDeleteIndexTemplateAction.class,
|
||||
RestHeadIndexTemplateAction.class,
|
||||
|
||||
RestPutWarmerAction.class,
|
||||
RestDeleteWarmerAction.class,
|
||||
RestGetWarmerAction.class,
|
||||
|
||||
RestPutMappingAction.class,
|
||||
RestGetMappingAction.class,
|
||||
RestGetFieldMappingAction.class,
|
||||
|
|
|
@ -519,6 +519,7 @@ public class BloomFilter {
|
|||
return k;
|
||||
}
|
||||
|
||||
@SuppressWarnings("fallthrough") // Uses fallthrough to implement a well know hashing algorithm
|
||||
public static long hash3_x64_128(byte[] key, int offset, int length, long seed) {
|
||||
final int nblocks = length >> 4; // Process as 128-bit blocks.
|
||||
|
||||
|
@ -598,7 +599,7 @@ public class BloomFilter {
|
|||
case 2:
|
||||
k1 ^= ((long) key[offset + 1]) << 8;
|
||||
case 1:
|
||||
k1 ^= ((long) key[offset]);
|
||||
k1 ^= (key[offset]);
|
||||
k1 *= c1;
|
||||
k1 = rotl64(k1, 31);
|
||||
k1 *= c2;
|
||||
|
|
|
@ -89,12 +89,12 @@ public class Analysis {
|
|||
// check for explicit version on the specific analyzer component
|
||||
String sVersion = settings.get("version");
|
||||
if (sVersion != null) {
|
||||
return Lucene.parseVersion(sVersion, Lucene.ANALYZER_VERSION, logger);
|
||||
return Lucene.parseVersion(sVersion, Version.LATEST, logger);
|
||||
}
|
||||
// check for explicit version on the index itself as default for all analysis components
|
||||
sVersion = indexSettings.get("index.analysis.version");
|
||||
if (sVersion != null) {
|
||||
return Lucene.parseVersion(sVersion, Lucene.ANALYZER_VERSION, logger);
|
||||
return Lucene.parseVersion(sVersion, Version.LATEST, logger);
|
||||
}
|
||||
// resolve the analysis version based on the version the index was created with
|
||||
return org.elasticsearch.Version.indexCreated(indexSettings).luceneVersion;
|
||||
|
|
|
@ -58,11 +58,16 @@ public class SingleFieldsVisitor extends FieldsVisitor {
|
|||
|
||||
public void postProcess(MappedFieldType fieldType) {
|
||||
if (uid != null) {
|
||||
// TODO: this switch seems very wrong...either each case should be breaking, or this should not be a switch
|
||||
switch (field) {
|
||||
case UidFieldMapper.NAME: addValue(field, uid.toString());
|
||||
case IdFieldMapper.NAME: addValue(field, uid.id());
|
||||
case TypeFieldMapper.NAME: addValue(field, uid.type());
|
||||
case UidFieldMapper.NAME:
|
||||
addValue(field, uid.toString());
|
||||
break;
|
||||
case IdFieldMapper.NAME:
|
||||
addValue(field, uid.id());
|
||||
break;
|
||||
case TypeFieldMapper.NAME:
|
||||
addValue(field, uid.type());
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -51,6 +51,7 @@ import org.elasticsearch.index.shard.IndexShardComponent;
|
|||
import java.io.Closeable;
|
||||
import java.io.EOFException;
|
||||
import java.io.IOException;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.nio.channels.FileChannel;
|
||||
import java.nio.file.DirectoryStream;
|
||||
import java.nio.file.Files;
|
||||
|
@ -440,7 +441,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
|
|||
if (config.isSyncOnEachOperation()) {
|
||||
current.sync();
|
||||
}
|
||||
assert current.assertBytesAtLocation(location, bytes);
|
||||
assert assertBytesAtLocation(location, bytes);
|
||||
return location;
|
||||
}
|
||||
} catch (AlreadyClosedException | IOException ex) {
|
||||
|
@ -454,6 +455,13 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
|
|||
}
|
||||
}
|
||||
|
||||
boolean assertBytesAtLocation(Translog.Location location, BytesReference expectedBytes) throws IOException {
|
||||
// tests can override this
|
||||
ByteBuffer buffer = ByteBuffer.allocate(location.size);
|
||||
current.readBytes(buffer, location.translogLocation);
|
||||
return new BytesArray(buffer.array()).equals(expectedBytes);
|
||||
}
|
||||
|
||||
/**
|
||||
* Snapshots the current transaction log allowing to safely iterate over the snapshot.
|
||||
* Snapshots are fixed in time and will not be updated with future operations.
|
||||
|
|
|
@ -218,11 +218,6 @@ public class TranslogWriter extends TranslogReader {
|
|||
}
|
||||
}
|
||||
|
||||
boolean assertBytesAtLocation(Translog.Location location, BytesReference expectedBytes) throws IOException {
|
||||
ByteBuffer buffer = ByteBuffer.allocate(location.size);
|
||||
readBytes(buffer, location.translogLocation);
|
||||
return new BytesArray(buffer.array()).equals(expectedBytes);
|
||||
}
|
||||
|
||||
private long getWrittenOffset() throws IOException {
|
||||
return channelReference.getChannel().position();
|
||||
|
|
|
@ -254,7 +254,7 @@ public class PercolatorService extends AbstractComponent {
|
|||
}
|
||||
PercolatorQuery percolatorQuery = builder.build();
|
||||
|
||||
if (context.isOnlyCount()) {
|
||||
if (context.isOnlyCount() || context.size() == 0) {
|
||||
TotalHitCountCollector collector = new TotalHitCountCollector();
|
||||
context.searcher().search(percolatorQuery, MultiCollector.wrap(collector, aggregatorCollector));
|
||||
if (aggregatorCollector != null) {
|
||||
|
|
|
@ -40,7 +40,6 @@ import org.elasticsearch.rest.RestController;
|
|||
import org.elasticsearch.rest.RestRequest;
|
||||
import org.elasticsearch.rest.RestResponse;
|
||||
import org.elasticsearch.rest.action.support.RestBuilderListener;
|
||||
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
|
@ -100,9 +99,6 @@ public class RestGetIndicesAction extends BaseRestHandler {
|
|||
case SETTINGS:
|
||||
writeSettings(response.settings().get(index), builder, request);
|
||||
break;
|
||||
case WARMERS:
|
||||
writeWarmers(response.warmers().get(index), builder, request);
|
||||
break;
|
||||
default:
|
||||
throw new IllegalStateException("feature [" + feature + "] is not valid");
|
||||
}
|
||||
|
@ -142,15 +138,6 @@ public class RestGetIndicesAction extends BaseRestHandler {
|
|||
builder.endObject();
|
||||
}
|
||||
|
||||
private void writeWarmers(List<IndexWarmersMetaData.Entry> warmers, XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject(Fields.WARMERS);
|
||||
if (warmers != null) {
|
||||
for (IndexWarmersMetaData.Entry warmer : warmers) {
|
||||
IndexWarmersMetaData.toXContent(warmer, builder, params);
|
||||
}
|
||||
}
|
||||
builder.endObject();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
|
|
|
@ -1,58 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.rest.action.admin.indices.warmer.delete;
|
||||
|
||||
import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerRequest;
|
||||
import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse;
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.client.Client;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.rest.BaseRestHandler;
|
||||
import org.elasticsearch.rest.RestChannel;
|
||||
import org.elasticsearch.rest.RestController;
|
||||
import org.elasticsearch.rest.RestRequest;
|
||||
import org.elasticsearch.rest.action.support.AcknowledgedRestListener;
|
||||
|
||||
import static org.elasticsearch.rest.RestRequest.Method.DELETE;
|
||||
|
||||
/**
|
||||
*/
|
||||
public class RestDeleteWarmerAction extends BaseRestHandler {
|
||||
|
||||
@Inject
|
||||
public RestDeleteWarmerAction(Settings settings, RestController controller, Client client) {
|
||||
super(settings, controller, client);
|
||||
controller.registerHandler(DELETE, "/{index}/_warmer", this);
|
||||
controller.registerHandler(DELETE, "/{index}/_warmer/{name}", this);
|
||||
controller.registerHandler(DELETE, "/{index}/_warmers", this);
|
||||
controller.registerHandler(DELETE, "/{index}/_warmers/{name}", this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) {
|
||||
DeleteWarmerRequest deleteWarmerRequest = new DeleteWarmerRequest(Strings.splitStringByCommaToArray(request.param("name")))
|
||||
.indices(Strings.splitStringByCommaToArray(request.param("index")));
|
||||
deleteWarmerRequest.timeout(request.paramAsTime("timeout", deleteWarmerRequest.timeout()));
|
||||
deleteWarmerRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteWarmerRequest.masterNodeTimeout()));
|
||||
deleteWarmerRequest.indicesOptions(IndicesOptions.fromRequest(request, deleteWarmerRequest.indicesOptions()));
|
||||
client.admin().indices().deleteWarmer(deleteWarmerRequest, new AcknowledgedRestListener<DeleteWarmerResponse>(channel));
|
||||
}
|
||||
}
|
|
@ -1,92 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.rest.action.admin.indices.warmer.get;
|
||||
|
||||
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
|
||||
import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequest;
|
||||
import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse;
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.client.Client;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.rest.BaseRestHandler;
|
||||
import org.elasticsearch.rest.BytesRestResponse;
|
||||
import org.elasticsearch.rest.RestChannel;
|
||||
import org.elasticsearch.rest.RestController;
|
||||
import org.elasticsearch.rest.RestRequest;
|
||||
import org.elasticsearch.rest.RestResponse;
|
||||
import org.elasticsearch.rest.action.support.RestBuilderListener;
|
||||
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
import static org.elasticsearch.rest.RestRequest.Method.GET;
|
||||
import static org.elasticsearch.rest.RestStatus.OK;
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
public class RestGetWarmerAction extends BaseRestHandler {
|
||||
|
||||
@Inject
|
||||
public RestGetWarmerAction(Settings settings, RestController controller, Client client) {
|
||||
super(settings, controller, client);
|
||||
controller.registerHandler(GET, "/_warmer/{name}", this);
|
||||
controller.registerHandler(GET, "/{index}/_warmer/{name}", this);
|
||||
controller.registerHandler(GET, "/{index}/_warmers/{name}", this);
|
||||
controller.registerHandler(GET, "/{index}/{type}/_warmer/{name}", this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) {
|
||||
final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
|
||||
final String[] types = Strings.splitStringByCommaToArray(request.param("type"));
|
||||
final String[] names = request.paramAsStringArray("name", Strings.EMPTY_ARRAY);
|
||||
|
||||
GetWarmersRequest getWarmersRequest = new GetWarmersRequest();
|
||||
getWarmersRequest.indices(indices).types(types).warmers(names);
|
||||
getWarmersRequest.local(request.paramAsBoolean("local", getWarmersRequest.local()));
|
||||
getWarmersRequest.indicesOptions(IndicesOptions.fromRequest(request, getWarmersRequest.indicesOptions()));
|
||||
client.admin().indices().getWarmers(getWarmersRequest, new RestBuilderListener<GetWarmersResponse>(channel) {
|
||||
|
||||
@Override
|
||||
public RestResponse buildResponse(GetWarmersResponse response, XContentBuilder builder) throws Exception {
|
||||
if (indices.length > 0 && response.warmers().isEmpty()) {
|
||||
return new BytesRestResponse(OK, builder.startObject().endObject());
|
||||
}
|
||||
|
||||
builder.startObject();
|
||||
for (ObjectObjectCursor<String, List<IndexWarmersMetaData.Entry>> entry : response.warmers()) {
|
||||
builder.startObject(entry.key, XContentBuilder.FieldCaseConversion.NONE);
|
||||
builder.startObject(IndexWarmersMetaData.TYPE, XContentBuilder.FieldCaseConversion.NONE);
|
||||
for (IndexWarmersMetaData.Entry warmerEntry : entry.value) {
|
||||
IndexWarmersMetaData.toXContent(warmerEntry, builder, request);
|
||||
}
|
||||
builder.endObject();
|
||||
builder.endObject();
|
||||
}
|
||||
builder.endObject();
|
||||
|
||||
return new BytesRestResponse(OK, builder);
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
|
@ -1,85 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.rest.action.admin.indices.warmer.put;
|
||||
|
||||
import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerRequest;
|
||||
import org.elasticsearch.action.search.SearchRequest;
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.client.Client;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
|
||||
import org.elasticsearch.rest.BaseRestHandler;
|
||||
import org.elasticsearch.rest.RestChannel;
|
||||
import org.elasticsearch.rest.RestController;
|
||||
import org.elasticsearch.rest.RestRequest;
|
||||
import org.elasticsearch.rest.action.support.AcknowledgedRestListener;
|
||||
import org.elasticsearch.rest.action.support.RestActions;
|
||||
import org.elasticsearch.search.builder.SearchSourceBuilder;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import static org.elasticsearch.rest.RestRequest.Method.POST;
|
||||
import static org.elasticsearch.rest.RestRequest.Method.PUT;
|
||||
|
||||
/**
|
||||
*/
|
||||
public class RestPutWarmerAction extends BaseRestHandler {
|
||||
|
||||
private final IndicesQueriesRegistry queryRegistry;
|
||||
|
||||
@Inject
|
||||
public RestPutWarmerAction(Settings settings, RestController controller, Client client, IndicesQueriesRegistry queryRegistry) {
|
||||
super(settings, controller, client);
|
||||
this.queryRegistry = queryRegistry;
|
||||
controller.registerHandler(PUT, "/_warmer/{name}", this);
|
||||
controller.registerHandler(PUT, "/{index}/_warmer/{name}", this);
|
||||
controller.registerHandler(PUT, "/{index}/{type}/_warmer/{name}", this);
|
||||
|
||||
controller.registerHandler(PUT, "/_warmers/{name}", this);
|
||||
controller.registerHandler(PUT, "/{index}/_warmers/{name}", this);
|
||||
controller.registerHandler(PUT, "/{index}/{type}/_warmers/{name}", this);
|
||||
|
||||
controller.registerHandler(POST, "/_warmer/{name}", this);
|
||||
controller.registerHandler(POST, "/{index}/_warmer/{name}", this);
|
||||
controller.registerHandler(POST, "/{index}/{type}/_warmer/{name}", this);
|
||||
|
||||
controller.registerHandler(POST, "/_warmers/{name}", this);
|
||||
controller.registerHandler(POST, "/{index}/_warmers/{name}", this);
|
||||
controller.registerHandler(POST, "/{index}/{type}/_warmers/{name}", this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws IOException {
|
||||
PutWarmerRequest putWarmerRequest = new PutWarmerRequest(request.param("name"));
|
||||
|
||||
BytesReference sourceBytes = RestActions.getRestContent(request);
|
||||
SearchSourceBuilder source = RestActions.getRestSearchSource(sourceBytes, queryRegistry, parseFieldMatcher);
|
||||
SearchRequest searchRequest = new SearchRequest(Strings.splitStringByCommaToArray(request.param("index")))
|
||||
.types(Strings.splitStringByCommaToArray(request.param("type")))
|
||||
.requestCache(request.paramAsBoolean("request_cache", null)).source(source);
|
||||
searchRequest.indicesOptions(IndicesOptions.fromRequest(request, searchRequest.indicesOptions()));
|
||||
putWarmerRequest.searchRequest(searchRequest);
|
||||
putWarmerRequest.timeout(request.paramAsTime("timeout", putWarmerRequest.timeout()));
|
||||
putWarmerRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putWarmerRequest.masterNodeTimeout()));
|
||||
client.admin().indices().putWarmer(putWarmerRequest, new AcknowledgedRestListener<>(channel));
|
||||
}
|
||||
}
|
|
@ -28,7 +28,6 @@ import org.apache.lucene.index.LeafReaderContext;
|
|||
import org.apache.lucene.index.NumericDocValues;
|
||||
import org.apache.lucene.search.TopDocs;
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.action.search.SearchType;
|
||||
import org.elasticsearch.cache.recycler.PageCacheRecycler;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
|
@ -94,7 +93,6 @@ import org.elasticsearch.search.internal.InternalScrollSearchRequest;
|
|||
import org.elasticsearch.search.internal.ScrollContext;
|
||||
import org.elasticsearch.search.internal.SearchContext;
|
||||
import org.elasticsearch.search.internal.SearchContext.Lifetime;
|
||||
import org.elasticsearch.search.internal.ShardSearchLocalRequest;
|
||||
import org.elasticsearch.search.internal.ShardSearchRequest;
|
||||
import org.elasticsearch.search.profile.Profilers;
|
||||
import org.elasticsearch.search.query.QueryPhase;
|
||||
|
@ -102,7 +100,6 @@ import org.elasticsearch.search.query.QuerySearchRequest;
|
|||
import org.elasticsearch.search.query.QuerySearchResult;
|
||||
import org.elasticsearch.search.query.QuerySearchResultProvider;
|
||||
import org.elasticsearch.search.query.ScrollQuerySearchResult;
|
||||
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -201,7 +198,6 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
|
|||
|
||||
this.indicesWarmer.addListener(new NormsWarmer(indicesWarmer));
|
||||
this.indicesWarmer.addListener(new FieldDataWarmer(indicesWarmer));
|
||||
this.indicesWarmer.addListener(new SearchWarmer());
|
||||
|
||||
defaultSearchTimeout = DEFAULT_SEARCH_TIMEOUT_SETTING.get(settings);
|
||||
clusterSettings.addSettingsUpdateConsumer(DEFAULT_SEARCH_TIMEOUT_SETTING, this::setDefaultSearchTimeout);
|
||||
|
@ -1163,76 +1159,6 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
|
|||
}
|
||||
}
|
||||
|
||||
class SearchWarmer implements IndicesWarmer.Listener {
|
||||
|
||||
@Override
|
||||
public TerminationHandle warmNewReaders(IndexShard indexShard, final Engine.Searcher searcher) {
|
||||
return internalWarm(indexShard, searcher, false);
|
||||
}
|
||||
|
||||
@Override
|
||||
public TerminationHandle warmTopReader(IndexShard indexShard, final Engine.Searcher searcher) {
|
||||
return internalWarm(indexShard, searcher, true);
|
||||
}
|
||||
|
||||
public TerminationHandle internalWarm(final IndexShard indexShard, final Engine.Searcher searcher, final boolean top) {
|
||||
IndexWarmersMetaData custom = indexShard.getIndexSettings().getIndexMetaData().custom(IndexWarmersMetaData.TYPE);
|
||||
if (custom == null) {
|
||||
return TerminationHandle.NO_WAIT;
|
||||
}
|
||||
final Executor executor = indicesWarmer.getExecutor();
|
||||
final CountDownLatch latch = new CountDownLatch(custom.entries().size());
|
||||
for (final IndexWarmersMetaData.Entry entry : custom.entries()) {
|
||||
executor.execute(() -> {
|
||||
SearchContext context = null;
|
||||
try {
|
||||
long now = System.nanoTime();
|
||||
final IndexService indexService = indicesService.indexServiceSafe(indexShard.shardId().index().name());
|
||||
QueryParseContext queryParseContext = new QueryParseContext(indicesService.getIndicesQueryRegistry());
|
||||
queryParseContext.parseFieldMatcher(indexService.getIndexSettings().getParseFieldMatcher());
|
||||
ShardSearchRequest request = new ShardSearchLocalRequest(indexShard.shardId(), indexShard.getIndexSettings()
|
||||
.getNumberOfShards(),
|
||||
SearchType.QUERY_THEN_FETCH, entry.source().build(queryParseContext), entry.types(), entry.requestCache());
|
||||
context = createContext(request, searcher);
|
||||
// if we use sort, we need to do query to sort on
|
||||
// it and load relevant field data
|
||||
// if not, we might as well set size=0 (and cache
|
||||
// if needed)
|
||||
if (context.sort() == null) {
|
||||
context.size(0);
|
||||
}
|
||||
boolean canCache = indicesQueryCache.canCache(request, context);
|
||||
// early terminate when we can cache, since we
|
||||
// can only do proper caching on top level searcher
|
||||
// also, if we can't cache, and its top, we don't
|
||||
// need to execute it, since we already did when its
|
||||
// not top
|
||||
if (canCache != top) {
|
||||
return;
|
||||
}
|
||||
loadOrExecuteQueryPhase(request, context, queryPhase);
|
||||
long took = System.nanoTime() - now;
|
||||
if (indexShard.warmerService().logger().isTraceEnabled()) {
|
||||
indexShard.warmerService().logger().trace("warmed [{}], took [{}]", entry.name(), TimeValue.timeValueNanos(took));
|
||||
}
|
||||
} catch (Throwable t) {
|
||||
indexShard.warmerService().logger().warn("warmer [{}] failed", t, entry.name());
|
||||
} finally {
|
||||
try {
|
||||
if (context != null) {
|
||||
freeContext(context.id());
|
||||
cleanContext(context);
|
||||
}
|
||||
} finally {
|
||||
latch.countDown();
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
return () -> latch.await();
|
||||
}
|
||||
}
|
||||
|
||||
class Reaper implements Runnable {
|
||||
@Override
|
||||
public void run() {
|
||||
|
|
|
@ -1,61 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.search.warmer;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
public class IndexWarmerMissingException extends ElasticsearchException {
|
||||
|
||||
private final String[] names;
|
||||
|
||||
public IndexWarmerMissingException(String... names) {
|
||||
super("index_warmer " + Arrays.toString(names) + " missing");
|
||||
this.names = names;
|
||||
}
|
||||
|
||||
public String[] names() {
|
||||
return this.names;
|
||||
}
|
||||
|
||||
|
||||
public IndexWarmerMissingException(StreamInput in) throws IOException{
|
||||
super(in);
|
||||
names = in.readStringArray();
|
||||
}
|
||||
|
||||
@Override
|
||||
public RestStatus status() {
|
||||
return RestStatus.NOT_FOUND;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
out.writeStringArray(names);
|
||||
}
|
||||
}
|
|
@ -1,354 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.search.warmer;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.support.ToXContentToBytes;
|
||||
import org.elasticsearch.cluster.AbstractDiffable;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.bytes.BytesArray;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.common.xcontent.XContentGenerator;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.index.query.QueryParseContext;
|
||||
import org.elasticsearch.search.builder.SearchSourceBuilder;
|
||||
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
*/
|
||||
public class IndexWarmersMetaData extends AbstractDiffable<IndexMetaData.Custom> implements IndexMetaData.Custom {
|
||||
|
||||
public static final String TYPE = "warmers";
|
||||
|
||||
public static final IndexWarmersMetaData PROTO = new IndexWarmersMetaData();
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
|
||||
IndexWarmersMetaData that = (IndexWarmersMetaData) o;
|
||||
|
||||
return entries.equals(that.entries);
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return entries.hashCode();
|
||||
}
|
||||
|
||||
public static class Entry {
|
||||
private final String name;
|
||||
private final String[] types;
|
||||
private final SearchSource source;
|
||||
private final Boolean requestCache;
|
||||
|
||||
public Entry(String name, String[] types, Boolean requestCache, SearchSource source) {
|
||||
this.name = name;
|
||||
this.types = types == null ? Strings.EMPTY_ARRAY : types;
|
||||
this.source = source;
|
||||
this.requestCache = requestCache;
|
||||
}
|
||||
|
||||
public String name() {
|
||||
return this.name;
|
||||
}
|
||||
|
||||
public String[] types() {
|
||||
return this.types;
|
||||
}
|
||||
|
||||
@Nullable
|
||||
public SearchSource source() {
|
||||
return this.source;
|
||||
}
|
||||
|
||||
@Nullable
|
||||
public Boolean requestCache() {
|
||||
return this.requestCache;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
|
||||
Entry entry = (Entry) o;
|
||||
|
||||
if (!name.equals(entry.name)) return false;
|
||||
if (!Arrays.equals(types, entry.types)) return false;
|
||||
if (!source.equals(entry.source)) return false;
|
||||
return Objects.equals(requestCache, entry.requestCache);
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int result = name.hashCode();
|
||||
result = 31 * result + Arrays.hashCode(types);
|
||||
result = 31 * result + source.hashCode();
|
||||
result = 31 * result + (requestCache != null ? requestCache.hashCode() : 0);
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
private final List<Entry> entries;
|
||||
|
||||
|
||||
public IndexWarmersMetaData(Entry... entries) {
|
||||
this.entries = Arrays.asList(entries);
|
||||
}
|
||||
|
||||
public List<Entry> entries() {
|
||||
return this.entries;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String type() {
|
||||
return TYPE;
|
||||
}
|
||||
|
||||
@Override
|
||||
public IndexWarmersMetaData readFrom(StreamInput in) throws IOException {
|
||||
Entry[] entries = new Entry[in.readVInt()];
|
||||
for (int i = 0; i < entries.length; i++) {
|
||||
String name = in.readString();
|
||||
String[] types = in.readStringArray();
|
||||
SearchSource source = null;
|
||||
if (in.readBoolean()) {
|
||||
source = new SearchSource(in);
|
||||
}
|
||||
Boolean queryCache;
|
||||
queryCache = in.readOptionalBoolean();
|
||||
entries[i] = new Entry(name, types, queryCache, source);
|
||||
}
|
||||
return new IndexWarmersMetaData(entries);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeVInt(entries().size());
|
||||
for (Entry entry : entries()) {
|
||||
out.writeString(entry.name());
|
||||
out.writeStringArray(entry.types());
|
||||
if (entry.source() == null) {
|
||||
out.writeBoolean(false);
|
||||
} else {
|
||||
out.writeBoolean(true);
|
||||
entry.source.writeTo(out);
|
||||
}
|
||||
out.writeOptionalBoolean(entry.requestCache());
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public IndexWarmersMetaData fromMap(Map<String, Object> map) throws IOException {
|
||||
// if it starts with the type, remove it
|
||||
if (map.size() == 1 && map.containsKey(TYPE)) {
|
||||
map = (Map<String, Object>) map.values().iterator().next();
|
||||
}
|
||||
XContentBuilder builder = XContentFactory.smileBuilder().map(map);
|
||||
try (XContentParser parser = XContentFactory.xContent(XContentType.SMILE).createParser(builder.bytes())) {
|
||||
// move to START_OBJECT
|
||||
parser.nextToken();
|
||||
return fromXContent(parser);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public IndexWarmersMetaData fromXContent(XContentParser parser) throws IOException {
|
||||
// we get here after we are at warmers token
|
||||
String currentFieldName = null;
|
||||
XContentParser.Token token;
|
||||
List<Entry> entries = new ArrayList<>();
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
currentFieldName = parser.currentName();
|
||||
} else if (token == XContentParser.Token.START_OBJECT) {
|
||||
String name = currentFieldName;
|
||||
List<String> types = new ArrayList<>(2);
|
||||
SearchSource source = null;
|
||||
Boolean queryCache = null;
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
currentFieldName = parser.currentName();
|
||||
} else if (token == XContentParser.Token.START_ARRAY) {
|
||||
if ("types".equals(currentFieldName)) {
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
|
||||
types.add(parser.text());
|
||||
}
|
||||
}
|
||||
} else if (token == XContentParser.Token.START_OBJECT) {
|
||||
if ("source".equals(currentFieldName)) {
|
||||
ByteArrayOutputStream out = new ByteArrayOutputStream();
|
||||
try (XContentGenerator generator = XContentType.JSON.xContent().createGenerator(out)) {
|
||||
generator.copyCurrentStructure(parser);
|
||||
}
|
||||
source = new SearchSource(new BytesArray(out.toByteArray()));
|
||||
}
|
||||
} else if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) {
|
||||
if ("source".equals(currentFieldName)) {
|
||||
source = new SearchSource(new BytesArray(parser.binaryValue()));
|
||||
}
|
||||
} else if (token.isValue()) {
|
||||
if ("requestCache".equals(currentFieldName) || "request_cache".equals(currentFieldName)) {
|
||||
queryCache = parser.booleanValue();
|
||||
}
|
||||
}
|
||||
}
|
||||
entries.add(new Entry(name, types.size() == 0 ? Strings.EMPTY_ARRAY : types.toArray(new String[types.size()]), queryCache, source));
|
||||
}
|
||||
}
|
||||
return new IndexWarmersMetaData(entries.toArray(new Entry[entries.size()]));
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
|
||||
//No need, IndexMetaData already writes it
|
||||
//builder.startObject(TYPE, XContentBuilder.FieldCaseConversion.NONE);
|
||||
for (Entry entry : entries()) {
|
||||
toXContent(entry, builder, params);
|
||||
}
|
||||
//No need, IndexMetaData already writes it
|
||||
//builder.endObject();
|
||||
return builder;
|
||||
}
|
||||
|
||||
public static void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params params) throws IOException {
|
||||
builder.startObject(entry.name(), XContentBuilder.FieldCaseConversion.NONE);
|
||||
builder.field("types", entry.types());
|
||||
if (entry.requestCache() != null) {
|
||||
builder.field("requestCache", entry.requestCache());
|
||||
}
|
||||
builder.field("source", entry.source());
|
||||
builder.endObject();
|
||||
}
|
||||
|
||||
@Override
|
||||
public IndexMetaData.Custom mergeWith(IndexMetaData.Custom other) {
|
||||
IndexWarmersMetaData second = (IndexWarmersMetaData) other;
|
||||
List<Entry> entries = new ArrayList<>();
|
||||
entries.addAll(entries());
|
||||
for (Entry secondEntry : second.entries()) {
|
||||
boolean found = false;
|
||||
for (Entry firstEntry : entries()) {
|
||||
if (firstEntry.name().equals(secondEntry.name())) {
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!found) {
|
||||
entries.add(secondEntry);
|
||||
}
|
||||
}
|
||||
return new IndexWarmersMetaData(entries.toArray(new Entry[entries.size()]));
|
||||
}
|
||||
|
||||
public static class SearchSource extends ToXContentToBytes implements Writeable<SearchSource> {
|
||||
private final BytesReference binary;
|
||||
private SearchSourceBuilder cached;
|
||||
|
||||
public SearchSource(BytesReference bytesArray) {
|
||||
if (bytesArray == null) {
|
||||
throw new IllegalArgumentException("bytesArray must not be null");
|
||||
}
|
||||
this.binary = bytesArray;
|
||||
}
|
||||
|
||||
public SearchSource(StreamInput input) throws IOException {
|
||||
this(input.readBytesReference());
|
||||
}
|
||||
|
||||
public SearchSource(SearchSourceBuilder source) {
|
||||
try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) {
|
||||
source.toXContent(builder, ToXContent.EMPTY_PARAMS);
|
||||
binary = builder.bytes();
|
||||
} catch (IOException ex) {
|
||||
throw new ElasticsearchException("failed to generate XContent", ex);
|
||||
}
|
||||
}
|
||||
|
||||
public SearchSourceBuilder build(QueryParseContext ctx) throws IOException {
|
||||
if (cached == null) {
|
||||
try (XContentParser parser = XContentFactory.xContent(binary).createParser(binary)) {
|
||||
ctx.reset(parser);
|
||||
cached = SearchSourceBuilder.parseSearchSource(parser, ctx);
|
||||
}
|
||||
}
|
||||
return cached;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
if (binary == null) {
|
||||
cached.toXContent(builder, params);
|
||||
} else {
|
||||
try (XContentParser parser = XContentFactory.xContent(binary).createParser(binary)) {
|
||||
builder.copyCurrentStructure(parser);
|
||||
}
|
||||
}
|
||||
return builder;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeBytesReference(binary);
|
||||
}
|
||||
|
||||
@Override
|
||||
public SearchSource readFrom(StreamInput in) throws IOException {
|
||||
return new SearchSource(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
|
||||
SearchSource that = (SearchSource) o;
|
||||
|
||||
return binary.equals(that.binary);
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return binary.hashCode();
|
||||
}
|
||||
}
|
||||
}
|
|
@ -363,7 +363,7 @@ public class ThreadPool extends AbstractComponent {
|
|||
if (!Names.SAME.equals(name)) {
|
||||
command = new ThreadedRunnable(command, executor(name));
|
||||
}
|
||||
return scheduler.schedule(command, delay.millis(), TimeUnit.MILLISECONDS);
|
||||
return scheduler.schedule(new LoggingRunnable(command), delay.millis(), TimeUnit.MILLISECONDS);
|
||||
}
|
||||
|
||||
public void shutdown() {
|
||||
|
@ -639,6 +639,7 @@ public class ThreadPool extends AbstractComponent {
|
|||
runnable.run();
|
||||
} catch (Throwable t) {
|
||||
logger.warn("failed to run {}", t, runnable.toString());
|
||||
throw t;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -71,7 +71,6 @@ import org.elasticsearch.search.SearchException;
|
|||
import org.elasticsearch.search.SearchParseException;
|
||||
import org.elasticsearch.search.SearchShardTarget;
|
||||
import org.elasticsearch.search.internal.SearchContext;
|
||||
import org.elasticsearch.search.warmer.IndexWarmerMissingException;
|
||||
import org.elasticsearch.snapshots.SnapshotException;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.test.TestSearchContext;
|
||||
|
@ -494,12 +493,6 @@ public class ExceptionSerializationTests extends ESTestCase {
|
|||
assertEquals("[_na] msg", ex.getMessage());
|
||||
}
|
||||
|
||||
public void testIndexWarmerMissingException() throws IOException {
|
||||
IndexWarmerMissingException ex = serialize(new IndexWarmerMissingException("w1", "w2"));
|
||||
assertEquals("index_warmer [w1, w2] missing", ex.getMessage());
|
||||
assertArrayEquals(new String[]{"w1", "w2"}, ex.names());
|
||||
}
|
||||
|
||||
public void testIndexTemplateMissingException() throws IOException {
|
||||
IndexTemplateMissingException ex = serialize(new IndexTemplateMissingException("name"));
|
||||
assertEquals("index_template [name] missing", ex.getMessage());
|
||||
|
@ -735,7 +728,6 @@ public class ExceptionSerializationTests extends ESTestCase {
|
|||
ids.put(90, org.elasticsearch.index.engine.RefreshFailedEngineException.class);
|
||||
ids.put(91, org.elasticsearch.search.aggregations.AggregationInitializationException.class);
|
||||
ids.put(92, org.elasticsearch.indices.recovery.DelayRecoveryException.class);
|
||||
ids.put(93, org.elasticsearch.search.warmer.IndexWarmerMissingException.class);
|
||||
ids.put(94, org.elasticsearch.client.transport.NoNodeAvailableException.class);
|
||||
ids.put(95, null);
|
||||
ids.put(96, org.elasticsearch.snapshots.InvalidSnapshotNameException.class);
|
||||
|
|
|
@ -26,7 +26,6 @@ import org.elasticsearch.cluster.metadata.MappingMetaData;
|
|||
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.IndexNotFoundException;
|
||||
import org.elasticsearch.search.warmer.IndexWarmersMetaData.Entry;
|
||||
import org.elasticsearch.test.ESIntegTestCase;
|
||||
|
||||
import java.util.ArrayList;
|
||||
|
@ -52,7 +51,6 @@ public class GetIndexIT extends ESIntegTestCase {
|
|||
assertAcked(prepareCreate("idx").addAlias(new Alias("alias_idx")).addMapping("type1", "{\"type1\":{}}")
|
||||
.setSettings(Settings.builder().put("number_of_shards", 1)).get());
|
||||
ensureSearchable("idx");
|
||||
assertAcked(client().admin().indices().preparePutWarmer("warmer1").setSearchRequest(client().prepareSearch("idx")).get());
|
||||
createIndex("empty_idx");
|
||||
ensureSearchable("idx", "empty_idx");
|
||||
}
|
||||
|
@ -66,7 +64,6 @@ public class GetIndexIT extends ESIntegTestCase {
|
|||
assertAliases(response, "idx");
|
||||
assertMappings(response, "idx");
|
||||
assertSettings(response, "idx");
|
||||
assertWarmers(response, "idx");
|
||||
}
|
||||
|
||||
public void testSimpleUnknownIndex() {
|
||||
|
@ -87,7 +84,6 @@ public class GetIndexIT extends ESIntegTestCase {
|
|||
assertEmptyAliases(response);
|
||||
assertEmptyOrOnlyDefaultMappings(response, "empty_idx");
|
||||
assertNonEmptySettings(response, "empty_idx");
|
||||
assertEmptyWarmers(response);
|
||||
}
|
||||
|
||||
public void testSimpleMapping() {
|
||||
|
@ -100,7 +96,6 @@ public class GetIndexIT extends ESIntegTestCase {
|
|||
assertMappings(response, "idx");
|
||||
assertEmptyAliases(response);
|
||||
assertEmptySettings(response);
|
||||
assertEmptyWarmers(response);
|
||||
}
|
||||
|
||||
public void testSimpleAlias() {
|
||||
|
@ -113,7 +108,6 @@ public class GetIndexIT extends ESIntegTestCase {
|
|||
assertAliases(response, "idx");
|
||||
assertEmptyMappings(response);
|
||||
assertEmptySettings(response);
|
||||
assertEmptyWarmers(response);
|
||||
}
|
||||
|
||||
public void testSimpleSettings() {
|
||||
|
@ -126,20 +120,6 @@ public class GetIndexIT extends ESIntegTestCase {
|
|||
assertSettings(response, "idx");
|
||||
assertEmptyAliases(response);
|
||||
assertEmptyMappings(response);
|
||||
assertEmptyWarmers(response);
|
||||
}
|
||||
|
||||
public void testSimpleWarmer() {
|
||||
GetIndexResponse response = runWithRandomFeatureMethod(client().admin().indices().prepareGetIndex().addIndices("idx"),
|
||||
Feature.WARMERS);
|
||||
String[] indices = response.indices();
|
||||
assertThat(indices, notNullValue());
|
||||
assertThat(indices.length, equalTo(1));
|
||||
assertThat(indices[0], equalTo("idx"));
|
||||
assertWarmers(response, "idx");
|
||||
assertEmptyAliases(response);
|
||||
assertEmptyMappings(response);
|
||||
assertEmptySettings(response);
|
||||
}
|
||||
|
||||
public void testSimpleMixedFeatures() {
|
||||
|
@ -169,11 +149,6 @@ public class GetIndexIT extends ESIntegTestCase {
|
|||
} else {
|
||||
assertEmptySettings(response);
|
||||
}
|
||||
if (features.contains(Feature.WARMERS)) {
|
||||
assertWarmers(response, "idx");
|
||||
} else {
|
||||
assertEmptyWarmers(response);
|
||||
}
|
||||
}
|
||||
|
||||
public void testEmptyMixedFeatures() {
|
||||
|
@ -199,7 +174,6 @@ public class GetIndexIT extends ESIntegTestCase {
|
|||
} else {
|
||||
assertEmptySettings(response);
|
||||
}
|
||||
assertEmptyWarmers(response);
|
||||
}
|
||||
|
||||
public void testGetIndexWithBlocks() {
|
||||
|
@ -235,18 +209,6 @@ public class GetIndexIT extends ESIntegTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
private void assertWarmers(GetIndexResponse response, String indexName) {
|
||||
ImmutableOpenMap<String, List<Entry>> warmers = response.warmers();
|
||||
assertThat(warmers, notNullValue());
|
||||
assertThat(warmers.size(), equalTo(1));
|
||||
List<Entry> indexWarmers = warmers.get(indexName);
|
||||
assertThat(indexWarmers, notNullValue());
|
||||
assertThat(indexWarmers.size(), equalTo(1));
|
||||
Entry warmer = indexWarmers.get(0);
|
||||
assertThat(warmer, notNullValue());
|
||||
assertThat(warmer.name(), equalTo("warmer1"));
|
||||
}
|
||||
|
||||
private void assertSettings(GetIndexResponse response, String indexName) {
|
||||
ImmutableOpenMap<String, Settings> settings = response.settings();
|
||||
assertThat(settings, notNullValue());
|
||||
|
@ -305,11 +267,6 @@ public class GetIndexIT extends ESIntegTestCase {
|
|||
assertThat(alias.alias(), equalTo("alias_idx"));
|
||||
}
|
||||
|
||||
private void assertEmptyWarmers(GetIndexResponse response) {
|
||||
assertThat(response.warmers(), notNullValue());
|
||||
assertThat(response.warmers().isEmpty(), equalTo(true));
|
||||
}
|
||||
|
||||
private void assertEmptySettings(GetIndexResponse response) {
|
||||
assertThat(response.settings(), notNullValue());
|
||||
assertThat(response.settings().isEmpty(), equalTo(true));
|
||||
|
|
|
@ -1,35 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.action.admin.indices.warmer.put;
|
||||
|
||||
import org.elasticsearch.action.ActionRequestValidationException;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
||||
import static org.hamcrest.CoreMatchers.containsString;
|
||||
import static org.hamcrest.Matchers.hasSize;
|
||||
|
||||
public class PutWarmerRequestTests extends ESTestCase {
|
||||
// issue 4196
|
||||
public void testThatValidationWithoutSpecifyingSearchRequestFails() {
|
||||
PutWarmerRequest putWarmerRequest = new PutWarmerRequest("foo");
|
||||
ActionRequestValidationException validationException = putWarmerRequest.validate();
|
||||
assertThat(validationException.validationErrors(), hasSize(1));
|
||||
assertThat(validationException.getMessage(), containsString("search request is missing"));
|
||||
}
|
||||
}
|
|
@ -65,6 +65,7 @@ import org.junit.BeforeClass;
|
|||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
|
@ -75,9 +76,13 @@ import java.util.concurrent.atomic.AtomicInteger;
|
|||
|
||||
import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.state;
|
||||
import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.stateWithStartedPrimary;
|
||||
import static org.hamcrest.CoreMatchers.not;
|
||||
import static org.hamcrest.Matchers.arrayWithSize;
|
||||
import static org.hamcrest.Matchers.empty;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.hasItem;
|
||||
import static org.hamcrest.Matchers.instanceOf;
|
||||
import static org.hamcrest.Matchers.is;
|
||||
import static org.hamcrest.Matchers.notNullValue;
|
||||
import static org.hamcrest.Matchers.nullValue;
|
||||
|
||||
|
@ -486,7 +491,39 @@ public class TransportReplicationActionTests extends ESTestCase {
|
|||
replicationPhase.run();
|
||||
final CapturingTransport.CapturedRequest[] capturedRequests = transport.capturedRequests();
|
||||
transport.clear();
|
||||
assertThat(capturedRequests.length, equalTo(assignedReplicas));
|
||||
|
||||
HashMap<String, Request> nodesSentTo = new HashMap<>();
|
||||
boolean executeOnReplica =
|
||||
action.shouldExecuteReplication(clusterService.state().getMetaData().index(shardId.getIndex()).getSettings());
|
||||
for (CapturingTransport.CapturedRequest capturedRequest : capturedRequests) {
|
||||
// no duplicate requests
|
||||
Request replicationRequest = (Request) capturedRequest.request;
|
||||
assertNull(nodesSentTo.put(capturedRequest.node.getId(), replicationRequest));
|
||||
// the request is hitting the correct shard
|
||||
assertEquals(request.shardId, replicationRequest.shardId);
|
||||
}
|
||||
|
||||
// no request was sent to the local node
|
||||
assertThat(nodesSentTo.keySet(), not(hasItem(clusterService.state().getNodes().localNodeId())));
|
||||
|
||||
// requests were sent to the correct shard copies
|
||||
for (ShardRouting shard : clusterService.state().getRoutingTable().shardRoutingTable(shardId.getIndex(), shardId.id())) {
|
||||
if (shard.primary() == false && executeOnReplica == false) {
|
||||
continue;
|
||||
}
|
||||
if (shard.unassigned()) {
|
||||
continue;
|
||||
}
|
||||
if (shard.primary() == false) {
|
||||
nodesSentTo.remove(shard.currentNodeId());
|
||||
}
|
||||
if (shard.relocating()) {
|
||||
nodesSentTo.remove(shard.relocatingNodeId());
|
||||
}
|
||||
}
|
||||
|
||||
assertThat(nodesSentTo.entrySet(), is(empty()));
|
||||
|
||||
if (assignedReplicas > 0) {
|
||||
assertThat("listener is done, but there are outstanding replicas", listener.isDone(), equalTo(false));
|
||||
}
|
||||
|
@ -511,6 +548,12 @@ public class TransportReplicationActionTests extends ESTestCase {
|
|||
transport.clear();
|
||||
assertEquals(1, shardFailedRequests.length);
|
||||
CapturingTransport.CapturedRequest shardFailedRequest = shardFailedRequests[0];
|
||||
// get the shard the request was sent to
|
||||
ShardRouting routing = clusterService.state().getRoutingNodes().node(capturedRequest.node.id()).get(request.shardId.id());
|
||||
// and the shard that was requested to be failed
|
||||
ShardStateAction.ShardRoutingEntry shardRoutingEntry = (ShardStateAction.ShardRoutingEntry)shardFailedRequest.request;
|
||||
// the shard the request was sent to and the shard to be failed should be the same
|
||||
assertEquals(shardRoutingEntry.getShardRouting(), routing);
|
||||
failures.add(shardFailedRequest);
|
||||
transport.handleResponse(shardFailedRequest.requestId, TransportResponse.Empty.INSTANCE);
|
||||
}
|
||||
|
|
|
@ -28,7 +28,6 @@ import org.elasticsearch.cluster.metadata.AliasMetaData;
|
|||
import org.elasticsearch.cluster.metadata.MappingMetaData;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.search.warmer.IndexWarmersMetaData.Entry;
|
||||
import org.elasticsearch.test.ESBackcompatTestCase;
|
||||
|
||||
import java.util.List;
|
||||
|
@ -88,21 +87,4 @@ public class GetIndexBackwardsCompatibilityIT extends ESBackcompatTestCase {
|
|||
assertThat(settings.get("index.number_of_shards"), equalTo("1"));
|
||||
}
|
||||
|
||||
public void testGetWarmers() throws Exception {
|
||||
createIndex("test");
|
||||
ensureSearchable("test");
|
||||
assertAcked(client().admin().indices().preparePutWarmer("warmer1").setSearchRequest(client().prepareSearch("test")).get());
|
||||
ensureSearchable("test");
|
||||
GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().addIndices("test").addFeatures(Feature.WARMERS)
|
||||
.execute().actionGet();
|
||||
ImmutableOpenMap<String, List<Entry>> warmersMap = getIndexResponse.warmers();
|
||||
assertThat(warmersMap, notNullValue());
|
||||
assertThat(warmersMap.size(), equalTo(1));
|
||||
List<Entry> warmersList = warmersMap.get("test");
|
||||
assertThat(warmersList, notNullValue());
|
||||
assertThat(warmersList.size(), equalTo(1));
|
||||
Entry warmer = warmersList.get(0);
|
||||
assertThat(warmer, notNullValue());
|
||||
assertThat(warmer.name(), equalTo("warmer1"));
|
||||
}
|
||||
}
|
||||
|
|
|
@ -32,20 +32,25 @@ import org.elasticsearch.action.admin.indices.upgrade.UpgradeIT;
|
|||
import org.elasticsearch.action.get.GetResponse;
|
||||
import org.elasticsearch.action.search.SearchRequestBuilder;
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
|
||||
import org.elasticsearch.common.io.FileSystemUtils;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.MultiDataPathUpgrader;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentHelper;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.env.NodeEnvironment;
|
||||
import org.elasticsearch.gateway.MetaDataStateFormat;
|
||||
import org.elasticsearch.index.engine.EngineConfig;
|
||||
import org.elasticsearch.index.engine.Segment;
|
||||
import org.elasticsearch.index.mapper.string.StringFieldMapperPositionIncrementGapTests;
|
||||
import org.elasticsearch.index.query.QueryBuilders;
|
||||
import org.elasticsearch.index.shard.MergePolicyConfig;
|
||||
import org.elasticsearch.indices.recovery.RecoverySettings;
|
||||
import org.elasticsearch.search.SearchHit;
|
||||
import org.elasticsearch.search.aggregations.AggregationBuilders;
|
||||
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
|
||||
|
@ -423,4 +428,62 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
|
|||
UpgradeIT.assertUpgraded(client(), indexName);
|
||||
}
|
||||
|
||||
private Path getNodeDir(String indexFile) throws IOException {
|
||||
Path unzipDir = createTempDir();
|
||||
Path unzipDataDir = unzipDir.resolve("data");
|
||||
|
||||
// decompress the index
|
||||
Path backwardsIndex = getBwcIndicesPath().resolve(indexFile);
|
||||
try (InputStream stream = Files.newInputStream(backwardsIndex)) {
|
||||
TestUtil.unzip(stream, unzipDir);
|
||||
}
|
||||
|
||||
// check it is unique
|
||||
assertTrue(Files.exists(unzipDataDir));
|
||||
Path[] list = FileSystemUtils.files(unzipDataDir);
|
||||
if (list.length != 1) {
|
||||
throw new IllegalStateException("Backwards index must contain exactly one cluster");
|
||||
}
|
||||
|
||||
// the bwc scripts packs the indices under this path
|
||||
return list[0].resolve("nodes/0/");
|
||||
}
|
||||
|
||||
public void testOldClusterStates() throws Exception {
|
||||
// dangling indices do not load the global state, only the per-index states
|
||||
// so we make sure we can read them separately
|
||||
MetaDataStateFormat<MetaData> globalFormat = new MetaDataStateFormat<MetaData>(XContentType.JSON, "global-") {
|
||||
|
||||
@Override
|
||||
public void toXContent(XContentBuilder builder, MetaData state) throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public MetaData fromXContent(XContentParser parser) throws IOException {
|
||||
return MetaData.Builder.fromXContent(parser);
|
||||
}
|
||||
};
|
||||
MetaDataStateFormat<IndexMetaData> indexFormat = new MetaDataStateFormat<IndexMetaData>(XContentType.JSON, "state-") {
|
||||
|
||||
@Override
|
||||
public void toXContent(XContentBuilder builder, IndexMetaData state) throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public IndexMetaData fromXContent(XContentParser parser) throws IOException {
|
||||
return IndexMetaData.Builder.fromXContent(parser);
|
||||
}
|
||||
};
|
||||
Collections.shuffle(indexes, random());
|
||||
for (String indexFile : indexes) {
|
||||
String indexName = indexFile.replace(".zip", "").toLowerCase(Locale.ROOT).replace("unsupported-", "index-");
|
||||
Path nodeDir = getNodeDir(indexFile);
|
||||
logger.info("Parsing cluster state files from index [" + indexName + "]");
|
||||
assertNotNull(globalFormat.loadLatestState(logger, nodeDir)); // no exception
|
||||
Path indexDir = nodeDir.resolve("indices").resolve(indexName);
|
||||
assertNotNull(indexFormat.loadLatestState(logger, indexDir)); // no exception
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -27,6 +27,7 @@ import org.elasticsearch.cluster.service.InternalClusterService;
|
|||
import org.elasticsearch.cluster.service.PendingClusterTask;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Priority;
|
||||
import org.elasticsearch.common.collect.Tuple;
|
||||
import org.elasticsearch.common.component.AbstractLifecycleComponent;
|
||||
import org.elasticsearch.common.component.LifecycleComponent;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
|
@ -51,9 +52,12 @@ import java.util.HashSet;
|
|||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.BrokenBarrierException;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
import java.util.concurrent.CopyOnWriteArrayList;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.CyclicBarrier;
|
||||
import java.util.concurrent.Semaphore;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
|
@ -61,6 +65,7 @@ import java.util.concurrent.atomic.AtomicInteger;
|
|||
|
||||
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
|
||||
import static org.hamcrest.Matchers.empty;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.greaterThan;
|
||||
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
|
||||
|
@ -796,7 +801,92 @@ public class ClusterServiceIT extends ESIntegTestCase {
|
|||
assertTrue(published.get());
|
||||
}
|
||||
|
||||
public void testClusterStateBatchedUpdates() throws InterruptedException {
|
||||
// test that for a single thread, tasks are executed in the order
|
||||
// that they are submitted
|
||||
public void testClusterStateUpdateTasksAreExecutedInOrder() throws BrokenBarrierException, InterruptedException {
|
||||
Settings settings = settingsBuilder()
|
||||
.put("discovery.type", "local")
|
||||
.build();
|
||||
internalCluster().startNode(settings);
|
||||
ClusterService clusterService = internalCluster().getInstance(ClusterService.class);
|
||||
|
||||
class TaskExecutor implements ClusterStateTaskExecutor<Integer> {
|
||||
List<Integer> tasks = new ArrayList<>();
|
||||
|
||||
@Override
|
||||
public BatchResult<Integer> execute(ClusterState currentState, List<Integer> tasks) throws Exception {
|
||||
this.tasks.addAll(tasks);
|
||||
return BatchResult.<Integer>builder().successes(tasks).build(ClusterState.builder(currentState).build());
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean runOnlyOnMaster() {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
int numberOfThreads = randomIntBetween(2, 8);
|
||||
TaskExecutor[] executors = new TaskExecutor[numberOfThreads];
|
||||
for (int i = 0; i < numberOfThreads; i++) {
|
||||
executors[i] = new TaskExecutor();
|
||||
}
|
||||
|
||||
int tasksSubmittedPerThread = randomIntBetween(2, 1024);
|
||||
|
||||
CopyOnWriteArrayList<Tuple<String, Throwable>> failures = new CopyOnWriteArrayList<>();
|
||||
CountDownLatch updateLatch = new CountDownLatch(numberOfThreads * tasksSubmittedPerThread);
|
||||
|
||||
ClusterStateTaskListener listener = new ClusterStateTaskListener() {
|
||||
@Override
|
||||
public void onFailure(String source, Throwable t) {
|
||||
logger.error("unexpected failure: [{}]", t, source);
|
||||
failures.add(new Tuple<>(source, t));
|
||||
updateLatch.countDown();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
|
||||
updateLatch.countDown();
|
||||
}
|
||||
};
|
||||
|
||||
CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);
|
||||
|
||||
for (int i = 0; i < numberOfThreads; i++) {
|
||||
final int index = i;
|
||||
Thread thread = new Thread(() -> {
|
||||
try {
|
||||
barrier.await();
|
||||
for (int j = 0; j < tasksSubmittedPerThread; j++) {
|
||||
clusterService.submitStateUpdateTask("[" + index + "][" + j + "]", j, ClusterStateTaskConfig.build(randomFrom(Priority.values())), executors[index], listener);
|
||||
}
|
||||
barrier.await();
|
||||
} catch (InterruptedException | BrokenBarrierException e) {
|
||||
throw new AssertionError(e);
|
||||
}
|
||||
});
|
||||
thread.start();
|
||||
}
|
||||
|
||||
// wait for all threads to be ready
|
||||
barrier.await();
|
||||
// wait for all threads to finish
|
||||
barrier.await();
|
||||
|
||||
updateLatch.await();
|
||||
|
||||
assertThat(failures, empty());
|
||||
|
||||
for (int i = 0; i < numberOfThreads; i++) {
|
||||
assertEquals(tasksSubmittedPerThread, executors[i].tasks.size());
|
||||
for (int j = 0; j < tasksSubmittedPerThread; j++) {
|
||||
assertNotNull(executors[i].tasks.get(j));
|
||||
assertEquals("cluster state update task executed out of order", j, (int)executors[i].tasks.get(j));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void testClusterStateBatchedUpdates() throws BrokenBarrierException, InterruptedException {
|
||||
Settings settings = settingsBuilder()
|
||||
.put("discovery.type", "local")
|
||||
.build();
|
||||
|
@ -884,19 +974,12 @@ public class ClusterServiceIT extends ESIntegTestCase {
|
|||
counts.merge(executor, 1, (previous, one) -> previous + one);
|
||||
}
|
||||
|
||||
CountDownLatch startGate = new CountDownLatch(1);
|
||||
CountDownLatch endGate = new CountDownLatch(numberOfThreads);
|
||||
AtomicBoolean interrupted = new AtomicBoolean();
|
||||
CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);
|
||||
for (int i = 0; i < numberOfThreads; i++) {
|
||||
final int index = i;
|
||||
Thread thread = new Thread(() -> {
|
||||
try {
|
||||
try {
|
||||
startGate.await();
|
||||
} catch (InterruptedException e) {
|
||||
interrupted.set(true);
|
||||
return;
|
||||
}
|
||||
barrier.await();
|
||||
for (int j = 0; j < tasksSubmittedPerThread; j++) {
|
||||
ClusterStateTaskExecutor<Task> executor = assignments.get(index * tasksSubmittedPerThread + j);
|
||||
clusterService.submitStateUpdateTask(
|
||||
|
@ -906,16 +989,18 @@ public class ClusterServiceIT extends ESIntegTestCase {
|
|||
executor,
|
||||
listener);
|
||||
}
|
||||
} finally {
|
||||
endGate.countDown();
|
||||
barrier.await();
|
||||
} catch (BrokenBarrierException | InterruptedException e) {
|
||||
throw new AssertionError(e);
|
||||
}
|
||||
});
|
||||
thread.start();
|
||||
}
|
||||
|
||||
startGate.countDown();
|
||||
endGate.await();
|
||||
assertFalse(interrupted.get());
|
||||
// wait for all threads to be ready
|
||||
barrier.await();
|
||||
// wait for all threads to finish
|
||||
barrier.await();
|
||||
|
||||
// wait until all the cluster state updates have been processed
|
||||
updateLatch.await();
|
||||
|
|
|
@ -39,7 +39,6 @@ import org.elasticsearch.cluster.routing.ShardRoutingState;
|
|||
import org.elasticsearch.cluster.routing.TestShardRouting;
|
||||
import org.elasticsearch.cluster.routing.UnassignedInfo;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.bytes.BytesArray;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
|
@ -49,7 +48,6 @@ import org.elasticsearch.discovery.DiscoverySettings;
|
|||
import org.elasticsearch.gateway.GatewayService;
|
||||
import org.elasticsearch.index.query.QueryBuilders;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
|
||||
import org.elasticsearch.test.ESIntegTestCase;
|
||||
|
||||
import java.util.Collections;
|
||||
|
@ -492,9 +490,6 @@ public class ClusterStateDiffIT extends ESIntegTestCase {
|
|||
builder.settings(settingsBuilder);
|
||||
builder.numberOfShards(randomIntBetween(1, 10)).numberOfReplicas(randomInt(10));
|
||||
int aliasCount = randomInt(10);
|
||||
if (randomBoolean()) {
|
||||
builder.putCustom(IndexWarmersMetaData.TYPE, randomWarmers());
|
||||
}
|
||||
for (int i = 0; i < aliasCount; i++) {
|
||||
builder.putAlias(randomAlias());
|
||||
}
|
||||
|
@ -504,7 +499,7 @@ public class ClusterStateDiffIT extends ESIntegTestCase {
|
|||
@Override
|
||||
public IndexMetaData randomChange(IndexMetaData part) {
|
||||
IndexMetaData.Builder builder = IndexMetaData.builder(part);
|
||||
switch (randomIntBetween(0, 3)) {
|
||||
switch (randomIntBetween(0, 2)) {
|
||||
case 0:
|
||||
builder.settings(Settings.builder().put(part.getSettings()).put(randomSettings(Settings.EMPTY)));
|
||||
break;
|
||||
|
@ -518,9 +513,6 @@ public class ClusterStateDiffIT extends ESIntegTestCase {
|
|||
case 2:
|
||||
builder.settings(Settings.builder().put(part.getSettings()).put(IndexMetaData.SETTING_INDEX_UUID, Strings.randomBase64UUID()));
|
||||
break;
|
||||
case 3:
|
||||
builder.putCustom(IndexWarmersMetaData.TYPE, randomWarmers());
|
||||
break;
|
||||
default:
|
||||
throw new IllegalArgumentException("Shouldn't be here");
|
||||
}
|
||||
|
@ -529,23 +521,6 @@ public class ClusterStateDiffIT extends ESIntegTestCase {
|
|||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Generates a random warmer
|
||||
*/
|
||||
private IndexWarmersMetaData randomWarmers() {
|
||||
if (randomBoolean()) {
|
||||
return new IndexWarmersMetaData(
|
||||
new IndexWarmersMetaData.Entry(
|
||||
randomName("warm"),
|
||||
new String[]{randomName("type")},
|
||||
randomBoolean(),
|
||||
new IndexWarmersMetaData.SearchSource(new BytesArray(randomAsciiOfLength(1000))))
|
||||
);
|
||||
} else {
|
||||
return new IndexWarmersMetaData();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Randomly adds, deletes or updates index templates in the metadata
|
||||
*/
|
||||
|
@ -576,9 +551,6 @@ public class ClusterStateDiffIT extends ESIntegTestCase {
|
|||
for (int i = 0; i < aliasCount; i++) {
|
||||
builder.putAlias(randomAlias());
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
builder.putCustom(IndexWarmersMetaData.TYPE, randomWarmers());
|
||||
}
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
|
||||
package org.elasticsearch.cluster.ack;
|
||||
|
||||
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
|
||||
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
|
||||
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
|
||||
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
|
||||
|
@ -27,9 +26,6 @@ import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
|
|||
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
|
||||
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
|
||||
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse;
|
||||
import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse;
|
||||
import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse;
|
||||
import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerResponse;
|
||||
import org.elasticsearch.client.Client;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.AliasMetaData;
|
||||
|
@ -42,12 +38,9 @@ import org.elasticsearch.cluster.routing.ShardRoutingState;
|
|||
import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.discovery.DiscoverySettings;
|
||||
import org.elasticsearch.index.query.QueryBuilders;
|
||||
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
|
||||
import org.elasticsearch.test.ESIntegTestCase;
|
||||
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
|
||||
|
@ -88,83 +81,6 @@ public class AckIT extends ESIntegTestCase {
|
|||
assertThat(updateSettingsResponse.isAcknowledged(), equalTo(false));
|
||||
}
|
||||
|
||||
public void testPutWarmerAcknowledgement() {
|
||||
createIndex("test");
|
||||
// make sure one shard is started so the search during put warmer will not fail
|
||||
index("test", "type", "1", "f", 1);
|
||||
|
||||
assertAcked(client().admin().indices().preparePutWarmer("custom_warmer")
|
||||
.setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery())));
|
||||
|
||||
for (Client client : clients()) {
|
||||
GetWarmersResponse getWarmersResponse = client.admin().indices().prepareGetWarmers().setLocal(true).get();
|
||||
assertThat(getWarmersResponse.warmers().size(), equalTo(1));
|
||||
ObjectObjectCursor<String, List<IndexWarmersMetaData.Entry>> entry = getWarmersResponse.warmers().iterator().next();
|
||||
assertThat(entry.key, equalTo("test"));
|
||||
assertThat(entry.value.size(), equalTo(1));
|
||||
assertThat(entry.value.get(0).name(), equalTo("custom_warmer"));
|
||||
}
|
||||
}
|
||||
|
||||
public void testPutWarmerNoAcknowledgement() throws InterruptedException {
|
||||
createIndex("test");
|
||||
// make sure one shard is started so the search during put warmer will not fail
|
||||
index("test", "type", "1", "f", 1);
|
||||
|
||||
PutWarmerResponse putWarmerResponse = client().admin().indices().preparePutWarmer("custom_warmer").setTimeout("0s")
|
||||
.setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery()))
|
||||
.get();
|
||||
assertThat(putWarmerResponse.isAcknowledged(), equalTo(false));
|
||||
/* Since we don't wait for the ack here we have to wait until the search request has been executed from the master
|
||||
* otherwise the test infra might have already deleted the index and the search request fails on all shards causing
|
||||
* the test to fail too. We simply wait until the the warmer has been installed and also clean it up afterwards.*/
|
||||
assertTrue(awaitBusy(() -> {
|
||||
for (Client client : clients()) {
|
||||
GetWarmersResponse getWarmersResponse = client.admin().indices().prepareGetWarmers().setLocal(true).get();
|
||||
if (getWarmersResponse.warmers().size() != 1) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}));
|
||||
assertAcked(client().admin().indices().prepareDeleteWarmer().setIndices("test").setNames("custom_warmer"));
|
||||
}
|
||||
|
||||
public void testDeleteWarmerAcknowledgement() {
|
||||
createIndex("test");
|
||||
index("test", "type", "1", "f", 1);
|
||||
|
||||
assertAcked(client().admin().indices().preparePutWarmer("custom_warmer")
|
||||
.setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery())));
|
||||
|
||||
assertAcked(client().admin().indices().prepareDeleteWarmer().setIndices("test").setNames("custom_warmer"));
|
||||
|
||||
for (Client client : clients()) {
|
||||
GetWarmersResponse getWarmersResponse = client.admin().indices().prepareGetWarmers().setLocal(true).get();
|
||||
assertThat(getWarmersResponse.warmers().size(), equalTo(0));
|
||||
}
|
||||
}
|
||||
|
||||
public void testDeleteWarmerNoAcknowledgement() throws InterruptedException {
|
||||
createIndex("test");
|
||||
index("test", "type", "1", "f", 1);
|
||||
|
||||
assertAcked(client().admin().indices().preparePutWarmer("custom_warmer")
|
||||
.setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery())));
|
||||
|
||||
DeleteWarmerResponse deleteWarmerResponse = client().admin().indices().prepareDeleteWarmer().setIndices("test").setNames("custom_warmer").setTimeout("0s").get();
|
||||
assertFalse(deleteWarmerResponse.isAcknowledged());
|
||||
assertTrue(awaitBusy(() -> {
|
||||
for (Client client : clients()) {
|
||||
GetWarmersResponse getWarmersResponse = client.admin().indices().prepareGetWarmers().setLocal(true).get();
|
||||
if (getWarmersResponse.warmers().size() > 0) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}));
|
||||
}
|
||||
|
||||
public void testClusterRerouteAcknowledgement() throws InterruptedException {
|
||||
assertAcked(prepareCreate("test").setSettings(Settings.builder()
|
||||
.put(indexSettings())
|
||||
|
|
|
@ -31,7 +31,10 @@ import java.util.List;
|
|||
import java.util.Map;
|
||||
import java.util.Random;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.BrokenBarrierException;
|
||||
import java.util.concurrent.CopyOnWriteArrayList;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.CyclicBarrier;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.ScheduledExecutorService;
|
||||
|
@ -42,6 +45,8 @@ import java.util.concurrent.atomic.AtomicReferenceArray;
|
|||
import java.util.stream.Collectors;
|
||||
|
||||
import static org.hamcrest.CoreMatchers.instanceOf;
|
||||
import static org.hamcrest.Matchers.empty;
|
||||
import static org.hamcrest.Matchers.is;
|
||||
|
||||
public class CacheTests extends ESTestCase {
|
||||
private int numberOfEntries;
|
||||
|
@ -483,7 +488,7 @@ public class CacheTests extends ESTestCase {
|
|||
return value;
|
||||
});
|
||||
} catch (ExecutionException e) {
|
||||
fail(e.getMessage());
|
||||
throw new AssertionError(e);
|
||||
}
|
||||
}
|
||||
for (int i = 0; i < numberOfEntries; i++) {
|
||||
|
@ -491,25 +496,21 @@ public class CacheTests extends ESTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
public void testComputeIfAbsentCallsOnce() throws InterruptedException {
|
||||
public void testComputeIfAbsentCallsOnce() throws BrokenBarrierException, InterruptedException {
|
||||
int numberOfThreads = randomIntBetween(2, 32);
|
||||
final Cache<Integer, String> cache = CacheBuilder.<Integer, String>builder().build();
|
||||
AtomicReferenceArray flags = new AtomicReferenceArray(numberOfEntries);
|
||||
for (int j = 0; j < numberOfEntries; j++) {
|
||||
flags.set(j, false);
|
||||
}
|
||||
CountDownLatch startGate = new CountDownLatch(1);
|
||||
CountDownLatch endGate = new CountDownLatch(numberOfThreads);
|
||||
AtomicBoolean interrupted = new AtomicBoolean();
|
||||
|
||||
CopyOnWriteArrayList<ExecutionException> failures = new CopyOnWriteArrayList<>();
|
||||
|
||||
CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);
|
||||
for (int i = 0; i < numberOfThreads; i++) {
|
||||
Thread thread = new Thread(() -> {
|
||||
try {
|
||||
try {
|
||||
startGate.await();
|
||||
} catch (InterruptedException e) {
|
||||
interrupted.set(true);
|
||||
return;
|
||||
}
|
||||
barrier.await();
|
||||
for (int j = 0; j < numberOfEntries; j++) {
|
||||
try {
|
||||
cache.computeIfAbsent(j, key -> {
|
||||
|
@ -517,18 +518,24 @@ public class CacheTests extends ESTestCase {
|
|||
return Integer.toString(key);
|
||||
});
|
||||
} catch (ExecutionException e) {
|
||||
throw new RuntimeException(e);
|
||||
failures.add(e);
|
||||
break;
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
endGate.countDown();
|
||||
barrier.await();
|
||||
} catch (BrokenBarrierException | InterruptedException e) {
|
||||
throw new AssertionError(e);
|
||||
}
|
||||
});
|
||||
thread.start();
|
||||
}
|
||||
startGate.countDown();
|
||||
endGate.await();
|
||||
assertFalse(interrupted.get());
|
||||
|
||||
// wait for all threads to be ready
|
||||
barrier.await();
|
||||
// wait for all threads to finish
|
||||
barrier.await();
|
||||
|
||||
assertThat(failures, is(empty()));
|
||||
}
|
||||
|
||||
public void testComputeIfAbsentThrowsExceptionIfLoaderReturnsANullValue() {
|
||||
|
@ -541,7 +548,7 @@ public class CacheTests extends ESTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
public void testDependentKeyDeadlock() throws InterruptedException {
|
||||
public void testDependentKeyDeadlock() throws BrokenBarrierException, InterruptedException {
|
||||
class Key {
|
||||
private final int key;
|
||||
|
||||
|
@ -568,18 +575,19 @@ public class CacheTests extends ESTestCase {
|
|||
|
||||
int numberOfThreads = randomIntBetween(2, 32);
|
||||
final Cache<Key, Integer> cache = CacheBuilder.<Key, Integer>builder().build();
|
||||
CountDownLatch startGate = new CountDownLatch(1);
|
||||
|
||||
CopyOnWriteArrayList<ExecutionException> failures = new CopyOnWriteArrayList<>();
|
||||
|
||||
CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);
|
||||
CountDownLatch deadlockLatch = new CountDownLatch(numberOfThreads);
|
||||
AtomicBoolean interrupted = new AtomicBoolean();
|
||||
List<Thread> threads = new ArrayList<>();
|
||||
for (int i = 0; i < numberOfThreads; i++) {
|
||||
Thread thread = new Thread(() -> {
|
||||
try {
|
||||
try {
|
||||
startGate.await();
|
||||
} catch (InterruptedException e) {
|
||||
interrupted.set(true);
|
||||
return;
|
||||
barrier.await();
|
||||
} catch (BrokenBarrierException | InterruptedException e) {
|
||||
throw new AssertionError(e);
|
||||
}
|
||||
Random random = new Random(random().nextLong());
|
||||
for (int j = 0; j < numberOfEntries; j++) {
|
||||
|
@ -594,7 +602,8 @@ public class CacheTests extends ESTestCase {
|
|||
}
|
||||
});
|
||||
} catch (ExecutionException e) {
|
||||
fail(e.getMessage());
|
||||
failures.add(e);
|
||||
break;
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
|
@ -631,7 +640,7 @@ public class CacheTests extends ESTestCase {
|
|||
}, 1, 1, TimeUnit.SECONDS);
|
||||
|
||||
// everything is setup, release the hounds
|
||||
startGate.countDown();
|
||||
barrier.await();
|
||||
|
||||
// wait for either deadlock to be detected or the threads to terminate
|
||||
deadlockLatch.await();
|
||||
|
@ -639,24 +648,21 @@ public class CacheTests extends ESTestCase {
|
|||
// shutdown the watchdog service
|
||||
scheduler.shutdown();
|
||||
|
||||
assertThat(failures, is(empty()));
|
||||
|
||||
assertFalse("deadlock", deadlock.get());
|
||||
}
|
||||
|
||||
public void testCachePollution() throws InterruptedException {
|
||||
public void testCachePollution() throws BrokenBarrierException, InterruptedException {
|
||||
int numberOfThreads = randomIntBetween(2, 32);
|
||||
final Cache<Integer, String> cache = CacheBuilder.<Integer, String>builder().build();
|
||||
CountDownLatch startGate = new CountDownLatch(1);
|
||||
CountDownLatch endGate = new CountDownLatch(numberOfThreads);
|
||||
AtomicBoolean interrupted = new AtomicBoolean();
|
||||
|
||||
CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);
|
||||
|
||||
for (int i = 0; i < numberOfThreads; i++) {
|
||||
Thread thread = new Thread(() -> {
|
||||
try {
|
||||
try {
|
||||
startGate.await();
|
||||
} catch (InterruptedException e) {
|
||||
interrupted.set(true);
|
||||
return;
|
||||
}
|
||||
barrier.await();
|
||||
Random random = new Random(random().nextLong());
|
||||
for (int j = 0; j < numberOfEntries; j++) {
|
||||
Integer key = random.nextInt(numberOfEntries);
|
||||
|
@ -686,21 +692,23 @@ public class CacheTests extends ESTestCase {
|
|||
cache.get(key);
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
endGate.countDown();
|
||||
barrier.await();
|
||||
} catch (BrokenBarrierException | InterruptedException e) {
|
||||
throw new AssertionError(e);
|
||||
}
|
||||
});
|
||||
thread.start();
|
||||
}
|
||||
|
||||
startGate.countDown();
|
||||
endGate.await();
|
||||
assertFalse(interrupted.get());
|
||||
// wait for all threads to be ready
|
||||
barrier.await();
|
||||
// wait for all threads to finish
|
||||
barrier.await();
|
||||
}
|
||||
|
||||
// test that the cache is not corrupted under lots of concurrent modifications, even hitting the same key
|
||||
// here be dragons: this test did catch one subtle bug during development; do not remove lightly
|
||||
public void testTorture() throws InterruptedException {
|
||||
public void testTorture() throws BrokenBarrierException, InterruptedException {
|
||||
int numberOfThreads = randomIntBetween(2, 32);
|
||||
final Cache<Integer, String> cache =
|
||||
CacheBuilder.<Integer, String>builder()
|
||||
|
@ -708,32 +716,28 @@ public class CacheTests extends ESTestCase {
|
|||
.weigher((k, v) -> 2)
|
||||
.build();
|
||||
|
||||
CountDownLatch startGate = new CountDownLatch(1);
|
||||
CountDownLatch endGate = new CountDownLatch(numberOfThreads);
|
||||
AtomicBoolean interrupted = new AtomicBoolean();
|
||||
CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);
|
||||
for (int i = 0; i < numberOfThreads; i++) {
|
||||
Thread thread = new Thread(() -> {
|
||||
try {
|
||||
try {
|
||||
startGate.await();
|
||||
} catch (InterruptedException e) {
|
||||
interrupted.set(true);
|
||||
return;
|
||||
}
|
||||
barrier.await();
|
||||
Random random = new Random(random().nextLong());
|
||||
for (int j = 0; j < numberOfEntries; j++) {
|
||||
Integer key = random.nextInt(numberOfEntries);
|
||||
cache.put(key, Integer.toString(j));
|
||||
}
|
||||
} finally {
|
||||
endGate.countDown();
|
||||
barrier.await();
|
||||
} catch (BrokenBarrierException | InterruptedException e) {
|
||||
throw new AssertionError(e);
|
||||
}
|
||||
});
|
||||
thread.start();
|
||||
}
|
||||
startGate.countDown();
|
||||
endGate.await();
|
||||
assertFalse(interrupted.get());
|
||||
|
||||
// wait for all threads to be ready
|
||||
barrier.await();
|
||||
// wait for all threads to finish
|
||||
barrier.await();
|
||||
|
||||
cache.refresh();
|
||||
assertEquals(500, cache.count());
|
||||
|
|
|
@ -38,7 +38,6 @@ import org.apache.lucene.search.TermQuery;
|
|||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.MMapDirectory;
|
||||
import org.apache.lucene.store.MockDirectoryWrapper;
|
||||
import org.apache.lucene.util.Version;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -54,14 +53,6 @@ import java.util.concurrent.atomic.AtomicBoolean;
|
|||
*
|
||||
*/
|
||||
public class LuceneTests extends ESTestCase {
|
||||
/**
|
||||
* simple test that ensures that we bump the version on Upgrade
|
||||
*/
|
||||
public void testVersion() {
|
||||
// note this is just a silly sanity check, we test it in lucene, and we point to it this way
|
||||
assertEquals(Lucene.VERSION, Version.LATEST);
|
||||
}
|
||||
|
||||
public void testWaitForIndex() throws Exception {
|
||||
final MockDirectoryWrapper dir = newMockDirectory();
|
||||
|
||||
|
|
|
@ -32,6 +32,7 @@ import org.apache.lucene.util.LuceneTestCase;
|
|||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.common.bytes.BytesArray;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.io.FileSystemUtils;
|
||||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
|
@ -1316,7 +1317,7 @@ public class TranslogTests extends ESTestCase {
|
|||
|
||||
public void testFailFlush() throws IOException {
|
||||
Path tempDir = createTempDir();
|
||||
final AtomicBoolean fail = new AtomicBoolean();
|
||||
final FailSwitch fail = new FailSwitch();
|
||||
TranslogConfig config = getTranslogConfig(tempDir);
|
||||
Translog translog = getFailableTranslog(fail, config);
|
||||
|
||||
|
@ -1336,9 +1337,13 @@ public class TranslogTests extends ESTestCase {
|
|||
assertFalse(translog.isOpen());
|
||||
assertEquals("__FAKE__ no space left on device", ex.getMessage());
|
||||
}
|
||||
fail.set(randomBoolean());
|
||||
if (randomBoolean()) {
|
||||
fail.failAlways();
|
||||
} else {
|
||||
fail.failNever();
|
||||
}
|
||||
fail.set(false);
|
||||
}
|
||||
fail.failNever();
|
||||
if (randomBoolean()) {
|
||||
try {
|
||||
locations.add(translog.add(new Translog.Index("test", "" + opsSynced, Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8")))));
|
||||
|
@ -1409,13 +1414,13 @@ public class TranslogTests extends ESTestCase {
|
|||
|
||||
public void testTragicEventCanBeAnyException() throws IOException {
|
||||
Path tempDir = createTempDir();
|
||||
final AtomicBoolean fail = new AtomicBoolean();
|
||||
final FailSwitch fail = new FailSwitch();
|
||||
TranslogConfig config = getTranslogConfig(tempDir);
|
||||
assumeFalse("this won't work if we sync on any op", config.isSyncOnEachOperation());
|
||||
Translog translog = getFailableTranslog(fail, config, false, true);
|
||||
LineFileDocs lineFileDocs = new LineFileDocs(random()); // writes pretty big docs so we cross buffer boarders regularly
|
||||
translog.add(new Translog.Index("test", "1", lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8"))));
|
||||
fail.set(true);
|
||||
fail.failAlways();
|
||||
try {
|
||||
Translog.Location location = translog.add(new Translog.Index("test", "2", lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8"))));
|
||||
if (randomBoolean()) {
|
||||
|
@ -1436,7 +1441,7 @@ public class TranslogTests extends ESTestCase {
|
|||
|
||||
public void testFatalIOExceptionsWhileWritingConcurrently() throws IOException, InterruptedException {
|
||||
Path tempDir = createTempDir();
|
||||
final AtomicBoolean fail = new AtomicBoolean(false);
|
||||
final FailSwitch fail = new FailSwitch();
|
||||
|
||||
TranslogConfig config = getTranslogConfig(tempDir);
|
||||
Translog translog = getFailableTranslog(fail, config);
|
||||
|
@ -1473,7 +1478,7 @@ public class TranslogTests extends ESTestCase {
|
|||
// this holds a reference to the current tlog channel such that it's not closed
|
||||
// if we hit a tragic event. this is important to ensure that asserts inside the Translog#add doesn't trip
|
||||
// otherwise our assertions here are off by one sometimes.
|
||||
fail.set(true);
|
||||
fail.failAlways();
|
||||
for (int i = 0; i < threadCount; i++) {
|
||||
threads[i].join();
|
||||
}
|
||||
|
@ -1525,11 +1530,40 @@ public class TranslogTests extends ESTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
private Translog getFailableTranslog(final AtomicBoolean fail, final TranslogConfig config) throws IOException {
|
||||
private Translog getFailableTranslog(FailSwitch fail, final TranslogConfig config) throws IOException {
|
||||
return getFailableTranslog(fail, config, randomBoolean(), false);
|
||||
}
|
||||
|
||||
private Translog getFailableTranslog(final AtomicBoolean fail, final TranslogConfig config, final boolean paritalWrites, final boolean throwUnknownException) throws IOException {
|
||||
private static class FailSwitch {
|
||||
private volatile int failRate;
|
||||
private volatile boolean onceFailedFailAlways = false;
|
||||
public boolean fail() {
|
||||
boolean fail = randomIntBetween(1, 100) <= failRate;
|
||||
if (fail && onceFailedFailAlways) {
|
||||
failAlways();
|
||||
}
|
||||
return fail;
|
||||
}
|
||||
|
||||
public void failNever() {
|
||||
failRate = 0;
|
||||
}
|
||||
|
||||
public void failAlways() {
|
||||
failRate = 100;
|
||||
}
|
||||
|
||||
public void failRandomly() {
|
||||
failRate = randomIntBetween(1, 100);
|
||||
}
|
||||
|
||||
public void onceFailedFailAlways() {
|
||||
onceFailedFailAlways = true;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private Translog getFailableTranslog(final FailSwitch fail, final TranslogConfig config, final boolean paritalWrites, final boolean throwUnknownException) throws IOException {
|
||||
return new Translog(config) {
|
||||
@Override
|
||||
TranslogWriter.ChannelFactory getChannelFactory() {
|
||||
|
@ -1539,23 +1573,56 @@ public class TranslogTests extends ESTestCase {
|
|||
@Override
|
||||
public FileChannel open(Path file) throws IOException {
|
||||
FileChannel channel = factory.open(file);
|
||||
return new ThrowingFileChannel(fail, paritalWrites, throwUnknownException, channel);
|
||||
boolean success = false;
|
||||
try {
|
||||
ThrowingFileChannel throwingFileChannel = new ThrowingFileChannel(fail, paritalWrites, throwUnknownException, channel);
|
||||
success = true;
|
||||
return throwingFileChannel;
|
||||
} finally {
|
||||
if (success == false) {
|
||||
IOUtils.closeWhileHandlingException(channel);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean assertBytesAtLocation(Location location, BytesReference expectedBytes) throws IOException {
|
||||
return true; // we don't wanna fail in the assert
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
public static class ThrowingFileChannel extends FilterFileChannel {
|
||||
private final AtomicBoolean fail;
|
||||
private final FailSwitch fail;
|
||||
private final boolean partialWrite;
|
||||
private final boolean throwUnknownException;
|
||||
|
||||
public ThrowingFileChannel(AtomicBoolean fail, boolean partialWrite, boolean throwUnknownException, FileChannel delegate) {
|
||||
public ThrowingFileChannel(FailSwitch fail, boolean partialWrite, boolean throwUnknownException, FileChannel delegate) throws MockDirectoryWrapper.FakeIOException {
|
||||
super(delegate);
|
||||
this.fail = fail;
|
||||
this.partialWrite = partialWrite;
|
||||
this.throwUnknownException = throwUnknownException;
|
||||
if (fail.fail()) {
|
||||
throw new MockDirectoryWrapper.FakeIOException();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public int read(ByteBuffer dst) throws IOException {
|
||||
if (fail.fail()) {
|
||||
throw new MockDirectoryWrapper.FakeIOException();
|
||||
}
|
||||
return super.read(dst);
|
||||
}
|
||||
|
||||
@Override
|
||||
public long read(ByteBuffer[] dsts, int offset, int length) throws IOException {
|
||||
if (fail.fail()) {
|
||||
throw new MockDirectoryWrapper.FakeIOException();
|
||||
}
|
||||
return super.read(dsts, offset, length);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -1570,7 +1637,7 @@ public class TranslogTests extends ESTestCase {
|
|||
|
||||
|
||||
public int write(ByteBuffer src) throws IOException {
|
||||
if (fail.get()) {
|
||||
if (fail.fail()) {
|
||||
if (partialWrite) {
|
||||
if (src.hasRemaining()) {
|
||||
final int pos = src.position();
|
||||
|
@ -1590,6 +1657,22 @@ public class TranslogTests extends ESTestCase {
|
|||
}
|
||||
return super.write(src);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void force(boolean metaData) throws IOException {
|
||||
if (fail.fail()) {
|
||||
throw new MockDirectoryWrapper.FakeIOException();
|
||||
}
|
||||
super.force(metaData);
|
||||
}
|
||||
|
||||
@Override
|
||||
public long position() throws IOException {
|
||||
if (fail.fail()) {
|
||||
throw new MockDirectoryWrapper.FakeIOException();
|
||||
}
|
||||
return super.position();
|
||||
}
|
||||
}
|
||||
|
||||
private static final class UnknownException extends RuntimeException {
|
||||
|
@ -1711,4 +1794,78 @@ public class TranslogTests extends ESTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* This test adds operations to the translog which might randomly throw an IOException. The only thing this test verifies is
|
||||
* that we can, after we hit an exception, open and recover the translog successfully and retrieve all successfully synced operations
|
||||
* from the transaction log.
|
||||
*/
|
||||
public void testWithRandomException() throws IOException {
|
||||
final int runs = randomIntBetween(5, 10);
|
||||
for (int run = 0; run < runs; run++) {
|
||||
Path tempDir = createTempDir();
|
||||
final FailSwitch fail = new FailSwitch();
|
||||
fail.failRandomly();
|
||||
TranslogConfig config = getTranslogConfig(tempDir);
|
||||
final int numOps = randomIntBetween(100, 200);
|
||||
List<String> syncedDocs = new ArrayList<>();
|
||||
List<String> unsynced = new ArrayList<>();
|
||||
if (randomBoolean()) {
|
||||
fail.onceFailedFailAlways();
|
||||
}
|
||||
try {
|
||||
final Translog failableTLog = getFailableTranslog(fail, config, randomBoolean(), false);
|
||||
try {
|
||||
LineFileDocs lineFileDocs = new LineFileDocs(random()); //writes pretty big docs so we cross buffer boarders regularly
|
||||
for (int opsAdded = 0; opsAdded < numOps; opsAdded++) {
|
||||
String doc = lineFileDocs.nextDoc().toString();
|
||||
failableTLog.add(new Translog.Index("test", "" + opsAdded, doc.getBytes(Charset.forName("UTF-8"))));
|
||||
unsynced.add(doc);
|
||||
if (randomBoolean()) {
|
||||
failableTLog.sync();
|
||||
syncedDocs.addAll(unsynced);
|
||||
unsynced.clear();
|
||||
}
|
||||
if (randomFloat() < 0.1) {
|
||||
failableTLog.sync(); // we have to sync here first otherwise we don't know if the sync succeeded if the commit fails
|
||||
syncedDocs.addAll(unsynced);
|
||||
unsynced.clear();
|
||||
if (randomBoolean()) {
|
||||
failableTLog.prepareCommit();
|
||||
}
|
||||
failableTLog.commit();
|
||||
syncedDocs.clear();
|
||||
}
|
||||
}
|
||||
} catch (TranslogException | MockDirectoryWrapper.FakeIOException ex) {
|
||||
// fair enough
|
||||
} catch (IOException ex) {
|
||||
assertEquals(ex.getMessage(), "__FAKE__ no space left on device");
|
||||
} finally {
|
||||
config.setTranslogGeneration(failableTLog.getGeneration());
|
||||
IOUtils.closeWhileHandlingException(failableTLog);
|
||||
}
|
||||
} catch (TranslogException | MockDirectoryWrapper.FakeIOException ex) {
|
||||
// failed - that's ok, we didn't even create it
|
||||
}
|
||||
// now randomly open this failing tlog again just to make sure we can also recover from failing during recovery
|
||||
if (randomBoolean()) {
|
||||
try {
|
||||
IOUtils.close(getFailableTranslog(fail, config, randomBoolean(), false));
|
||||
} catch (TranslogException | MockDirectoryWrapper.FakeIOException ex) {
|
||||
// failed - that's ok, we didn't even create it
|
||||
}
|
||||
}
|
||||
|
||||
try (Translog translog = new Translog(config)) {
|
||||
try (Translog.Snapshot snapshot = translog.newSnapshot()) {
|
||||
assertEquals(syncedDocs.size(), snapshot.estimatedTotalOperations());
|
||||
for (int i = 0; i < syncedDocs.size(); i++) {
|
||||
Translog.Operation next = snapshot.next();
|
||||
assertEquals(syncedDocs.get(i), next.getSource().source.toUtf8());
|
||||
assertNotNull("operation " + i + " must be non-null", next);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -36,7 +36,6 @@ import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequestBui
|
|||
import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
|
||||
import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequestBuilder;
|
||||
import org.elasticsearch.action.percolate.MultiPercolateRequestBuilder;
|
||||
import org.elasticsearch.action.percolate.PercolateRequestBuilder;
|
||||
import org.elasticsearch.action.percolate.PercolateSourceBuilder;
|
||||
|
@ -49,10 +48,7 @@ import org.elasticsearch.action.support.IndicesOptions;
|
|||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.IndexNotFoundException;
|
||||
import org.elasticsearch.index.query.QueryBuilders;
|
||||
import org.elasticsearch.search.builder.SearchSourceBuilder;
|
||||
import org.elasticsearch.search.suggest.SuggestBuilders;
|
||||
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
|
||||
import org.elasticsearch.test.ESIntegTestCase;
|
||||
|
||||
import static org.elasticsearch.action.percolate.PercolateSourceBuilder.docBuilder;
|
||||
|
@ -86,7 +82,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
|
|||
verify(getAliases("test1", "test2"), true);
|
||||
verify(getFieldMapping("test1", "test2"), true);
|
||||
verify(getMapping("test1", "test2"), true);
|
||||
verify(getWarmer("test1", "test2"), true);
|
||||
verify(getSettings("test1", "test2"), true);
|
||||
|
||||
IndicesOptions options = IndicesOptions.strictExpandOpen();
|
||||
|
@ -107,7 +102,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
|
|||
verify(getAliases("test1", "test2").setIndicesOptions(options), true);
|
||||
verify(getFieldMapping("test1", "test2").setIndicesOptions(options), true);
|
||||
verify(getMapping("test1", "test2").setIndicesOptions(options), true);
|
||||
verify(getWarmer("test1", "test2").setIndicesOptions(options), true);
|
||||
verify(getSettings("test1", "test2").setIndicesOptions(options), true);
|
||||
|
||||
options = IndicesOptions.lenientExpandOpen();
|
||||
|
@ -128,7 +122,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
|
|||
verify(getAliases("test1", "test2").setIndicesOptions(options), false);
|
||||
verify(getFieldMapping("test1", "test2").setIndicesOptions(options), false);
|
||||
verify(getMapping("test1", "test2").setIndicesOptions(options), false);
|
||||
verify(getWarmer("test1", "test2").setIndicesOptions(options), false);
|
||||
verify(getSettings("test1", "test2").setIndicesOptions(options), false);
|
||||
|
||||
options = IndicesOptions.strictExpandOpen();
|
||||
|
@ -151,7 +144,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
|
|||
verify(getAliases("test1", "test2").setIndicesOptions(options), false);
|
||||
verify(getFieldMapping("test1", "test2").setIndicesOptions(options), false);
|
||||
verify(getMapping("test1", "test2").setIndicesOptions(options), false);
|
||||
verify(getWarmer("test1", "test2").setIndicesOptions(options), false);
|
||||
verify(getSettings("test1", "test2").setIndicesOptions(options), false);
|
||||
}
|
||||
|
||||
|
@ -182,7 +174,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
|
|||
verify(getAliases("test1").setIndicesOptions(options), true);
|
||||
verify(getFieldMapping("test1").setIndicesOptions(options), true);
|
||||
verify(getMapping("test1").setIndicesOptions(options), true);
|
||||
verify(getWarmer("test1").setIndicesOptions(options), true);
|
||||
verify(getSettings("test1").setIndicesOptions(options), true);
|
||||
|
||||
options = IndicesOptions.fromOptions(true, options.allowNoIndices(), options.expandWildcardsOpen(), options.expandWildcardsClosed(), options);
|
||||
|
@ -203,7 +194,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
|
|||
verify(getAliases("test1").setIndicesOptions(options), false);
|
||||
verify(getFieldMapping("test1").setIndicesOptions(options), false);
|
||||
verify(getMapping("test1").setIndicesOptions(options), false);
|
||||
verify(getWarmer("test1").setIndicesOptions(options), false);
|
||||
verify(getSettings("test1").setIndicesOptions(options), false);
|
||||
|
||||
assertAcked(client().admin().indices().prepareOpen("test1"));
|
||||
|
@ -227,7 +217,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
|
|||
verify(getAliases("test1").setIndicesOptions(options), false);
|
||||
verify(getFieldMapping("test1").setIndicesOptions(options), false);
|
||||
verify(getMapping("test1").setIndicesOptions(options), false);
|
||||
verify(getWarmer("test1").setIndicesOptions(options), false);
|
||||
verify(getSettings("test1").setIndicesOptions(options), false);
|
||||
}
|
||||
|
||||
|
@ -249,7 +238,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
|
|||
verify(getAliases("test1").setIndicesOptions(options), true);
|
||||
verify(getFieldMapping("test1").setIndicesOptions(options), true);
|
||||
verify(getMapping("test1").setIndicesOptions(options), true);
|
||||
verify(getWarmer("test1").setIndicesOptions(options), true);
|
||||
verify(getSettings("test1").setIndicesOptions(options), true);
|
||||
|
||||
options = IndicesOptions.fromOptions(true, options.allowNoIndices(), options.expandWildcardsOpen(), options.expandWildcardsClosed(), options);
|
||||
|
@ -269,7 +257,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
|
|||
verify(getAliases("test1").setIndicesOptions(options), false);
|
||||
verify(getFieldMapping("test1").setIndicesOptions(options), false);
|
||||
verify(getMapping("test1").setIndicesOptions(options), false);
|
||||
verify(getWarmer("test1").setIndicesOptions(options), false);
|
||||
verify(getSettings("test1").setIndicesOptions(options), false);
|
||||
|
||||
assertAcked(prepareCreate("test1"));
|
||||
|
@ -292,7 +279,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
|
|||
verify(getAliases("test1").setIndicesOptions(options), false);
|
||||
verify(getFieldMapping("test1").setIndicesOptions(options), false);
|
||||
verify(getMapping("test1").setIndicesOptions(options), false);
|
||||
verify(getWarmer("test1").setIndicesOptions(options), false);
|
||||
verify(getSettings("test1").setIndicesOptions(options), false);
|
||||
}
|
||||
|
||||
|
@ -346,7 +332,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
|
|||
verify(getAliases(indices), false);
|
||||
verify(getFieldMapping(indices), false);
|
||||
verify(getMapping(indices), false);
|
||||
verify(getWarmer(indices), false);
|
||||
verify(getSettings(indices), false);
|
||||
|
||||
// Now force allow_no_indices=true
|
||||
|
@ -368,7 +353,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
|
|||
verify(getAliases(indices).setIndicesOptions(options), false);
|
||||
verify(getFieldMapping(indices).setIndicesOptions(options), false);
|
||||
verify(getMapping(indices).setIndicesOptions(options), false);
|
||||
verify(getWarmer(indices).setIndicesOptions(options), false);
|
||||
verify(getSettings(indices).setIndicesOptions(options), false);
|
||||
|
||||
assertAcked(prepareCreate("foobar"));
|
||||
|
@ -393,7 +377,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
|
|||
verify(getAliases(indices), false);
|
||||
verify(getFieldMapping(indices), false);
|
||||
verify(getMapping(indices), false);
|
||||
verify(getWarmer(indices), false);
|
||||
verify(getSettings(indices).setIndicesOptions(options), false);
|
||||
|
||||
// Verify defaults for wildcards, with two wildcard expression and one existing index
|
||||
|
@ -415,7 +398,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
|
|||
verify(getAliases(indices), false);
|
||||
verify(getFieldMapping(indices), false);
|
||||
verify(getMapping(indices), false);
|
||||
verify(getWarmer(indices), false);
|
||||
verify(getSettings(indices).setIndicesOptions(options), false);
|
||||
|
||||
// Now force allow_no_indices=true
|
||||
|
@ -437,7 +419,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
|
|||
verify(getAliases(indices).setIndicesOptions(options), false);
|
||||
verify(getFieldMapping(indices).setIndicesOptions(options), false);
|
||||
verify(getMapping(indices).setIndicesOptions(options), false);
|
||||
verify(getWarmer(indices).setIndicesOptions(options), false);
|
||||
verify(getSettings(indices).setIndicesOptions(options), false);
|
||||
}
|
||||
|
||||
|
@ -581,34 +562,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
|
|||
assertThat(client().admin().indices().prepareExists("barbaz").get().isExists(), equalTo(false));
|
||||
}
|
||||
|
||||
public void testPutWarmer() throws Exception {
|
||||
createIndex("foobar");
|
||||
ensureYellow();
|
||||
verify(client().admin().indices().preparePutWarmer("warmer1").setSearchRequest(client().prepareSearch().setIndices("foobar").setQuery(QueryBuilders.matchAllQuery())), false);
|
||||
assertThat(client().admin().indices().prepareGetWarmers("foobar").setWarmers("warmer1").get().getWarmers().size(), equalTo(1));
|
||||
|
||||
}
|
||||
|
||||
public void testPutWarmerWildcard() throws Exception {
|
||||
createIndex("foo", "foobar", "bar", "barbaz");
|
||||
ensureYellow();
|
||||
|
||||
verify(client().admin().indices().preparePutWarmer("warmer1").setSearchRequest(client().prepareSearch().setIndices("foo*").setQuery(QueryBuilders.matchAllQuery())), false);
|
||||
|
||||
assertThat(client().admin().indices().prepareGetWarmers("foo").setWarmers("warmer1").get().getWarmers().size(), equalTo(1));
|
||||
assertThat(client().admin().indices().prepareGetWarmers("foobar").setWarmers("warmer1").get().getWarmers().size(), equalTo(1));
|
||||
assertThat(client().admin().indices().prepareGetWarmers("bar").setWarmers("warmer1").get().getWarmers().size(), equalTo(0));
|
||||
assertThat(client().admin().indices().prepareGetWarmers("barbaz").setWarmers("warmer1").get().getWarmers().size(), equalTo(0));
|
||||
|
||||
verify(client().admin().indices().preparePutWarmer("warmer2").setSearchRequest(client().prepareSearch().setIndices().setQuery(QueryBuilders.matchAllQuery())), false);
|
||||
|
||||
assertThat(client().admin().indices().prepareGetWarmers("foo").setWarmers("warmer2").get().getWarmers().size(), equalTo(1));
|
||||
assertThat(client().admin().indices().prepareGetWarmers("foobar").setWarmers("warmer2").get().getWarmers().size(), equalTo(1));
|
||||
assertThat(client().admin().indices().prepareGetWarmers("bar").setWarmers("warmer2").get().getWarmers().size(), equalTo(1));
|
||||
assertThat(client().admin().indices().prepareGetWarmers("barbaz").setWarmers("warmer2").get().getWarmers().size(), equalTo(1));
|
||||
|
||||
}
|
||||
|
||||
public void testPutAlias() throws Exception {
|
||||
createIndex("foobar");
|
||||
ensureYellow();
|
||||
|
@ -635,46 +588,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
|
|||
|
||||
}
|
||||
|
||||
public void testDeleteWarmer() throws Exception {
|
||||
SearchSourceBuilder source = new SearchSourceBuilder();
|
||||
source.query(QueryBuilders.matchAllQuery());
|
||||
IndexWarmersMetaData.Entry entry = new IndexWarmersMetaData.Entry("test1", new String[] { "typ1" }, false, new IndexWarmersMetaData.SearchSource(source));
|
||||
assertAcked(prepareCreate("foobar").addCustom(new IndexWarmersMetaData(entry)));
|
||||
ensureYellow();
|
||||
|
||||
verify(client().admin().indices().prepareDeleteWarmer().setIndices("foo").setNames("test1"), true);
|
||||
assertThat(client().admin().indices().prepareGetWarmers("foobar").setWarmers("test1").get().getWarmers().size(), equalTo(1));
|
||||
verify(client().admin().indices().prepareDeleteWarmer().setIndices("foobar").setNames("test1"), false);
|
||||
assertThat(client().admin().indices().prepareGetWarmers("foobar").setWarmers("test1").get().getWarmers().size(), equalTo(0));
|
||||
}
|
||||
|
||||
public void testDeleteWarmerWildcard() throws Exception {
|
||||
verify(client().admin().indices().prepareDeleteWarmer().setIndices("_all").setNames("test1"), true);
|
||||
|
||||
SearchSourceBuilder source = new SearchSourceBuilder();
|
||||
source.query(QueryBuilders.matchAllQuery());
|
||||
IndexWarmersMetaData.Entry entry = new IndexWarmersMetaData.Entry("test1", new String[] { "type1" }, false, new IndexWarmersMetaData.SearchSource(source));
|
||||
assertAcked(prepareCreate("foo").addCustom(new IndexWarmersMetaData(entry)));
|
||||
assertAcked(prepareCreate("foobar").addCustom(new IndexWarmersMetaData(entry)));
|
||||
assertAcked(prepareCreate("bar").addCustom(new IndexWarmersMetaData(entry)));
|
||||
assertAcked(prepareCreate("barbaz").addCustom(new IndexWarmersMetaData(entry)));
|
||||
ensureYellow();
|
||||
|
||||
verify(client().admin().indices().prepareDeleteWarmer().setIndices("foo*").setNames("test1"), false);
|
||||
assertThat(client().admin().indices().prepareGetWarmers("foo").setWarmers("test1").get().getWarmers().size(), equalTo(0));
|
||||
assertThat(client().admin().indices().prepareGetWarmers("foobar").setWarmers("test1").get().getWarmers().size(), equalTo(0));
|
||||
assertThat(client().admin().indices().prepareGetWarmers("bar").setWarmers("test1").get().getWarmers().size(), equalTo(1));
|
||||
assertThat(client().admin().indices().prepareGetWarmers("barbaz").setWarmers("test1").get().getWarmers().size(), equalTo(1));
|
||||
|
||||
assertAcked(client().admin().indices().prepareDelete("foo*"));
|
||||
|
||||
verify(client().admin().indices().prepareDeleteWarmer().setIndices("foo*").setNames("test1"), true);
|
||||
|
||||
verify(client().admin().indices().prepareDeleteWarmer().setIndices("_all").setNames("test1"), false);
|
||||
assertThat(client().admin().indices().prepareGetWarmers("bar").setWarmers("test1").get().getWarmers().size(), equalTo(0));
|
||||
assertThat(client().admin().indices().prepareGetWarmers("barbaz").setWarmers("test1").get().getWarmers().size(), equalTo(0));
|
||||
}
|
||||
|
||||
public void testPutMapping() throws Exception {
|
||||
verify(client().admin().indices().preparePutMapping("foo").setType("type1").setSource("field", "type=string"), true);
|
||||
verify(client().admin().indices().preparePutMapping("_all").setType("type1").setSource("field", "type=string"), true);
|
||||
|
@ -816,10 +729,6 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
|
|||
return client().admin().indices().prepareGetMappings(indices);
|
||||
}
|
||||
|
||||
private static GetWarmersRequestBuilder getWarmer(String... indices) {
|
||||
return client().admin().indices().prepareGetWarmers(indices);
|
||||
}
|
||||
|
||||
private static GetSettingsRequestBuilder getSettings(String... indices) {
|
||||
return client().admin().indices().prepareGetSettings(indices);
|
||||
}
|
||||
|
|
|
@ -151,18 +151,10 @@ public class RandomExceptionCircuitBreakerIT extends ESIntegTestCase {
|
|||
|
||||
for (int i = 0; i < numSearches; i++) {
|
||||
SearchRequestBuilder searchRequestBuilder = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery());
|
||||
switch (randomIntBetween(0, 5)) {
|
||||
case 5:
|
||||
case 4:
|
||||
case 3:
|
||||
if (random().nextBoolean()) {
|
||||
searchRequestBuilder.addSort("test-str", SortOrder.ASC);
|
||||
// fall through - sometimes get both fields
|
||||
case 2:
|
||||
case 1:
|
||||
default:
|
||||
searchRequestBuilder.addSort("test-num", SortOrder.ASC);
|
||||
|
||||
}
|
||||
searchRequestBuilder.addSort("test-num", SortOrder.ASC);
|
||||
boolean success = false;
|
||||
try {
|
||||
// Sort by the string and numeric fields, to load them into field data
|
||||
|
@ -249,6 +241,7 @@ public class RandomExceptionCircuitBreakerIT extends ESIntegTestCase {
|
|||
if (random.nextDouble() < topLevelRatio) {
|
||||
throw new IOException("Forced top level Exception on [" + flag.name() + "]");
|
||||
}
|
||||
break;
|
||||
case Intersect:
|
||||
break;
|
||||
case Norms:
|
||||
|
|
|
@ -1,143 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.indices.warmer;
|
||||
|
||||
import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.query.QueryBuilders;
|
||||
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
|
||||
import org.elasticsearch.test.ESIntegTestCase;
|
||||
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
|
||||
import org.elasticsearch.test.ESIntegTestCase.Scope;
|
||||
import org.elasticsearch.test.InternalTestCluster.RestartCallback;
|
||||
import org.hamcrest.Matchers;
|
||||
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
|
||||
/**
|
||||
*/
|
||||
@ClusterScope(numDataNodes =0, scope= Scope.TEST)
|
||||
public class GatewayIndicesWarmerIT extends ESIntegTestCase {
|
||||
private final ESLogger logger = Loggers.getLogger(GatewayIndicesWarmerIT.class);
|
||||
|
||||
public void testStatePersistence() throws Exception {
|
||||
logger.info("--> starting 1 nodes");
|
||||
internalCluster().startNode();
|
||||
|
||||
logger.info("--> putting two templates");
|
||||
createIndex("test");
|
||||
|
||||
ensureYellow();
|
||||
|
||||
assertAcked(client().admin().indices().preparePutWarmer("warmer_1")
|
||||
.setSearchRequest(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "value1"))));
|
||||
assertAcked(client().admin().indices().preparePutWarmer("warmer_2")
|
||||
.setSearchRequest(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "value2"))));
|
||||
|
||||
logger.info("--> put template with warmer");
|
||||
client().admin().indices().preparePutTemplate("template_1")
|
||||
.setSource("{\n" +
|
||||
" \"template\" : \"xxx\",\n" +
|
||||
" \"warmers\" : {\n" +
|
||||
" \"warmer_1\" : {\n" +
|
||||
" \"types\" : [],\n" +
|
||||
" \"source\" : {\n" +
|
||||
" \"query\" : {\n" +
|
||||
" \"match_all\" : {}\n" +
|
||||
" }\n" +
|
||||
" }\n" +
|
||||
" }\n" +
|
||||
" }\n" +
|
||||
"}")
|
||||
.execute().actionGet();
|
||||
|
||||
|
||||
logger.info("--> verify warmers are registered in cluster state");
|
||||
ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
|
||||
IndexWarmersMetaData warmersMetaData = clusterState.metaData().index("test").custom(IndexWarmersMetaData.TYPE);
|
||||
assertThat(warmersMetaData, Matchers.notNullValue());
|
||||
assertThat(warmersMetaData.entries().size(), equalTo(2));
|
||||
|
||||
IndexWarmersMetaData templateWarmers = clusterState.metaData().templates().get("template_1").custom(IndexWarmersMetaData.TYPE);
|
||||
assertThat(templateWarmers, Matchers.notNullValue());
|
||||
assertThat(templateWarmers.entries().size(), equalTo(1));
|
||||
|
||||
logger.info("--> restarting the node");
|
||||
internalCluster().fullRestart(new RestartCallback() {
|
||||
@Override
|
||||
public Settings onNodeStopped(String nodeName) throws Exception {
|
||||
return Settings.EMPTY;
|
||||
}
|
||||
});
|
||||
|
||||
ensureYellow();
|
||||
|
||||
logger.info("--> verify warmers are recovered");
|
||||
clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
|
||||
IndexWarmersMetaData recoveredWarmersMetaData = clusterState.metaData().index("test").custom(IndexWarmersMetaData.TYPE);
|
||||
assertThat(recoveredWarmersMetaData.entries().size(), equalTo(warmersMetaData.entries().size()));
|
||||
for (int i = 0; i < warmersMetaData.entries().size(); i++) {
|
||||
assertThat(recoveredWarmersMetaData.entries().get(i).name(), equalTo(warmersMetaData.entries().get(i).name()));
|
||||
assertThat(recoveredWarmersMetaData.entries().get(i).source(), equalTo(warmersMetaData.entries().get(i).source()));
|
||||
}
|
||||
|
||||
logger.info("--> verify warmers in template are recovered");
|
||||
IndexWarmersMetaData recoveredTemplateWarmers = clusterState.metaData().templates().get("template_1").custom(IndexWarmersMetaData.TYPE);
|
||||
assertThat(recoveredTemplateWarmers.entries().size(), equalTo(templateWarmers.entries().size()));
|
||||
for (int i = 0; i < templateWarmers.entries().size(); i++) {
|
||||
assertThat(recoveredTemplateWarmers.entries().get(i).name(), equalTo(templateWarmers.entries().get(i).name()));
|
||||
assertThat(recoveredTemplateWarmers.entries().get(i).source(), equalTo(templateWarmers.entries().get(i).source()));
|
||||
}
|
||||
|
||||
|
||||
logger.info("--> delete warmer warmer_1");
|
||||
DeleteWarmerResponse deleteWarmerResponse = client().admin().indices().prepareDeleteWarmer().setIndices("test").setNames("warmer_1").execute().actionGet();
|
||||
assertThat(deleteWarmerResponse.isAcknowledged(), equalTo(true));
|
||||
|
||||
logger.info("--> verify warmers (delete) are registered in cluster state");
|
||||
clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
|
||||
warmersMetaData = clusterState.metaData().index("test").custom(IndexWarmersMetaData.TYPE);
|
||||
assertThat(warmersMetaData, Matchers.notNullValue());
|
||||
assertThat(warmersMetaData.entries().size(), equalTo(1));
|
||||
|
||||
logger.info("--> restarting the node");
|
||||
internalCluster().fullRestart(new RestartCallback() {
|
||||
@Override
|
||||
public Settings onNodeStopped(String nodeName) throws Exception {
|
||||
return Settings.EMPTY;
|
||||
}
|
||||
});
|
||||
|
||||
ensureYellow();
|
||||
|
||||
logger.info("--> verify warmers are recovered");
|
||||
clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
|
||||
recoveredWarmersMetaData = clusterState.metaData().index("test").custom(IndexWarmersMetaData.TYPE);
|
||||
assertThat(recoveredWarmersMetaData.entries().size(), equalTo(warmersMetaData.entries().size()));
|
||||
for (int i = 0; i < warmersMetaData.entries().size(); i++) {
|
||||
assertThat(recoveredWarmersMetaData.entries().get(i).name(), equalTo(warmersMetaData.entries().get(i).name()));
|
||||
assertThat(recoveredWarmersMetaData.entries().get(i).source(), equalTo(warmersMetaData.entries().get(i).source()));
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,159 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.indices.warmer;
|
||||
|
||||
|
||||
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
|
||||
import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse;
|
||||
import org.elasticsearch.index.query.QueryBuilders;
|
||||
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
|
||||
import org.elasticsearch.test.ESIntegTestCase;
|
||||
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
|
||||
import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_METADATA_BLOCK;
|
||||
import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_READ_BLOCK;
|
||||
import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_READ_ONLY_BLOCK;
|
||||
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_METADATA;
|
||||
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_READ;
|
||||
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_WRITE;
|
||||
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY;
|
||||
import static org.elasticsearch.cluster.metadata.MetaData.CLUSTER_READ_ONLY_BLOCK;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
|
||||
@ClusterScope(scope = ESIntegTestCase.Scope.TEST)
|
||||
public class IndicesWarmerBlocksIT extends ESIntegTestCase {
|
||||
public void testPutWarmerWithBlocks() {
|
||||
createIndex("test-blocks");
|
||||
ensureGreen("test-blocks");
|
||||
|
||||
// Index reads are blocked, the warmer can't be registered
|
||||
try {
|
||||
enableIndexBlock("test-blocks", SETTING_BLOCKS_READ);
|
||||
assertBlocked(client().admin().indices().preparePutWarmer("warmer_blocked")
|
||||
.setSearchRequest(client().prepareSearch("test-*").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())), INDEX_READ_BLOCK);
|
||||
} finally {
|
||||
disableIndexBlock("test-blocks", SETTING_BLOCKS_READ);
|
||||
}
|
||||
|
||||
// Index writes are blocked, the warmer can be registered
|
||||
try {
|
||||
enableIndexBlock("test-blocks", SETTING_BLOCKS_WRITE);
|
||||
assertAcked(client().admin().indices().preparePutWarmer("warmer_acked")
|
||||
.setSearchRequest(client().prepareSearch("test-blocks").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())));
|
||||
} finally {
|
||||
disableIndexBlock("test-blocks", SETTING_BLOCKS_WRITE);
|
||||
}
|
||||
|
||||
// Index metadata changes are blocked, the warmer can't be registered
|
||||
try {
|
||||
enableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA);
|
||||
assertBlocked(client().admin().indices().preparePutWarmer("warmer_blocked")
|
||||
.setSearchRequest(client().prepareSearch("test-*").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())), INDEX_METADATA_BLOCK);
|
||||
} finally {
|
||||
disableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA);
|
||||
}
|
||||
|
||||
// Index metadata changes are blocked, the warmer can't be registered
|
||||
try {
|
||||
enableIndexBlock("test-blocks", SETTING_READ_ONLY);
|
||||
assertBlocked(client().admin().indices().preparePutWarmer("warmer_blocked")
|
||||
.setSearchRequest(client().prepareSearch("test-*").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())), INDEX_READ_ONLY_BLOCK);
|
||||
} finally {
|
||||
disableIndexBlock("test-blocks", SETTING_READ_ONLY);
|
||||
}
|
||||
|
||||
// Adding a new warmer is not possible when the cluster is read-only
|
||||
try {
|
||||
setClusterReadOnly(true);
|
||||
assertBlocked(client().admin().indices().preparePutWarmer("warmer_blocked")
|
||||
.setSearchRequest(client().prepareSearch("test-blocks").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())), CLUSTER_READ_ONLY_BLOCK);
|
||||
} finally {
|
||||
setClusterReadOnly(false);
|
||||
}
|
||||
}
|
||||
|
||||
public void testGetWarmerWithBlocks() {
|
||||
createIndex("test-blocks");
|
||||
ensureGreen("test-blocks");
|
||||
|
||||
assertAcked(client().admin().indices().preparePutWarmer("warmer_block")
|
||||
.setSearchRequest(client().prepareSearch("test-blocks").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())));
|
||||
|
||||
// Request is not blocked
|
||||
for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) {
|
||||
try {
|
||||
enableIndexBlock("test-blocks", blockSetting);
|
||||
GetWarmersResponse response = client().admin().indices().prepareGetWarmers("test-blocks").get();
|
||||
assertThat(response.warmers().size(), equalTo(1));
|
||||
|
||||
ObjectObjectCursor<String, List<IndexWarmersMetaData.Entry>> entry = response.warmers().iterator().next();
|
||||
assertThat(entry.key, equalTo("test-blocks"));
|
||||
assertThat(entry.value.size(), equalTo(1));
|
||||
assertThat(entry.value.iterator().next().name(), equalTo("warmer_block"));
|
||||
} finally {
|
||||
disableIndexBlock("test-blocks", blockSetting);
|
||||
}
|
||||
}
|
||||
|
||||
// Request is blocked
|
||||
try {
|
||||
enableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA);
|
||||
assertBlocked(client().admin().indices().prepareGetWarmers("test-blocks"), INDEX_METADATA_BLOCK);
|
||||
} finally {
|
||||
disableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA);
|
||||
}
|
||||
}
|
||||
|
||||
public void testDeleteWarmerWithBlocks() {
|
||||
createIndex("test-blocks");
|
||||
ensureGreen("test-blocks");
|
||||
|
||||
// Request is not blocked
|
||||
for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) {
|
||||
try {
|
||||
assertAcked(client().admin().indices().preparePutWarmer("warmer_block")
|
||||
.setSearchRequest(client().prepareSearch("test-blocks").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())));
|
||||
|
||||
enableIndexBlock("test-blocks", blockSetting);
|
||||
assertAcked(client().admin().indices().prepareDeleteWarmer().setIndices("test-blocks").setNames("warmer_block"));
|
||||
} finally {
|
||||
disableIndexBlock("test-blocks", blockSetting);
|
||||
}
|
||||
}
|
||||
|
||||
// Request is blocked
|
||||
for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) {
|
||||
try {
|
||||
assertAcked(client().admin().indices().preparePutWarmer("warmer_block")
|
||||
.setSearchRequest(client().prepareSearch("test-blocks").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())));
|
||||
|
||||
enableIndexBlock("test-blocks", blockSetting);
|
||||
assertBlocked(client().admin().indices().prepareDeleteWarmer().setIndices("test-blocks").setNames("warmer_block"));
|
||||
} finally {
|
||||
disableIndexBlock("test-blocks", blockSetting);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,287 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.indices.warmer;
|
||||
|
||||
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
|
||||
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
|
||||
import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse;
|
||||
import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse;
|
||||
import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerResponse;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.query.QueryBuilders;
|
||||
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
|
||||
import org.elasticsearch.search.warmer.IndexWarmerMissingException;
|
||||
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
|
||||
import org.elasticsearch.test.ESIntegTestCase;
|
||||
import org.hamcrest.Matchers;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.greaterThan;
|
||||
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
|
||||
import static org.hamcrest.Matchers.is;
|
||||
|
||||
public class SimpleIndicesWarmerIT extends ESIntegTestCase {
|
||||
public void testSimpleWarmers() {
|
||||
createIndex("test");
|
||||
ensureGreen();
|
||||
|
||||
PutWarmerResponse putWarmerResponse = client().admin().indices().preparePutWarmer("warmer_1")
|
||||
.setSearchRequest(client().prepareSearch("test").setTypes("a1").setQuery(QueryBuilders.termQuery("field", "value1")))
|
||||
.execute().actionGet();
|
||||
assertThat(putWarmerResponse.isAcknowledged(), equalTo(true));
|
||||
putWarmerResponse = client().admin().indices().preparePutWarmer("warmer_2")
|
||||
.setSearchRequest(client().prepareSearch("test").setTypes("a2").setQuery(QueryBuilders.termQuery("field", "value2")))
|
||||
.execute().actionGet();
|
||||
assertThat(putWarmerResponse.isAcknowledged(), equalTo(true));
|
||||
|
||||
client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
|
||||
client().prepareIndex("test", "type1", "2").setSource("field", "value2").setRefresh(true).execute().actionGet();
|
||||
|
||||
GetWarmersResponse getWarmersResponse = client().admin().indices().prepareGetWarmers("tes*")
|
||||
.execute().actionGet();
|
||||
assertThat(getWarmersResponse.getWarmers().size(), equalTo(1));
|
||||
assertThat(getWarmersResponse.getWarmers().get("test").size(), equalTo(2));
|
||||
assertThat(getWarmersResponse.getWarmers().get("test").get(0).name(), equalTo("warmer_1"));
|
||||
assertThat(getWarmersResponse.getWarmers().get("test").get(1).name(), equalTo("warmer_2"));
|
||||
|
||||
getWarmersResponse = client().admin().indices().prepareGetWarmers("test").addWarmers("warmer_*")
|
||||
.execute().actionGet();
|
||||
assertThat(getWarmersResponse.getWarmers().size(), equalTo(1));
|
||||
assertThat(getWarmersResponse.getWarmers().get("test").size(), equalTo(2));
|
||||
assertThat(getWarmersResponse.getWarmers().get("test").get(0).name(), equalTo("warmer_1"));
|
||||
assertThat(getWarmersResponse.getWarmers().get("test").get(1).name(), equalTo("warmer_2"));
|
||||
|
||||
getWarmersResponse = client().admin().indices().prepareGetWarmers("test").addWarmers("warmer_1")
|
||||
.execute().actionGet();
|
||||
assertThat(getWarmersResponse.getWarmers().size(), equalTo(1));
|
||||
assertThat(getWarmersResponse.getWarmers().get("test").size(), equalTo(1));
|
||||
assertThat(getWarmersResponse.getWarmers().get("test").get(0).name(), equalTo("warmer_1"));
|
||||
|
||||
getWarmersResponse = client().admin().indices().prepareGetWarmers("test").addWarmers("warmer_2")
|
||||
.execute().actionGet();
|
||||
assertThat(getWarmersResponse.getWarmers().size(), equalTo(1));
|
||||
assertThat(getWarmersResponse.getWarmers().get("test").size(), equalTo(1));
|
||||
assertThat(getWarmersResponse.getWarmers().get("test").get(0).name(), equalTo("warmer_2"));
|
||||
|
||||
getWarmersResponse = client().admin().indices().prepareGetWarmers("test").addTypes("a*").addWarmers("warmer_2")
|
||||
.execute().actionGet();
|
||||
assertThat(getWarmersResponse.getWarmers().size(), equalTo(1));
|
||||
assertThat(getWarmersResponse.getWarmers().get("test").size(), equalTo(1));
|
||||
assertThat(getWarmersResponse.getWarmers().get("test").get(0).name(), equalTo("warmer_2"));
|
||||
|
||||
getWarmersResponse = client().admin().indices().prepareGetWarmers("test").addTypes("a1").addWarmers("warmer_2")
|
||||
.execute().actionGet();
|
||||
assertThat(getWarmersResponse.getWarmers().size(), equalTo(0));
|
||||
}
|
||||
|
||||
public void testTtemplateWarmer() {
|
||||
client().admin().indices().preparePutTemplate("template_1")
|
||||
.setSource("{\n" +
|
||||
" \"template\" : \"*\",\n" +
|
||||
" \"warmers\" : {\n" +
|
||||
" \"warmer_1\" : {\n" +
|
||||
" \"types\" : [],\n" +
|
||||
" \"source\" : {\n" +
|
||||
" \"query\" : {\n" +
|
||||
" \"match_all\" : {}\n" +
|
||||
" }\n" +
|
||||
" }\n" +
|
||||
" }\n" +
|
||||
" }\n" +
|
||||
"}")
|
||||
.execute().actionGet();
|
||||
|
||||
createIndex("test");
|
||||
ensureGreen();
|
||||
|
||||
ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
|
||||
IndexWarmersMetaData warmersMetaData = clusterState.metaData().index("test").custom(IndexWarmersMetaData.TYPE);
|
||||
assertThat(warmersMetaData, Matchers.notNullValue());
|
||||
assertThat(warmersMetaData.entries().size(), equalTo(1));
|
||||
|
||||
client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
|
||||
client().prepareIndex("test", "type1", "2").setSource("field", "value2").setRefresh(true).execute().actionGet();
|
||||
}
|
||||
|
||||
public void testCreateIndexWarmer() {
|
||||
assertAcked(prepareCreate("test")
|
||||
.setSource("{\n" +
|
||||
" \"warmers\" : {\n" +
|
||||
" \"warmer_1\" : {\n" +
|
||||
" \"types\" : [],\n" +
|
||||
" \"source\" : {\n" +
|
||||
" \"query\" : {\n" +
|
||||
" \"match_all\" : {}\n" +
|
||||
" }\n" +
|
||||
" }\n" +
|
||||
" }\n" +
|
||||
" }\n" +
|
||||
"}"));
|
||||
|
||||
ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
|
||||
IndexWarmersMetaData warmersMetaData = clusterState.metaData().index("test").custom(IndexWarmersMetaData.TYPE);
|
||||
assertThat(warmersMetaData, Matchers.notNullValue());
|
||||
assertThat(warmersMetaData.entries().size(), equalTo(1));
|
||||
|
||||
client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
|
||||
client().prepareIndex("test", "type1", "2").setSource("field", "value2").setRefresh(true).execute().actionGet();
|
||||
}
|
||||
|
||||
public void testDeleteNonExistentIndexWarmer() {
|
||||
createIndex("test");
|
||||
try {
|
||||
client().admin().indices().prepareDeleteWarmer().setIndices("test").setNames("foo").execute().actionGet();
|
||||
fail("warmer foo should not exist");
|
||||
} catch (IndexWarmerMissingException ex) {
|
||||
assertThat(ex.names()[0], equalTo("foo"));
|
||||
}
|
||||
}
|
||||
|
||||
// issue 8991
|
||||
public void testDeleteAllIndexWarmerDoesNotThrowWhenNoWarmers() {
|
||||
createIndex("test");
|
||||
DeleteWarmerResponse deleteWarmerResponse = client().admin().indices().prepareDeleteWarmer()
|
||||
.setIndices("test").setNames("_all").execute().actionGet();
|
||||
assertThat(deleteWarmerResponse.isAcknowledged(), equalTo(true));
|
||||
|
||||
deleteWarmerResponse = client().admin().indices().prepareDeleteWarmer()
|
||||
.setIndices("test").setNames("foo", "_all", "bar").execute().actionGet();
|
||||
assertThat(deleteWarmerResponse.isAcknowledged(), equalTo(true));
|
||||
}
|
||||
|
||||
public void testDeleteIndexWarmerTest() {
|
||||
createIndex("test");
|
||||
ensureGreen();
|
||||
|
||||
PutWarmerResponse putWarmerResponse = client().admin().indices().preparePutWarmer("custom_warmer")
|
||||
.setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery()))
|
||||
.get();
|
||||
assertThat(putWarmerResponse.isAcknowledged(), equalTo(true));
|
||||
|
||||
GetWarmersResponse getWarmersResponse = client().admin().indices().prepareGetWarmers("test").get();
|
||||
assertThat(getWarmersResponse.warmers().size(), equalTo(1));
|
||||
ObjectObjectCursor<String, List<IndexWarmersMetaData.Entry>> entry = getWarmersResponse.warmers().iterator().next();
|
||||
assertThat(entry.key, equalTo("test"));
|
||||
assertThat(entry.value.size(), equalTo(1));
|
||||
assertThat(entry.value.iterator().next().name(), equalTo("custom_warmer"));
|
||||
|
||||
DeleteWarmerResponse deleteWarmerResponse = client().admin().indices().prepareDeleteWarmer().setIndices("test").setNames("custom_warmer").get();
|
||||
assertThat(deleteWarmerResponse.isAcknowledged(), equalTo(true));
|
||||
|
||||
getWarmersResponse = client().admin().indices().prepareGetWarmers("test").get();
|
||||
assertThat(getWarmersResponse.warmers().size(), equalTo(0));
|
||||
}
|
||||
|
||||
// issue 3246
|
||||
public void testEnsureThatIndexWarmersCanBeChangedOnRuntime() throws Exception {
|
||||
createIndex("test");
|
||||
ensureGreen();
|
||||
|
||||
PutWarmerResponse putWarmerResponse = client().admin().indices().preparePutWarmer("custom_warmer")
|
||||
.setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery()))
|
||||
.execute().actionGet();
|
||||
assertThat(putWarmerResponse.isAcknowledged(), equalTo(true));
|
||||
|
||||
client().prepareIndex("test", "test", "1").setSource("foo", "bar").setRefresh(true).execute().actionGet();
|
||||
|
||||
logger.info("--> Disabling warmers execution");
|
||||
client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.warmer.enabled", false)).execute().actionGet();
|
||||
|
||||
long warmerRunsAfterDisabling = getWarmerRuns();
|
||||
assertThat(warmerRunsAfterDisabling, greaterThanOrEqualTo(1L));
|
||||
|
||||
client().prepareIndex("test", "test", "2").setSource("foo2", "bar2").setRefresh(true).execute().actionGet();
|
||||
|
||||
assertThat(getWarmerRuns(), equalTo(warmerRunsAfterDisabling));
|
||||
}
|
||||
|
||||
public void testGettingAllWarmersUsingAllAndWildcardsShouldWork() throws Exception {
|
||||
createIndex("test");
|
||||
ensureGreen();
|
||||
|
||||
PutWarmerResponse putWarmerResponse = client().admin().indices().preparePutWarmer("custom_warmer")
|
||||
.setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery()))
|
||||
.execute().actionGet();
|
||||
assertThat(putWarmerResponse.isAcknowledged(), equalTo(true));
|
||||
|
||||
PutWarmerResponse anotherPutWarmerResponse = client().admin().indices().preparePutWarmer("second_custom_warmer")
|
||||
.setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery()))
|
||||
.execute().actionGet();
|
||||
assertThat(anotherPutWarmerResponse.isAcknowledged(), equalTo(true));
|
||||
|
||||
GetWarmersResponse getWarmersResponse = client().admin().indices().prepareGetWarmers("*").addWarmers("*").get();
|
||||
assertThat(getWarmersResponse.warmers().size(), is(1));
|
||||
|
||||
getWarmersResponse = client().admin().indices().prepareGetWarmers("_all").addWarmers("_all").get();
|
||||
assertThat(getWarmersResponse.warmers().size(), is(1));
|
||||
|
||||
getWarmersResponse = client().admin().indices().prepareGetWarmers("t*").addWarmers("c*").get();
|
||||
assertThat(getWarmersResponse.warmers().size(), is(1));
|
||||
|
||||
getWarmersResponse = client().admin().indices().prepareGetWarmers("test").addWarmers("custom_warmer", "second_custom_warmer").get();
|
||||
assertThat(getWarmersResponse.warmers().size(), is(1));
|
||||
}
|
||||
|
||||
private long getWarmerRuns() {
|
||||
IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats("test").clear().setWarmer(true).execute().actionGet();
|
||||
return indicesStatsResponse.getIndex("test").getPrimaries().warmer.total();
|
||||
}
|
||||
|
||||
public void testQueryCacheOnWarmer() {
|
||||
createIndex("test");
|
||||
ensureGreen();
|
||||
|
||||
assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED, false)));
|
||||
logger.info("register warmer with no query cache, validate no cache is used");
|
||||
assertAcked(client().admin().indices().preparePutWarmer("warmer_1")
|
||||
.setSearchRequest(client().prepareSearch("test").setTypes("a1").setQuery(QueryBuilders.matchAllQuery()))
|
||||
.get());
|
||||
|
||||
client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
|
||||
assertThat(client().admin().indices().prepareStats("test").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), equalTo(0l));
|
||||
|
||||
logger.info("register warmer with query cache, validate caching happened");
|
||||
assertAcked(client().admin().indices().preparePutWarmer("warmer_1")
|
||||
.setSearchRequest(client().prepareSearch("test").setTypes("a1").setQuery(QueryBuilders.matchAllQuery()).setRequestCache(true))
|
||||
.get());
|
||||
|
||||
// index again, to make sure it gets refreshed
|
||||
client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
|
||||
assertThat(client().admin().indices().prepareStats("test").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), greaterThan(0l));
|
||||
|
||||
client().admin().indices().prepareClearCache().setRequestCache(true).get(); // clean the cache
|
||||
assertThat(client().admin().indices().prepareStats("test").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), equalTo(0l));
|
||||
|
||||
logger.info("enable default query caching on the index level, and test that no flag on warmer still caches");
|
||||
assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED, true)));
|
||||
|
||||
assertAcked(client().admin().indices().preparePutWarmer("warmer_1")
|
||||
.setSearchRequest(client().prepareSearch("test").setTypes("a1").setQuery(QueryBuilders.matchAllQuery()))
|
||||
.get());
|
||||
|
||||
// index again, to make sure it gets refreshed
|
||||
client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
|
||||
assertThat(client().admin().indices().prepareStats("test").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), greaterThan(0l));
|
||||
}
|
||||
}
|
|
@ -251,7 +251,7 @@ public class ConcurrentPercolatorIT extends ESIntegTestCase {
|
|||
.setSource(onlyField1Doc).execute().actionGet();
|
||||
assertNoFailures(response);
|
||||
assertThat(response.getSuccessfulShards(), equalTo(response.getTotalShards()));
|
||||
assertThat(response.getMatches().length, greaterThanOrEqualTo(atLeastExpected));
|
||||
assertThat(response.getCount(), greaterThanOrEqualTo((long) atLeastExpected));
|
||||
break;
|
||||
case 1:
|
||||
atLeastExpected = type2.get();
|
||||
|
@ -259,7 +259,7 @@ public class ConcurrentPercolatorIT extends ESIntegTestCase {
|
|||
.setSource(onlyField2Doc).execute().actionGet();
|
||||
assertNoFailures(response);
|
||||
assertThat(response.getSuccessfulShards(), equalTo(response.getTotalShards()));
|
||||
assertThat(response.getMatches().length, greaterThanOrEqualTo(atLeastExpected));
|
||||
assertThat(response.getCount(), greaterThanOrEqualTo((long) atLeastExpected));
|
||||
break;
|
||||
case 2:
|
||||
atLeastExpected = type3.get();
|
||||
|
@ -267,7 +267,7 @@ public class ConcurrentPercolatorIT extends ESIntegTestCase {
|
|||
.setSource(field1AndField2Doc).execute().actionGet();
|
||||
assertNoFailures(response);
|
||||
assertThat(response.getSuccessfulShards(), equalTo(response.getTotalShards()));
|
||||
assertThat(response.getMatches().length, greaterThanOrEqualTo(atLeastExpected));
|
||||
assertThat(response.getCount(), greaterThanOrEqualTo((long) atLeastExpected));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -223,6 +223,7 @@ public class RecoveryPercolatorIT extends ESIntegTestCase {
|
|||
percolatorRecovery(false);
|
||||
}
|
||||
|
||||
@AwaitsFix(bugUrl = "sometimes reprodes with: gradle :core:integTest -Dtests.seed=21DDCAA92013B00C -Dtests.class=org.elasticsearch.percolator.RecoveryPercolatorIT -Dtests.method=\"testMultiPercolatorRecovery\"")
|
||||
public void testMultiPercolatorRecovery() throws Exception {
|
||||
percolatorRecovery(true);
|
||||
}
|
||||
|
|
|
@ -327,7 +327,6 @@ public class HeadersAndContextCopyClientTests extends ESTestCase {
|
|||
client.admin().indices().prepareCreate("test"),
|
||||
client.admin().indices().prepareAliases(),
|
||||
client.admin().indices().prepareAnalyze("text"),
|
||||
client.admin().indices().prepareDeleteWarmer(),
|
||||
client.admin().indices().prepareTypesExists("type"),
|
||||
client.admin().indices().prepareClose()
|
||||
};
|
||||
|
|
|
@ -299,7 +299,7 @@ public class GeoShapeQueryTests extends ESSingleNodeTestCase {
|
|||
|
||||
logger.info("Created Random GeometryCollection containing " + gcb.numShapes() + " shapes");
|
||||
|
||||
client().admin().indices().prepareCreate("test").addMapping("type", "location", "type=geo_shape")
|
||||
client().admin().indices().prepareCreate("test").addMapping("type", "location", "type=geo_shape,tree=quadtree")
|
||||
.execute().actionGet();
|
||||
|
||||
XContentBuilder docSource = gcb.toXContent(jsonBuilder().startObject().field("location"), null).endObject();
|
||||
|
@ -317,10 +317,10 @@ public class GeoShapeQueryTests extends ESSingleNodeTestCase {
|
|||
|
||||
public void testContainsShapeQuery() throws Exception {
|
||||
// Create a random geometry collection.
|
||||
Rectangle mbr = xRandomRectangle(getRandom(), xRandomPoint(getRandom()));
|
||||
Rectangle mbr = xRandomRectangle(getRandom(), xRandomPoint(getRandom()), true);
|
||||
GeometryCollectionBuilder gcb = createGeometryCollectionWithin(getRandom(), mbr);
|
||||
|
||||
client().admin().indices().prepareCreate("test").addMapping("type", "location", "type=geo_shape")
|
||||
client().admin().indices().prepareCreate("test").addMapping("type", "location", "type=geo_shape,tree=quadtree" )
|
||||
.execute().actionGet();
|
||||
|
||||
XContentBuilder docSource = gcb.toXContent(jsonBuilder().startObject().field("location"), null).endObject();
|
||||
|
@ -333,7 +333,7 @@ public class GeoShapeQueryTests extends ESSingleNodeTestCase {
|
|||
|
||||
ShapeBuilder filterShape = (gcb.getShapeAt(randomIntBetween(0, gcb.numShapes() - 1)));
|
||||
GeoShapeQueryBuilder filter = QueryBuilders.geoShapeQuery("location", filterShape)
|
||||
.relation(ShapeRelation.INTERSECTS);
|
||||
.relation(ShapeRelation.CONTAINS);
|
||||
SearchResponse response = client().prepareSearch("test").setTypes("type").setQuery(QueryBuilders.matchAllQuery())
|
||||
.setPostFilter(filter).get();
|
||||
assertSearchResponse(response);
|
||||
|
@ -343,7 +343,7 @@ public class GeoShapeQueryTests extends ESSingleNodeTestCase {
|
|||
|
||||
public void testShapeFilterWithDefinedGeoCollection() throws Exception {
|
||||
createIndex("shapes");
|
||||
client().admin().indices().prepareCreate("test").addMapping("type", "location", "type=geo_shape")
|
||||
client().admin().indices().prepareCreate("test").addMapping("type", "location", "type=geo_shape,tree=quadtree")
|
||||
.execute().actionGet();
|
||||
|
||||
XContentBuilder docSource = jsonBuilder().startObject().startObject("location")
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
package org.elasticsearch.search.highlight;
|
||||
|
||||
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
|
||||
|
||||
import org.elasticsearch.action.index.IndexRequestBuilder;
|
||||
import org.elasticsearch.action.search.SearchRequestBuilder;
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
|
@ -802,9 +803,8 @@ public class HighlighterSearchIT extends ESIntegTestCase {
|
|||
assertAcked(prepareCreate("test").addMapping("type1", type1TermVectorMapping()));
|
||||
ensureGreen();
|
||||
|
||||
client().prepareIndex("test", "type1")
|
||||
.setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get();
|
||||
refresh();
|
||||
indexRandom(true, client().prepareIndex("test", "type1")
|
||||
.setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog"));
|
||||
|
||||
logger.info("--> highlighting and searching on field1");
|
||||
SearchSourceBuilder source = searchSource()
|
||||
|
@ -822,7 +822,6 @@ public class HighlighterSearchIT extends ESIntegTestCase {
|
|||
|
||||
searchResponse = client().prepareSearch("test").setSource(source).get();
|
||||
|
||||
// LUCENE 3.1 UPGRADE: Caused adding the space at the end...
|
||||
assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("this is a <xxx>test</xxx>"));
|
||||
|
||||
logger.info("--> searching on _all, highlighting on field2");
|
||||
|
@ -832,7 +831,6 @@ public class HighlighterSearchIT extends ESIntegTestCase {
|
|||
|
||||
searchResponse = client().prepareSearch("test").setSource(source).get();
|
||||
|
||||
// LUCENE 3.1 UPGRADE: Caused adding the space at the end...
|
||||
assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> brown fox jumps over the lazy dog"));
|
||||
|
||||
logger.info("--> searching on _all, highlighting on field2");
|
||||
|
@ -842,8 +840,26 @@ public class HighlighterSearchIT extends ESIntegTestCase {
|
|||
|
||||
searchResponse = client().prepareSearch("test").setSource(source).get();
|
||||
|
||||
// LUCENE 3.1 UPGRADE: Caused adding the space at the end...
|
||||
assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> brown fox jumps over the lazy dog"));
|
||||
|
||||
logger.info("--> searching with boundary characters");
|
||||
source = searchSource()
|
||||
.query(matchQuery("field2", "quick"))
|
||||
.highlighter(highlight().field("field2", 30, 1).boundaryChars(new char[] {' '}));
|
||||
|
||||
searchResponse = client().prepareSearch("test").setSource(source).get();
|
||||
|
||||
assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <em>quick</em> brown fox jumps over"));
|
||||
|
||||
logger.info("--> searching with boundary characters on the field");
|
||||
source = searchSource()
|
||||
.query(matchQuery("field2", "quick"))
|
||||
.highlighter(highlight().field(new Field("field2").fragmentSize(30).numOfFragments(1).boundaryChars(new char[] {' '})));
|
||||
|
||||
searchResponse = client().prepareSearch("test").setSource(source).get();
|
||||
|
||||
assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <em>quick</em> brown fox jumps over"));
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -114,7 +114,7 @@ public class RandomShapeGenerator extends RandomGeoGenerator {
|
|||
throws InvalidShapeException {
|
||||
if (numGeometries <= 0) {
|
||||
// cap geometry collection at 4 shapes (to save test time)
|
||||
numGeometries = RandomInts.randomIntBetween(r, 2, 5);
|
||||
numGeometries = RandomInts.randomIntBetween(r, 2, 4);
|
||||
}
|
||||
|
||||
if (nearPoint == null) {
|
||||
|
@ -255,11 +255,31 @@ public class RandomShapeGenerator extends RandomGeoGenerator {
|
|||
return p;
|
||||
}
|
||||
|
||||
public static Rectangle xRandomRectangle(Random r, Point nearP) {
|
||||
Rectangle bounds = ctx.getWorldBounds();
|
||||
private static Rectangle xRandomRectangle(Random r, Point nearP, Rectangle bounds, boolean small) {
|
||||
if (nearP == null)
|
||||
nearP = xRandomPointIn(r, bounds);
|
||||
|
||||
if (small == true) {
|
||||
// between 3 and 6 degrees
|
||||
final double latRange = 3 * r.nextDouble() + 3;
|
||||
final double lonRange = 3 * r.nextDouble() + 3;
|
||||
|
||||
double minX = nearP.getX();
|
||||
double maxX = minX + lonRange;
|
||||
if (maxX > 180) {
|
||||
maxX = minX;
|
||||
minX -= lonRange;
|
||||
}
|
||||
double minY = nearP.getY();
|
||||
double maxY = nearP.getY() + latRange;
|
||||
if (maxY > 90) {
|
||||
maxY = minY;
|
||||
minY -= latRange;
|
||||
}
|
||||
|
||||
return ctx.makeRectangle(minX, maxX, minY, maxY);
|
||||
}
|
||||
|
||||
Range xRange = xRandomRange(r, rarely(r) ? 0 : nearP.getX(), Range.xRange(bounds, ctx));
|
||||
Range yRange = xRandomRange(r, rarely(r) ? 0 : nearP.getY(), Range.yRange(bounds, ctx));
|
||||
|
||||
|
@ -270,6 +290,14 @@ public class RandomShapeGenerator extends RandomGeoGenerator {
|
|||
xDivisible(yRange.getMax()*10e3)/10e3);
|
||||
}
|
||||
|
||||
public static Rectangle xRandomRectangle(Random r, Point nearP) {
|
||||
return xRandomRectangle(r, nearP, ctx.getWorldBounds(), false);
|
||||
}
|
||||
|
||||
public static Rectangle xRandomRectangle(Random r, Point nearP, boolean small) {
|
||||
return xRandomRectangle(r, nearP, ctx.getWorldBounds(), small);
|
||||
}
|
||||
|
||||
private static boolean rarely(Random r) {
|
||||
return RandomInts.randomInt(r, 100) >= 90;
|
||||
}
|
||||
|
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
|
@ -257,10 +257,19 @@ def generate_index(client, version, index_name):
|
|||
# Same as ES default (5 GB), but missing the units to make sure they are inserted on upgrade:
|
||||
settings['merge.policy.max_merged_segment'] = '5368709120'
|
||||
|
||||
warmers = {}
|
||||
warmers['warmer1'] = {
|
||||
'source': {
|
||||
'query': {
|
||||
'match_all': {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
client.indices.create(index=index_name, body={
|
||||
'settings': settings,
|
||||
'mappings': mappings
|
||||
'mappings': mappings,
|
||||
'warmers': warmers
|
||||
})
|
||||
health = client.cluster.health(wait_for_status='green', wait_for_relocating_shards=0)
|
||||
assert health['timed_out'] == False, 'cluster health timed out %s' % health
|
||||
|
|
|
@ -132,7 +132,7 @@ HOSTNAME=`hostname | cut -d. -f1`
|
|||
export HOSTNAME
|
||||
|
||||
# manual parsing to find out, if process should be detached
|
||||
daemonized=`echo $* | grep -E -- '(^-d |-d$| -d |--daemonize$|--daemonize )'`
|
||||
daemonized=`echo $* | egrep -- '(^-d |-d$| -d |--daemonize$|--daemonize )'`
|
||||
if [ -z "$daemonized" ] ; then
|
||||
exec "$JAVA" $JAVA_OPTS $ES_JAVA_OPTS -Des.path.home="$ES_HOME" -cp "$ES_CLASSPATH" \
|
||||
org.elasticsearch.bootstrap.Elasticsearch start "$@"
|
||||
|
|
|
@ -28,7 +28,7 @@ The Hadoop HDFS Repository plugin adds support for using HDFS as a repository.
|
|||
|
||||
The following plugin has been contributed by our community:
|
||||
|
||||
* https://github.com/wikimedia/search-repository-swift[Openstack Swift] (by http://en.cam4.es/youngqcmeat/Wikimedia Foundation)
|
||||
* https://github.com/wikimedia/search-repository-swift[Openstack Swift] (by Wikimedia Foundation)
|
||||
|
||||
This community plugin appears to have been abandoned:
|
||||
|
||||
|
|
|
@ -622,7 +622,9 @@ The `french` analyzer could be reimplemented as a `custom` analyzer as follows:
|
|||
"filter": {
|
||||
"french_elision": {
|
||||
"type": "elision",
|
||||
"articles": [ "l", "m", "t", "qu", "n", "s",
|
||||
"articles_case": true,
|
||||
"articles": [
|
||||
"l", "m", "t", "qu", "n", "s",
|
||||
"j", "d", "c", "jusqu", "quoiqu",
|
||||
"lorsqu", "puisqu"
|
||||
]
|
||||
|
|
|
@ -63,7 +63,7 @@ Currently available <<modules-threadpool,thread pools>>:
|
|||
|`search` |`s` |Thread pool used for <<search-search,search>>/<<search-count,count>> operations
|
||||
|`snapshot` |`sn` |Thread pool used for <<modules-snapshots,snapshot>> operations
|
||||
|`suggest` |`su` |Thread pool used for <<search-suggesters,suggester>> operations
|
||||
|`warmer` |`w` |Thread pool used for <<indices-warmers,index warm-up>> operations
|
||||
|`warmer` |`w` |Thread pool used for index warm-up operations
|
||||
|=======================================================================
|
||||
|
||||
The thread pool name (or alias) must be combined with a thread pool field below
|
||||
|
|
|
@ -4,8 +4,7 @@
|
|||
[partintro]
|
||||
--
|
||||
The indices APIs are used to manage individual indices,
|
||||
index settings, aliases, mappings, index templates
|
||||
and warmers.
|
||||
index settings, aliases, mappings, and index templates.
|
||||
|
||||
[float]
|
||||
[[index-management]]
|
||||
|
@ -38,7 +37,6 @@ and warmers.
|
|||
* <<indices-get-settings>>
|
||||
* <<indices-analyze>>
|
||||
* <<indices-templates>>
|
||||
* <<indices-warmers>>
|
||||
|
||||
[float]
|
||||
[[shadow-replicas]]
|
||||
|
@ -92,8 +90,6 @@ include::indices/analyze.asciidoc[]
|
|||
|
||||
include::indices/templates.asciidoc[]
|
||||
|
||||
include::indices/warmers.asciidoc[]
|
||||
|
||||
include::indices/shadow-replicas.asciidoc[]
|
||||
|
||||
include::indices/stats.asciidoc[]
|
||||
|
|
|
@ -86,27 +86,6 @@ curl -XPOST localhost:9200/test -d '{
|
|||
}'
|
||||
--------------------------------------------------
|
||||
|
||||
[float]
|
||||
[[warmers]]
|
||||
=== Warmers
|
||||
|
||||
The create index API allows also to provide a set of <<indices-warmers,warmers>>:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
curl -XPUT localhost:9200/test -d '{
|
||||
"warmers" : {
|
||||
"warmer_1" : {
|
||||
"source" : {
|
||||
"query" : {
|
||||
...
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}'
|
||||
--------------------------------------------------
|
||||
|
||||
[float]
|
||||
[[create-index-aliases]]
|
||||
=== Aliases
|
||||
|
|
|
@ -27,4 +27,4 @@ $ curl -XGET 'http://localhost:9200/twitter/_settings,_mappings'
|
|||
|
||||
The above command will only return the settings and mappings for the index called `twitter`.
|
||||
|
||||
The available features are `_settings`, `_mappings`, `_warmers` and `_aliases`.
|
||||
The available features are `_settings`, `_mappings` and `_aliases`.
|
||||
|
|
|
@ -1,194 +0,0 @@
|
|||
[[indices-warmers]]
|
||||
== Warmers
|
||||
|
||||
Index warming allows to run registered search requests to warm up the index
|
||||
before it is available for search. With the near real time aspect of search,
|
||||
cold data (segments) will be warmed up before they become available for search.
|
||||
This includes things such as the filter cache, filesystem cache, and loading
|
||||
field data for fields.
|
||||
|
||||
Warmup searches typically include requests that require heavy loading of
|
||||
data, such as aggregations or sorting on specific fields. The warmup APIs
|
||||
allows to register warmup (search) under specific names, remove them,
|
||||
and get them.
|
||||
|
||||
Index warmup can be disabled by setting `index.warmer.enabled` to
|
||||
`false`. It is supported as a realtime setting using update settings
|
||||
API. This can be handy when doing initial bulk indexing: disable pre
|
||||
registered warmers to make indexing faster and less expensive and then
|
||||
enable it.
|
||||
|
||||
[float]
|
||||
[[creation]]
|
||||
=== Index Creation / Templates
|
||||
|
||||
Warmers can be registered when an index gets created, for example:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
curl -XPUT localhost:9200/test -d '{
|
||||
"warmers" : {
|
||||
"warmer_1" : {
|
||||
"types" : [],
|
||||
"source" : {
|
||||
"query" : {
|
||||
...
|
||||
},
|
||||
"aggs" : {
|
||||
...
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}'
|
||||
--------------------------------------------------
|
||||
|
||||
Or, in an index template:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
curl -XPUT localhost:9200/_template/template_1 -d '
|
||||
{
|
||||
"template" : "te*",
|
||||
"warmers" : {
|
||||
"warmer_1" : {
|
||||
"types" : [],
|
||||
"source" : {
|
||||
"query" : {
|
||||
...
|
||||
},
|
||||
"aggs" : {
|
||||
...
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}'
|
||||
--------------------------------------------------
|
||||
|
||||
On the same level as `types` and `source`, the `request_cache` flag is supported
|
||||
to enable request caching for the warmed search request. If not specified, it will
|
||||
use the index level configuration of query caching.
|
||||
|
||||
[float]
|
||||
[[warmer-adding]]
|
||||
=== Put Warmer
|
||||
|
||||
Allows to put a warmup search request on a specific index (or indices),
|
||||
with the body composing of a regular search request. Types can be
|
||||
provided as part of the URI if the search request is designed to be run
|
||||
only against the specific types.
|
||||
|
||||
Here is an example that registers a warmup called `warmer_1` against
|
||||
index `test` (can be alias or several indices), for a search request
|
||||
that runs against all types:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
curl -XPUT localhost:9200/test/_warmer/warmer_1 -d '{
|
||||
"query" : {
|
||||
"match_all" : {}
|
||||
},
|
||||
"aggs" : {
|
||||
"aggs_1" : {
|
||||
"terms" : {
|
||||
"field" : "field"
|
||||
}
|
||||
}
|
||||
}
|
||||
}'
|
||||
--------------------------------------------------
|
||||
|
||||
And an example that registers a warmup against specific types:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
curl -XPUT localhost:9200/test/type1/_warmer/warmer_1 -d '{
|
||||
"query" : {
|
||||
"match_all" : {}
|
||||
},
|
||||
"aggs" : {
|
||||
"aggs_1" : {
|
||||
"terms" : {
|
||||
"field" : "field"
|
||||
}
|
||||
}
|
||||
}
|
||||
}'
|
||||
--------------------------------------------------
|
||||
|
||||
All options:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
|
||||
PUT _warmer/{warmer_name}
|
||||
|
||||
PUT /{index}/_warmer/{warmer_name}
|
||||
|
||||
PUT /{index}/{type}/_warmer/{warmer_name}
|
||||
|
||||
--------------------------------------------------
|
||||
|
||||
|
||||
where
|
||||
|
||||
[horizontal]
|
||||
`{index}`:: `* | _all | glob pattern | name1, name2, …`
|
||||
|
||||
`{type}`:: `* | _all | glob pattern | name1, name2, …`
|
||||
|
||||
Instead of `_warmer` you can also use the plural `_warmers`.
|
||||
|
||||
The `request_cache` parameter can be used to enable request caching for
|
||||
the search request. If not specified, it will use the index level configuration
|
||||
of query caching.
|
||||
|
||||
|
||||
[float]
|
||||
[[removing]]
|
||||
=== Delete Warmers
|
||||
|
||||
Warmers can be deleted using the following endpoint:
|
||||
|
||||
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
|
||||
[DELETE] /{index}/_warmer/{name}
|
||||
|
||||
--------------------------------------------------
|
||||
|
||||
|
||||
where
|
||||
|
||||
[horizontal]
|
||||
`{index}`:: `* | _all | glob pattern | name1, name2, …`
|
||||
|
||||
`{name}`:: `* | _all | glob pattern | name1, name2, …`
|
||||
|
||||
Instead of `_warmer` you can also use the plural `_warmers`.
|
||||
|
||||
[float]
|
||||
[[warmer-retrieving]]
|
||||
=== GETting Warmer
|
||||
|
||||
Getting a warmer for specific index (or alias, or several indices) based
|
||||
on its name. The provided name can be a simple wildcard expression or
|
||||
omitted to get all warmers.
|
||||
|
||||
Some examples:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
# get warmer named warmer_1 on test index
|
||||
curl -XGET localhost:9200/test/_warmer/warmer_1
|
||||
|
||||
# get all warmers that start with warm on test index
|
||||
curl -XGET localhost:9200/test/_warmer/warm*
|
||||
|
||||
# get all warmers for test index
|
||||
curl -XGET localhost:9200/test/_warmer/
|
||||
--------------------------------------------------
|
||||
|
|
@ -33,7 +33,7 @@ purposes with:
|
|||
Custom rules to configure the mapping for dynamically added fields.
|
||||
|
||||
TIP: <<indices-templates,Index templates>> allow you to configure the default
|
||||
mappings, settings, aliases, and warmers for new indices, whether created
|
||||
mappings, settings and aliases for new indices, whether created
|
||||
automatically or explicitly.
|
||||
|
||||
|
||||
|
|
|
@ -144,7 +144,7 @@ In the future we will also provide plural versions to allow putting multiple map
|
|||
See <<indices-put-mapping,`put-mapping`>>, <<indices-get-mapping,`get-
|
||||
mapping`>>, <<indices-get-field-mapping,`get-field-mapping`>>,
|
||||
<<indices-update-settings,`update-settings`>>, <<indices-get-settings,`get-settings`>>,
|
||||
<<indices-warmers,`warmers`>>, and <<indices-aliases,`aliases`>> for more details.
|
||||
`warmers`, and <<indices-aliases,`aliases`>> for more details.
|
||||
|
||||
=== Index request
|
||||
|
||||
|
|
|
@ -32,7 +32,7 @@ Add or update a mapping via the <<indices-create-index,create index>> or
|
|||
[float]
|
||||
=== Indices APIs
|
||||
|
||||
The <<warmer-retrieving, get warmer api>> will return a section for `warmers` even if there are
|
||||
The get warmer api will return a section for `warmers` even if there are
|
||||
no warmers. This ensures that the following two examples are equivalent:
|
||||
|
||||
[source,js]
|
||||
|
|
|
@ -17,6 +17,16 @@ your application to Elasticsearch 3.0.
|
|||
* <<breaking_30_allocation>>
|
||||
|
||||
[[breaking_30_search_changes]]
|
||||
=== Warmers
|
||||
|
||||
Thanks to several changes like doc values by default or disk-based norms,
|
||||
warmers have become quite useless. As a consequence, warmers and the warmer
|
||||
API have been removed: it is not possible anymore to register queries that
|
||||
will run before a new IndexSearcher is published.
|
||||
|
||||
Don't worry if you have warmers defined on your indices, they will simply be
|
||||
ignored when upgrading to 3.0.
|
||||
|
||||
=== Search changes
|
||||
|
||||
==== `search_type=count` removed
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue