diff --git a/core/licenses/groovy-all-2.4.0-indy.jar.sha1 b/core/licenses/groovy-all-2.4.0-indy.jar.sha1 deleted file mode 100644 index 4eca02376e3..00000000000 --- a/core/licenses/groovy-all-2.4.0-indy.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b8056886c1067b096c5ef05a024f1c5e9008e293 diff --git a/core/licenses/groovy-all-2.4.4-indy.jar.sha1 b/core/licenses/groovy-all-2.4.4-indy.jar.sha1 new file mode 100644 index 00000000000..458716cefdf --- /dev/null +++ b/core/licenses/groovy-all-2.4.4-indy.jar.sha1 @@ -0,0 +1 @@ +574a15e35eba5f986a0564ae197c78e843ece954 diff --git a/core/licenses/netty-3.10.0.Final.jar.sha1 b/core/licenses/netty-3.10.0.Final.jar.sha1 deleted file mode 100644 index b1f54f425ef..00000000000 --- a/core/licenses/netty-3.10.0.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ad61cd1bba067e6634ddd3e160edf0727391ac30 diff --git a/core/licenses/netty-3.10.3.Final.jar.sha1 b/core/licenses/netty-3.10.3.Final.jar.sha1 new file mode 100644 index 00000000000..097fe6e8442 --- /dev/null +++ b/core/licenses/netty-3.10.3.Final.jar.sha1 @@ -0,0 +1 @@ +15adf7ddece077d7dc429db058981f528d1c899a \ No newline at end of file diff --git a/core/pom.xml b/core/pom.xml index d958fcee41a..13981dd6536 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -6,7 +6,7 @@ org.elasticsearch elasticsearch-parent - 2.0.0-SNAPSHOT + 2.0.0-beta1-SNAPSHOT org.elasticsearch @@ -299,39 +299,6 @@ - - org.codehaus.mojo - exec-maven-plugin - 1.4.0 - - - - exec - - - - - ${jvm.executable} - - -Des.security.manager.enabled=false - -classpath - - org.elasticsearch.bootstrap.Bootstrap - -Xms256m - -Xmx1g - -Djava.awt.headless=true - -XX:+UseParNewGC - -XX:+UseConcMarkSweepGC - -XX:CMSInitiatingOccupancyFraction=75 - -XX:+UseCMSInitiatingOccupancyOnly - -XX:+HeapDumpOnOutOfMemoryError - -XX:+DisableExplicitGC - -Dfile.encoding=UTF-8 - -Djna.nosys=true - -Delasticsearch - - - org.apache.maven.plugins maven-source-plugin @@ -392,6 +359,7 @@ com.tdunning:t-digest org.apache.commons:commons-lang3 commons-cli:commons-cli + com.twitter:jsr166e @@ -1024,11 +992,24 @@ - + org.apache.maven.plugins maven-antrun-plugin + + + execute + package + + run + + + + + + + integ-setup @@ -1038,8 +1019,7 @@ - + @@ -1052,22 +1032,14 @@ - + + + - - org.apache.maven.plugins - maven-failsafe-plugin - - - 127.0.0.1:9300 - - - @@ -1111,6 +1083,31 @@ + + release + + + package.rpm + true + + + + + + org.codehaus.mojo + rpm-maven-plugin + + + attach-rpm + + attached-rpm + + + + + + + sign-rpm diff --git a/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java b/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java index db4164f9f9e..065617ce5ee 100644 --- a/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java +++ b/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java @@ -19,8 +19,6 @@ package org.apache.lucene.queryparser.classic; -import com.google.common.base.Objects; -import com.google.common.collect.ImmutableMap; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; @@ -33,6 +31,8 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.MultiPhraseQuery; import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.WildcardQuery; +import org.apache.lucene.util.Version; import org.apache.lucene.util.automaton.RegExp; import org.elasticsearch.common.lucene.search.Queries; 
import org.elasticsearch.common.unit.Fuzziness; @@ -42,6 +42,9 @@ import org.elasticsearch.index.mapper.core.DateFieldMapper; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.query.support.QueryParsers; +import com.google.common.base.Objects; +import com.google.common.collect.ImmutableMap; + import java.io.IOException; import java.util.ArrayList; import java.util.Collection; @@ -128,7 +131,7 @@ public class MapperQueryParser extends QueryParser { setLowercaseExpandedTerms(settings.lowercaseExpandedTerms()); setPhraseSlop(settings.phraseSlop()); setDefaultOperator(settings.defaultOperator()); - setFuzzyMinSim(settings.fuzzyMinSim()); + setFuzzyMinSim(settings.getFuzziness().asFloat()); setFuzzyPrefixLength(settings.fuzzyPrefixLength()); setLocale(settings.locale()); this.analyzeWildcard = settings.analyzeWildcard(); @@ -721,6 +724,15 @@ public class MapperQueryParser extends QueryParser { return super.getWildcardQuery(field, aggStr.toString()); } + @Override + protected WildcardQuery newWildcardQuery(Term t) { + // Backport: https://issues.apache.org/jira/browse/LUCENE-6677 + assert Version.LATEST == Version.LUCENE_5_2_1; + WildcardQuery query = new WildcardQuery(t, maxDeterminizedStates); + query.setRewriteMethod(multiTermRewriteMethod); + return query; + } + @Override protected Query getRegexpQuery(String field, String termStr) throws ParseException { if (lowercaseExpandedTerms) { diff --git a/core/src/main/java/org/apache/lucene/queryparser/classic/QueryParserSettings.java b/core/src/main/java/org/apache/lucene/queryparser/classic/QueryParserSettings.java index ca364e486e1..e079e00303e 100644 --- a/core/src/main/java/org/apache/lucene/queryparser/classic/QueryParserSettings.java +++ b/core/src/main/java/org/apache/lucene/queryparser/classic/QueryParserSettings.java @@ -25,6 +25,7 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.util.automaton.Operations; +import org.elasticsearch.common.unit.Fuzziness; import org.joda.time.DateTimeZone; import java.util.Collection; @@ -49,7 +50,7 @@ public class QueryParserSettings { private boolean lowercaseExpandedTerms = true; private boolean enablePositionIncrements = true; private int phraseSlop = 0; - private float fuzzyMinSim = FuzzyQuery.defaultMinSimilarity; + private Fuzziness fuzziness = Fuzziness.AUTO; private int fuzzyPrefixLength = FuzzyQuery.defaultPrefixLength; private int fuzzyMaxExpansions = FuzzyQuery.defaultMaxExpansions; private int maxDeterminizedStates = Operations.DEFAULT_MAX_DETERMINIZED_STATES; @@ -158,14 +159,6 @@ public class QueryParserSettings { this.phraseSlop = phraseSlop; } - public float fuzzyMinSim() { - return fuzzyMinSim; - } - - public void fuzzyMinSim(float fuzzyMinSim) { - this.fuzzyMinSim = fuzzyMinSim; - } - public int fuzzyPrefixLength() { return fuzzyPrefixLength; } @@ -340,7 +333,7 @@ public class QueryParserSettings { if (enablePositionIncrements != that.enablePositionIncrements) return false; if (escape != that.escape) return false; if (analyzeWildcard != that.analyzeWildcard) return false; - if (Float.compare(that.fuzzyMinSim, fuzzyMinSim) != 0) return false; + if (fuzziness != null ? fuzziness.equals(that.fuzziness) == false : that.fuzziness != null) return false; if (fuzzyPrefixLength != that.fuzzyPrefixLength) return false; if (fuzzyMaxExpansions != that.fuzzyMaxExpansions) return false;
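For orientation, a minimal sketch (illustrative only, not part of the patch) of how the new Fuzziness plumbing introduced here is driven; Fuzziness.fromEdits and asFloat are existing methods on org.elasticsearch.common.unit.Fuzziness:

    QueryParserSettings settings = new QueryParserSettings();
    settings.setFuzziness(Fuzziness.fromEdits(2)); // a fixed edit distance instead of the AUTO default
    // MapperQueryParser then bridges back to Lucene's float-based classic QueryParser API:
    // setFuzzyMinSim(settings.getFuzziness().asFloat());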
if (fuzzyRewriteMethod != null ? !fuzzyRewriteMethod.equals(that.fuzzyRewriteMethod) : that.fuzzyRewriteMethod != null) @@ -395,7 +388,7 @@ public class QueryParserSettings { result = 31 * result + (lowercaseExpandedTerms ? 1 : 0); result = 31 * result + (enablePositionIncrements ? 1 : 0); result = 31 * result + phraseSlop; - result = 31 * result + (fuzzyMinSim != +0.0f ? Float.floatToIntBits(fuzzyMinSim) : 0); + result = 31 * result + (fuzziness.hashCode()); result = 31 * result + fuzzyPrefixLength; result = 31 * result + (escape ? 1 : 0); result = 31 * result + (defaultAnalyzer != null ? defaultAnalyzer.hashCode() : 0); @@ -413,4 +406,12 @@ public class QueryParserSettings { result = 31 * result + (timeZone != null ? timeZone.hashCode() : 0); return result; } + + public void setFuzziness(Fuzziness fuzziness) { + this.fuzziness = fuzziness; + } + + public Fuzziness getFuzziness() { + return fuzziness; + } } diff --git a/core/src/main/java/org/elasticsearch/ElasticsearchException.java b/core/src/main/java/org/elasticsearch/ElasticsearchException.java index 789589f3f64..83be9f43ee6 100644 --- a/core/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/core/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -26,7 +26,12 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.support.LoggerMessageFormat; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.shard.ShardNotFoundException; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.admin.indices.alias.delete.AliasesNotFoundException; import java.io.IOException; import java.lang.reflect.Constructor; @@ -39,6 +44,11 @@ import java.util.*; public class ElasticsearchException extends RuntimeException implements ToXContent { public static final String REST_EXCEPTION_SKIP_CAUSE = "rest.exception.skip_cause"; + private static final String INDEX_HEADER_KEY = "es.index"; + private static final String SHARD_HEADER_KEY = "es.shard"; + private static final String RESOURCE_HEADER_TYPE_KEY = "es.resource.type"; + private static final String RESOURCE_HEADER_ID_KEY = "es.resource.id"; + private static final Map<String, Constructor<? extends ElasticsearchException>> MAPPING; private final Map<String, List<String>> headers = new HashMap<>(); @@ -252,7 +262,14 @@ public class ElasticsearchException extends RuntimeException implements ToXConte } else { builder.field("type", getExceptionName()); builder.field("reason", getMessage()); + for (String key : headers.keySet()) { + if (key.startsWith("es.")) { + List<String> values = headers.get(key); + xContentHeader(builder, key.substring("es.".length()), values); + } + } innerToXContent(builder, params); + renderHeader(builder, params); } return builder; } @@ -277,6 +294,38 @@ public class ElasticsearchException extends RuntimeException implements ToXConte } } + protected final void renderHeader(XContentBuilder builder, Params params) throws IOException { + boolean hasHeader = false; + for (String key : headers.keySet()) { + if (key.startsWith("es.")) { + continue; + } + if (hasHeader == false) { + builder.startObject("header"); + hasHeader = true; + } + List<String> values = headers.get(key); + xContentHeader(builder, key, values); + } + if (hasHeader) { + builder.endObject(); + } + } +
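Concretely, the two loops split a throwable's headers along the "es." prefix. A hypothetical usage (values illustrative; setIndex and addHeader are the methods this patch introduces or relies on):

    ElasticsearchException e = new ElasticsearchException("something went wrong");
    e.setIndex("my_index");            // stored under the reserved "es.index" header
    e.addHeader("trace_id", "abc123"); // arbitrary header without the "es." prefix
    // toXContent renders "index": "my_index" as a top-level field (the "es." prefix is
    // stripped via key.substring("es.".length())), while "trace_id" is nested under a
    // separate "header" object by renderHeader.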
+ private void xContentHeader(XContentBuilder builder, String key, List<String> values) throws IOException { + if (values != null && values.isEmpty() == false) { + if (values.size() == 1) { + builder.field(key, values.get(0)); + } else { + builder.startArray(key); + for (String value : values) { + builder.value(value); + } + builder.endArray(); + } + } + } + /** * Static toXContent helper method that also renders non {@link org.elasticsearch.ElasticsearchException} instances as XContent. */ @@ -342,7 +391,15 @@ public class ElasticsearchException extends RuntimeException implements ToXConte @Override public String toString() { - return ExceptionsHelper.detailedMessage(this).trim(); + StringBuilder builder = new StringBuilder(); + if (headers.containsKey(INDEX_HEADER_KEY)) { + builder.append('[').append(getIndex()).append(']'); + if (headers.containsKey(SHARD_HEADER_KEY)) { + builder.append('[').append(getShardId()).append(']'); + } + builder.append(' '); + } + return builder.append(ExceptionsHelper.detailedMessage(this).trim()).toString(); } /** @@ -396,7 +453,6 @@ public class ElasticsearchException extends RuntimeException implements ToXConte org.elasticsearch.indices.recovery.RecoverFilesRecoveryException.class, org.elasticsearch.index.translog.TruncatedTranslogException.class, org.elasticsearch.repositories.RepositoryException.class, - org.elasticsearch.index.shard.IndexShardException.class, org.elasticsearch.index.engine.DocumentSourceMissingException.class, org.elasticsearch.index.engine.DocumentMissingException.class, org.elasticsearch.common.util.concurrent.EsRejectedExecutionException.class, @@ -421,12 +477,10 @@ public class ElasticsearchException extends RuntimeException implements ToXConte org.elasticsearch.index.snapshots.IndexShardSnapshotException.class, org.elasticsearch.search.query.QueryPhaseExecutionException.class, org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException.class, - org.elasticsearch.index.shard.IndexShardCreationException.class, org.elasticsearch.index.percolator.PercolatorException.class, org.elasticsearch.snapshots.ConcurrentSnapshotExecutionException.class, org.elasticsearch.indices.IndexTemplateAlreadyExistsException.class, org.elasticsearch.indices.InvalidIndexNameException.class, - org.elasticsearch.index.IndexException.class, org.elasticsearch.indices.recovery.DelayRecoveryException.class, org.elasticsearch.indices.AliasFilterParsingException.class, org.elasticsearch.indices.InvalidIndexTemplateException.class, @@ -443,7 +497,6 @@ public class ElasticsearchException extends RuntimeException implements ToXConte org.elasticsearch.cluster.block.ClusterBlockException.class, org.elasticsearch.action.FailedNodeException.class, org.elasticsearch.indices.TypeMissingException.class, - org.elasticsearch.index.IndexShardMissingException.class, org.elasticsearch.indices.InvalidTypeNameException.class, org.elasticsearch.transport.netty.SizeHeaderFrameDecoder.HttpOnTransportException.class, org.elasticsearch.common.util.CancellableThreads.ExecutionCancelledException.class, @@ -493,7 +546,6 @@ public class ElasticsearchException extends RuntimeException implements ToXConte org.elasticsearch.ElasticsearchTimeoutException.class, org.elasticsearch.search.SearchContextMissingException.class, org.elasticsearch.transport.SendRequestTransportException.class, - org.elasticsearch.indices.IndexMissingException.class, org.elasticsearch.index.IndexShardAlreadyExistsException.class, org.elasticsearch.indices.IndexAlreadyExistsException.class, org.elasticsearch.index.engine.DocumentAlreadyExistsException.class, @@ -504,7 +556,7 @@ public class ElasticsearchException extends RuntimeException 
implements ToXConte org.elasticsearch.index.shard.IndexShardNotStartedException.class, org.elasticsearch.index.mapper.StrictDynamicMappingException.class, org.elasticsearch.index.engine.EngineClosedException.class, - org.elasticsearch.rest.action.admin.indices.alias.delete.AliasesMissingException.class, + AliasesNotFoundException.class, org.elasticsearch.transport.ResponseHandlerFailureTransportException.class, org.elasticsearch.search.SearchParseException.class, org.elasticsearch.search.fetch.FetchPhaseExecutionException.class, @@ -520,7 +572,6 @@ public class ElasticsearchException extends RuntimeException implements ToXConte org.elasticsearch.index.engine.RecoveryEngineException.class, org.elasticsearch.common.blobstore.BlobStoreException.class, org.elasticsearch.index.snapshots.IndexShardRestoreException.class, - org.elasticsearch.index.store.StoreException.class, org.elasticsearch.index.query.QueryParsingException.class, org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnPrimaryException.class, org.elasticsearch.index.engine.DeleteByQueryFailedEngineException.class, @@ -534,6 +585,9 @@ public class ElasticsearchException extends RuntimeException implements ToXConte org.elasticsearch.index.engine.CreateFailedEngineException.class, org.elasticsearch.index.shard.IllegalIndexShardStateException.class, ElasticsearchSecurityException.class, + ResourceNotFoundException.class, + IndexNotFoundException.class, + ShardNotFoundException.class, NotSerializableExceptionWrapper.class }; Map<String, Constructor<? extends ElasticsearchException>> mapping = new HashMap<>(exceptions.length); @@ -553,4 +607,73 @@ public class ElasticsearchException extends RuntimeException implements ToXConte MAPPING = Collections.unmodifiableMap(mapping); } + public String getIndex() { + List<String> index = getHeader(INDEX_HEADER_KEY); + if (index != null && index.isEmpty() == false) { + return index.get(0); + } + + return null; + } + + public ShardId getShardId() { + List<String> shard = getHeader(SHARD_HEADER_KEY); + if (shard != null && shard.isEmpty() == false) { + return new ShardId(getIndex(), Integer.parseInt(shard.get(0))); + } + return null; + } + + public void setIndex(Index index) { + if (index != null) { + addHeader(INDEX_HEADER_KEY, index.getName()); + } + } + + public void setIndex(String index) { + if (index != null) { + addHeader(INDEX_HEADER_KEY, index); + } + } + + public void setShard(ShardId shardId) { + if (shardId != null) { + addHeader(INDEX_HEADER_KEY, shardId.getIndex()); + addHeader(SHARD_HEADER_KEY, Integer.toString(shardId.id())); + } + } + + public void setResources(String type, String... 
id) { + assert type != null; + addHeader(RESOURCE_HEADER_ID_KEY, id); + addHeader(RESOURCE_HEADER_TYPE_KEY, type); + } + + public List<String> getResourceId() { + return getHeader(RESOURCE_HEADER_ID_KEY); + } + + public String getResourceType() { + List<String> header = getHeader(RESOURCE_HEADER_TYPE_KEY); + if (header != null && header.isEmpty() == false) { + assert header.size() == 1; + return header.get(0); + } + return null; + } + + public static void renderThrowable(XContentBuilder builder, Params params, Throwable t) throws IOException { + builder.startObject("error"); + final ElasticsearchException[] rootCauses = ElasticsearchException.guessRootCauses(t); + builder.field("root_cause"); + builder.startArray(); + for (ElasticsearchException rootCause : rootCauses) { + builder.startObject(); + rootCause.toXContent(builder, new ToXContent.DelegatingMapParams(Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_CAUSE, "true"), params)); + builder.endObject(); + } + builder.endArray(); + ElasticsearchException.toXContent(builder, params, t); + builder.endObject(); + } } diff --git a/core/src/main/java/org/elasticsearch/ExceptionsHelper.java b/core/src/main/java/org/elasticsearch/ExceptionsHelper.java index c9f51cb5d55..df7be834ebe 100644 --- a/core/src/main/java/org/elasticsearch/ExceptionsHelper.java +++ b/core/src/main/java/org/elasticsearch/ExceptionsHelper.java @@ -26,8 +26,6 @@ import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexException; import org.elasticsearch.rest.RestStatus; import java.io.IOException; @@ -240,12 +238,12 @@ public final class ExceptionsHelper { static class GroupBy { final String reason; - final Index index; + final String index; final Class<? extends Throwable> causeType; public GroupBy(Throwable t) { - if (t instanceof IndexException) { - index = ((IndexException) t).index(); + if (t instanceof ElasticsearchException) { + index = ((ElasticsearchException) t).getIndex(); } else { index = null; } diff --git a/core/src/main/java/org/elasticsearch/index/IndexShardMissingException.java b/core/src/main/java/org/elasticsearch/ResourceNotFoundException.java similarity index 65% rename from core/src/main/java/org/elasticsearch/index/IndexShardMissingException.java rename to core/src/main/java/org/elasticsearch/ResourceNotFoundException.java index 6b356ee685f..d38de2e3bc1 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexShardMissingException.java +++ b/core/src/main/java/org/elasticsearch/ResourceNotFoundException.java @@ -16,31 +16,32 @@ * specific language governing permissions and limitations * under the License. */ - -package org.elasticsearch.index; +package org.elasticsearch; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.index.shard.IndexShardException; -import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; import java.io.IOException; /** - * + * Generic ResourceNotFoundException corresponding to the {@link RestStatus#NOT_FOUND} status code */ -public class IndexShardMissingException extends IndexShardException { +public class ResourceNotFoundException extends ElasticsearchException { - public IndexShardMissingException(ShardId shardId) { - super(shardId, "missing"); + public ResourceNotFoundException(String msg, Object... 
args) { + super(msg, args); } - public IndexShardMissingException(StreamInput in) throws IOException{ + protected ResourceNotFoundException(String msg, Throwable cause, Object... args) { + super(msg, cause, args); + } + + public ResourceNotFoundException(StreamInput in) throws IOException { super(in); } @Override - public RestStatus status() { + public final RestStatus status() { return RestStatus.NOT_FOUND; } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java index 60ea851b065..208770393f4 100644 --- a/core/src/main/java/org/elasticsearch/Version.java +++ b/core/src/main/java/org/elasticsearch/Version.java @@ -241,13 +241,17 @@ public class Version { public static final int V_1_6_0_ID = 1060099; public static final Version V_1_6_0 = new Version(V_1_6_0_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4); public static final int V_1_6_1_ID = 1060199; - public static final Version V_1_6_1 = new Version(V_1_6_1_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4); + public static final Version V_1_6_1 = new Version(V_1_6_1_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4); + public static final int V_1_6_2_ID = 1060299; + public static final Version V_1_6_2 = new Version(V_1_6_2_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4); public static final int V_1_7_0_ID = 1070099; - public static final Version V_1_7_0 = new Version(V_1_7_0_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4); - public static final int V_2_0_0_ID = 2000099; - public static final Version V_2_0_0 = new Version(V_2_0_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_2_1); + public static final Version V_1_7_0 = new Version(V_1_7_0_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4); + public static final int V_1_7_1_ID = 1070199; + public static final Version V_1_7_1 = new Version(V_1_7_1_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4); + public static final int V_2_0_0_beta1_ID = 2000001; + public static final Version V_2_0_0_beta1 = new Version(V_2_0_0_beta1_ID, true, org.apache.lucene.util.Version.LUCENE_5_2_1); - public static final Version CURRENT = V_2_0_0; + public static final Version CURRENT = V_2_0_0_beta1; static { assert CURRENT.luceneVersion.equals(Lucene.VERSION) : "Version must be upgraded to [" + Lucene.VERSION + "] is still set to [" + CURRENT.luceneVersion + "]"; @@ -259,10 +263,14 @@ public class Version { public static Version fromId(int id) { switch (id) { - case V_2_0_0_ID: - return V_2_0_0; + case V_2_0_0_beta1_ID: + return V_2_0_0_beta1; + case V_1_7_1_ID: + return V_1_7_1; case V_1_7_0_ID: return V_1_7_0; + case V_1_6_2_ID: + return V_1_6_2; case V_1_6_1_ID: return V_1_6_1; case V_1_6_0_ID: @@ -472,7 +480,7 @@ public class Version { public static Version indexCreated(Settings indexSettings) { final Version indexVersion = indexSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null); if (indexVersion == null) { - throw new IllegalStateException("[" + IndexMetaData.SETTING_VERSION_CREATED + "] is not present in the index settings for index with uuid: [" + indexSettings.get(IndexMetaData.SETTING_UUID) + "]"); + throw new IllegalStateException("[" + IndexMetaData.SETTING_VERSION_CREATED + "] is not present in the index settings for index with uuid: [" + indexSettings.get(IndexMetaData.SETTING_INDEX_UUID) + "]"); } return indexVersion; } @@ -499,7 +507,7 @@ public class Version { if (snapshot = version.endsWith("-SNAPSHOT")) { version = 
version.substring(0, version.length() - 9); } - String[] parts = version.split("\\."); + String[] parts = version.split("\\.|\\-"); if (parts.length < 3 || parts.length > 4) { throw new IllegalArgumentException("the version needs to contain major, minor and revision, and optionally the build: " + version); } @@ -515,10 +523,10 @@ public class Version { int build = 99; if (parts.length == 4) { String buildStr = parts[3]; - if (buildStr.startsWith("Beta")) { + if (buildStr.startsWith("Beta") || buildStr.startsWith("beta")) { build = Integer.parseInt(buildStr.substring(4)); } - if (buildStr.startsWith("RC")) { + if (buildStr.startsWith("RC") || buildStr.startsWith("rc")) { build = Integer.parseInt(buildStr.substring(2)) + 50; } } @@ -589,10 +597,20 @@ public class Version { public String number() { StringBuilder sb = new StringBuilder(); sb.append(major).append('.').append(minor).append('.').append(revision); - if (build < 50) { - sb.append(".Beta").append(build); + if (isBeta()) { + if (major >= 2) { + sb.append("-beta"); + } else { + sb.append(".Beta"); + } + sb.append(build); } else if (build < 99) { - sb.append(".RC").append(build - 50); + if (major >= 2) { + sb.append("-rc"); + } else { + sb.append(".RC"); + } + sb.append(build - 50); } return sb.toString(); } @@ -635,6 +653,14 @@ public class Version { return id; } + public boolean isBeta() { + return build < 50; + } + + public boolean isRC() { + return build > 50 && build < 99; + } + public static class Module extends AbstractModule { private final Version version; diff --git a/core/src/main/java/org/elasticsearch/action/ActionModule.java b/core/src/main/java/org/elasticsearch/action/ActionModule.java index 2ceb9a976ea..07ded699dcc 100644 --- a/core/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/core/src/main/java/org/elasticsearch/action/ActionModule.java @@ -96,6 +96,8 @@ import org.elasticsearch.action.admin.indices.recovery.RecoveryAction; import org.elasticsearch.action.admin.indices.recovery.TransportRecoveryAction; import org.elasticsearch.action.admin.indices.refresh.RefreshAction; import org.elasticsearch.action.admin.indices.refresh.TransportRefreshAction; +import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction; +import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsAction; import org.elasticsearch.action.admin.indices.segments.TransportIndicesSegmentsAction; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsAction; @@ -242,6 +244,7 @@ public class ActionModule extends AbstractModule { registerAction(IndicesStatsAction.INSTANCE, TransportIndicesStatsAction.class); registerAction(IndicesSegmentsAction.INSTANCE, TransportIndicesSegmentsAction.class); + registerAction(IndicesShardStoresAction.INSTANCE, TransportIndicesShardStoresAction.class); registerAction(CreateIndexAction.INSTANCE, TransportCreateIndexAction.class); registerAction(DeleteIndexAction.INSTANCE, TransportDeleteIndexAction.class); registerAction(GetIndexAction.INSTANCE, TransportGetIndexAction.class); diff --git a/core/src/main/java/org/elasticsearch/action/DocumentRequest.java b/core/src/main/java/org/elasticsearch/action/DocumentRequest.java index 8d26006ffa5..b804d7f3858 100644 --- a/core/src/main/java/org/elasticsearch/action/DocumentRequest.java +++ b/core/src/main/java/org/elasticsearch/action/DocumentRequest.java @@ -25,7 +25,7 @@ import org.elasticsearch.action.support.IndicesOptions; * * Forces 
this class to return index/type/id getters */ -public interface DocumentRequest<T> { +public interface DocumentRequest<T> extends IndicesRequest { /** * Get the index that this request operates on diff --git a/core/src/main/java/org/elasticsearch/action/NoShardAvailableActionException.java b/core/src/main/java/org/elasticsearch/action/NoShardAvailableActionException.java index 3ab5869045f..00562af99c5 100644 --- a/core/src/main/java/org/elasticsearch/action/NoShardAvailableActionException.java +++ b/core/src/main/java/org/elasticsearch/action/NoShardAvailableActionException.java @@ -19,8 +19,8 @@ package org.elasticsearch.action; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.index.shard.IndexShardException; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; @@ -29,18 +29,19 @@ import java.io.IOException; /** * */ -public class NoShardAvailableActionException extends IndexShardException { +public class NoShardAvailableActionException extends ElasticsearchException { public NoShardAvailableActionException(ShardId shardId) { - super(shardId, null); + this(shardId, null); } public NoShardAvailableActionException(ShardId shardId, String msg) { - super(shardId, msg); + this(shardId, msg, null); } public NoShardAvailableActionException(ShardId shardId, String msg, Throwable cause) { - super(shardId, msg, cause); + super(msg, cause); + setShard(shardId); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/RoutingMissingException.java b/core/src/main/java/org/elasticsearch/action/RoutingMissingException.java index 920d48b952a..86bca96d744 100644 --- a/core/src/main/java/org/elasticsearch/action/RoutingMissingException.java +++ b/core/src/main/java/org/elasticsearch/action/RoutingMissingException.java @@ -32,8 +32,6 @@ import java.util.Objects; */ public class RoutingMissingException extends ElasticsearchException { - private final String index; - private final String type; private final String id; @@ -43,20 +41,16 @@ public class RoutingMissingException extends ElasticsearchException { Objects.requireNonNull(index, "index must not be null"); Objects.requireNonNull(type, "type must not be null"); Objects.requireNonNull(id, "id must not be null"); - this.index = index; + setIndex(index); this.type = type; this.id = id; } - public String index() { - return index; - } - - public String type() { + public String getType() { return type; } - public String id() { + public String getId() { return id; } @@ -67,7 +61,6 @@ public class RoutingMissingException extends ElasticsearchException { public RoutingMissingException(StreamInput in) throws IOException { super(in); - index = in.readString(); type = in.readString(); id = in.readString(); } @@ -75,7 +68,6 @@ public class RoutingMissingException extends ElasticsearchException { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeString(index); out.writeString(type); out.writeString(id); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java index 258d6072201..cb94778de51 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java @@ -107,11 +107,11 @@ public class ClusterHealthResponse extends 
ActionResponse implements Iterable, Streama private int numberOfReplicas; - int activeShards = 0; + private int activeShards = 0; - int relocatingShards = 0; + private int relocatingShards = 0; - int initializingShards = 0; + private int initializingShards = 0; - int unassignedShards = 0; + private int unassignedShards = 0; - int activePrimaryShards = 0; + private int activePrimaryShards = 0; - ClusterHealthStatus status = ClusterHealthStatus.RED; + private ClusterHealthStatus status = ClusterHealthStatus.RED; - final Map<Integer, ClusterShardHealth> shards = Maps.newHashMap(); + private final Map<Integer, ClusterShardHealth> shards = Maps.newHashMap(); - List<String> validationFailures; + private List<String> validationFailures; private ClusterIndexHealth() { } @@ -77,33 +77,8 @@ public class ClusterIndexHealth implements Iterable<ClusterShardHealth>, Streama this.validationFailures = indexRoutingTable.validate(indexMetaData); for (IndexShardRoutingTable shardRoutingTable : indexRoutingTable) { - ClusterShardHealth shardHealth = new ClusterShardHealth(shardRoutingTable.shardId().id()); - for (ShardRouting shardRouting : shardRoutingTable) { - if (shardRouting.active()) { - shardHealth.activeShards++; - if (shardRouting.relocating()) { - // the shard is relocating, the one it is relocating to will be in initializing state, so we don't count it - shardHealth.relocatingShards++; - } - if (shardRouting.primary()) { - shardHealth.primaryActive = true; - } - } else if (shardRouting.initializing()) { - shardHealth.initializingShards++; - } else if (shardRouting.unassigned()) { - shardHealth.unassignedShards++; - } - } - if (shardHealth.primaryActive) { - if (shardHealth.activeShards == shardRoutingTable.size()) { - shardHealth.status = ClusterHealthStatus.GREEN; - } else { - shardHealth.status = ClusterHealthStatus.YELLOW; - } - } else { - shardHealth.status = ClusterHealthStatus.RED; - } - shards.put(shardHealth.getId(), shardHealth); + int shardId = shardRoutingTable.shardId().id(); + shards.put(shardId, new ClusterShardHealth(shardId, shardRoutingTable)); } // update the index status @@ -113,10 +88,10 @@ public class ClusterIndexHealth implements Iterable<ClusterShardHealth>, Streama if (shardHealth.isPrimaryActive()) { activePrimaryShards++; } - activeShards += shardHealth.activeShards; - relocatingShards += shardHealth.relocatingShards; - initializingShards += shardHealth.initializingShards; - unassignedShards += shardHealth.unassignedShards; + activeShards += shardHealth.getActiveShards(); + relocatingShards += shardHealth.getRelocatingShards(); + initializingShards += shardHealth.getInitializingShards(); + unassignedShards += shardHealth.getUnassignedShards(); if (shardHealth.getStatus() == ClusterHealthStatus.RED) { status = ClusterHealthStatus.RED; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterShardHealth.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterShardHealth.java index 5625ba3b886..34914d30093 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterShardHealth.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterShardHealth.java @@ -19,6 +19,8 @@ package org.elasticsearch.action.admin.cluster.health; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable;
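The per-shard computation deleted above moves into the new ClusterShardHealth constructor in the next hunk; the status rule itself is unchanged. A worked example under a hypothetical routing table of one primary and two replicas:

    // primary active, replica0 active, replica1 initializing:
    //   primaryActive == true, activeShards == 2, shardRoutingTable.size() == 3
    //   -> status == ClusterHealthStatus.YELLOW
    // all three active -> GREEN; an inactive primary -> RED, regardless of the replicas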
@@ -34,22 +36,47 @@ public class ClusterShardHealth implements Streamable { ClusterHealthStatus status = ClusterHealthStatus.RED; - int activeShards = 0; + private int activeShards = 0; - int relocatingShards = 0; + private int relocatingShards = 0; - int initializingShards = 0; + private int initializingShards = 0; - int unassignedShards = 0; + private int unassignedShards = 0; - boolean primaryActive = false; + private boolean primaryActive = false; private ClusterShardHealth() { } - ClusterShardHealth(int shardId) { + public ClusterShardHealth(int shardId, final IndexShardRoutingTable shardRoutingTable) { this.shardId = shardId; + for (ShardRouting shardRouting : shardRoutingTable) { + if (shardRouting.active()) { + activeShards++; + if (shardRouting.relocating()) { + // the shard is relocating, the one it is relocating to will be in initializing state, so we don't count it + relocatingShards++; + } + if (shardRouting.primary()) { + primaryActive = true; + } + } else if (shardRouting.initializing()) { + initializingShards++; + } else if (shardRouting.unassigned()) { + unassignedShards++; + } + } + if (primaryActive) { + if (activeShards == shardRoutingTable.size()) { + status = ClusterHealthStatus.GREEN; + } else { + status = ClusterHealthStatus.YELLOW; + } + } else { + status = ClusterHealthStatus.RED; + } } public int getId() { diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java index c9d3ec27f4e..06d8d6361b6 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -25,13 +25,14 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; import org.elasticsearch.cluster.*; import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.gateway.GatewayAllocator; -import org.elasticsearch.indices.IndexMissingException; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService;
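The constructor rewiring in the next hunk repeats across nearly every transport action touched by this patch: index-name resolution moves off ClusterState.metaData() onto the injected IndexNameExpressionResolver. Schematically (both forms appear verbatim in the hunks that follow):

    // before: clusterState.metaData().concreteIndices(request.indicesOptions(), request.indices())
    // after:  indexNameExpressionResolver.concreteIndices(clusterState, request)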
@@ -44,9 +45,10 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction< private final GatewayAllocator gatewayAllocator; @Inject - public TransportClusterHealthAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, - ClusterName clusterName, ActionFilters actionFilters, GatewayAllocator gatewayAllocator) { - super(settings, ClusterHealthAction.NAME, transportService, clusterService, threadPool, actionFilters, ClusterHealthRequest.class); + public TransportClusterHealthAction(Settings settings, TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, ClusterName clusterName, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, GatewayAllocator gatewayAllocator) { + super(settings, ClusterHealthAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, ClusterHealthRequest.class); this.clusterName = clusterName; this.gatewayAllocator = gatewayAllocator; } @@ -199,9 +201,9 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction< } if (request.indices().length > 0) { try { - clusterState.metaData().concreteIndices(IndicesOptions.strictExpand(), request.indices()); + indexNameExpressionResolver.concreteIndices(clusterState, IndicesOptions.strictExpand(), request.indices()); waitForCounter++; - } catch (IndexMissingException e) { + } catch (IndexNotFoundException e) { response.status = ClusterHealthStatus.RED; // no indices, make sure it's RED // missing indices, wait a bit more... } @@ -266,8 +268,8 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction< String[] concreteIndices; try { - concreteIndices = clusterState.metaData().concreteIndices(request.indicesOptions(), request.indices()); - } catch (IndexMissingException e) { + concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request); + } catch (IndexNotFoundException e) { // one of the specified indices is not there - treat it as RED. ClusterHealthResponse response = new ClusterHealthResponse(clusterName.value(), Strings.EMPTY_ARRAY, clusterState, numberOfPendingTasks, numberOfInFlightFetch, UnassignedInfo.getNumberOfDelayedUnassigned(settings, clusterState), diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java index a73982e5c0c..7f2c1994197 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.support.nodes.BaseNodeRequest; import org.elasticsearch.action.support.nodes.TransportNodesAction; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -45,9 +46,10 @@ public class TransportNodesHotThreadsAction extends TransportNodesAction(size); } + /** + * @return an ordered list, sorted by plugin name + */ public List<PluginInfo> getInfos() { + Collections.sort(infos, new Comparator<PluginInfo>() { + @Override + public int compare(final PluginInfo o1, final PluginInfo o2) { + return o1.getName().compareTo(o2.getName()); + } + }); + return infos; } @@ -70,7 +82,7 @@ public class PluginsInfo implements Streamable, ToXContent { @Override public void writeTo(StreamOutput out) throws IOException { out.writeInt(infos.size()); - for (PluginInfo plugin : infos) { + for (PluginInfo plugin : getInfos()) { plugin.writeTo(out); } } @@ -78,7 +90,7 @@ public class PluginsInfo implements Streamable, ToXContent { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startArray(Fields.PLUGINS); - for (PluginInfo pluginInfo : infos) { + for (PluginInfo pluginInfo : getInfos()) { pluginInfo.toXContent(builder, params); } builder.endArray(); diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java 
b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java index 74221fc79ed..c76b5893085 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.support.nodes.BaseNodeRequest; import org.elasticsearch.action.support.nodes.TransportNodesAction; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -47,9 +48,9 @@ public class TransportNodesInfoAction extends TransportNodesAction attr : node.attributes().entrySet()) { - builder.field(attr.getKey(), attr.getValue()); - } - builder.endObject(); - } - builder.endObject(); + node.toXContent(builder, params); } builder.endObject(); builder.startArray("shards"); diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java index 10ef0348e46..b9a372ea074 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardIterator; @@ -45,8 +46,9 @@ import static com.google.common.collect.Sets.newHashSet; public class TransportClusterSearchShardsAction extends TransportMasterNodeReadAction { @Inject - public TransportClusterSearchShardsAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters) { - super(settings, ClusterSearchShardsAction.NAME, transportService, clusterService, threadPool, actionFilters, ClusterSearchShardsRequest.class); + public TransportClusterSearchShardsAction(Settings settings, TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, ClusterSearchShardsAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, ClusterSearchShardsRequest.class); } @Override @@ -57,7 +59,7 @@ public class TransportClusterSearchShardsAction extends TransportMasterNodeReadA @Override protected ClusterBlockException checkBlock(ClusterSearchShardsRequest request, ClusterState state) { - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, state.metaData().concreteIndices(request.indicesOptions(), request.indices())); + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, request)); } @Override 
@@ -68,10 +70,10 @@ public class TransportClusterSearchShardsAction extends TransportMasterNodeReadA @Override protected void masterOperation(final ClusterSearchShardsRequest request, final ClusterState state, final ActionListener listener) { ClusterState clusterState = clusterService.state(); - String[] concreteIndices = clusterState.metaData().concreteIndices(request.indicesOptions(), request.indices()); - Map> routingMap = clusterState.metaData().resolveSearchRouting(request.routing(), request.indices()); + String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request); + Map> routingMap = indexNameExpressionResolver.resolveSearchRouting(state, request.routing(), request.indices()); Set nodeIds = newHashSet(); - GroupShardsIterator groupShardsIterator = clusterService.operationRouting().searchShards(clusterState, request.indices(), concreteIndices, routingMap, request.preference()); + GroupShardsIterator groupShardsIterator = clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, request.preference()); ShardRouting shard; ClusterSearchShardsGroup[] groupResponses = new ClusterSearchShardsGroup[groupShardsIterator.size()]; int currentGroup = 0; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java index 23ab2c24dab..bb0b73048e8 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.SnapshotId; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -42,8 +43,9 @@ public class TransportCreateSnapshotAction extends TransportMasterNodeAction 0) { - String[] indices = currentState.metaData().concreteIndices(request.indicesOptions(), request.indices()); + String[] indices = indexNameExpressionResolver.concreteIndices(currentState, request); for (String filteredIndex : indices) { IndexMetaData indexMetaData = currentState.metaData().index(filteredIndex); if (indexMetaData != null) { diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java index 57b8f3956b3..a23eb0dec09 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -31,6 +31,7 @@ import org.elasticsearch.action.support.nodes.TransportNodesAction; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.common.inject.Inject; import 
org.elasticsearch.common.io.stream.StreamInput; @@ -65,9 +66,10 @@ public class TransportClusterStatsAction extends TransportNodesAction aliases = new HashSet<>(); for (AliasActions action : actions) { //expand indices - String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), action.indices()); + String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request.indicesOptions(), action.indices()); //collect the aliases Collections.addAll(aliases, action.aliases()); for (String index : concreteIndices) { @@ -100,7 +102,7 @@ public class TransportIndicesAliasesAction extends TransportMasterNodeAction { @Inject - public TransportAliasesExistAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters) { - super(settings, AliasesExistAction.NAME, transportService, clusterService, threadPool, actionFilters, GetAliasesRequest.class); + public TransportAliasesExistAction(Settings settings, TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, AliasesExistAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, GetAliasesRequest.class); } @Override @@ -48,7 +50,7 @@ public class TransportAliasesExistAction extends TransportMasterNodeReadAction listener) { - String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices()); + String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); boolean result = state.metaData().hasAliases(request.aliases(), concreteIndices); listener.onResponse(new AliasesExistResponse(result)); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java index a1088d4fbcd..496b8a3e8d1 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.AliasMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -39,8 +40,9 @@ import java.util.List; public class TransportGetAliasesAction extends TransportMasterNodeReadAction { @Inject - public TransportGetAliasesAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters) { - super(settings, GetAliasesAction.NAME, transportService, clusterService, threadPool, actionFilters, GetAliasesRequest.class); + public TransportGetAliasesAction(Settings settings, TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, GetAliasesAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, 
GetAliasesRequest.class); } @Override @@ -51,7 +53,7 @@ public class TransportGetAliasesAction extends TransportMasterNodeReadAction listener) { - String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices()); + String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); @SuppressWarnings("unchecked") // ImmutableList to List results incompatible type ImmutableOpenMap> result = (ImmutableOpenMap) state.metaData().findAliases(request.aliases(), concreteIndices); listener.onResponse(new GetAliasesResponse(result)); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java index c8bc92be75a..e41f935ce47 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java @@ -34,14 +34,14 @@ import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.ShardsIterator; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexService; import org.elasticsearch.index.analysis.*; -import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.internal.AllFieldMapper; -import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.analysis.IndicesAnalysisService; @@ -63,8 +63,9 @@ public class TransportAnalyzeAction extends TransportSingleCustomOperationAction @Inject public TransportAnalyzeAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, - IndicesService indicesService, IndicesAnalysisService indicesAnalysisService, ActionFilters actionFilters) { - super(settings, AnalyzeAction.NAME, threadPool, clusterService, transportService, actionFilters, AnalyzeRequest.class, ThreadPool.Names.INDEX); + IndicesService indicesService, IndicesAnalysisService indicesAnalysisService, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, AnalyzeAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, AnalyzeRequest.class, ThreadPool.Names.INDEX); this.indicesService = indicesService; this.indicesAnalysisService = indicesAnalysisService; } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java index 83f5abcad1e..e77b7009fb0 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import 
org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.inject.Inject; @@ -55,8 +56,9 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastAction listener) { - final String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices()); + final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); CloseIndexClusterStateUpdateRequest updateRequest = new CloseIndexClusterStateUpdateRequest() .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout()) .indices(concreteIndices); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java index 32c26dc47c6..7ec34156b4d 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java @@ -27,6 +27,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -43,8 +44,9 @@ public class TransportCreateIndexAction extends TransportMasterNodeAction listener) { - String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices()); + String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); if (concreteIndices.length == 0) { listener.onResponse(new DeleteIndexResponse(true)); return; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java index 019e8c2f34b..254b5fa0c65 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java @@ -27,9 +27,10 @@ import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.indices.IndexMissingException; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -40,8 +41,8 @@ public class TransportIndicesExistsAction extends TransportMasterNodeReadAction< @Inject public TransportIndicesExistsAction(Settings settings, TransportService transportService, ClusterService clusterService, - 
ThreadPool threadPool, ActionFilters actionFilters) { - super(settings, IndicesExistsAction.NAME, transportService, clusterService, threadPool, actionFilters, IndicesExistsRequest.class); + ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, IndicesExistsAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, IndicesExistsRequest.class); } @Override @@ -59,7 +60,7 @@ public class TransportIndicesExistsAction extends TransportMasterNodeReadAction< protected ClusterBlockException checkBlock(IndicesExistsRequest request, ClusterState state) { //make sure through indices options that the concrete indices call never throws IndexMissingException IndicesOptions indicesOptions = IndicesOptions.fromOptions(true, true, request.indicesOptions().expandWildcardsOpen(), request.indicesOptions().expandWildcardsClosed()); - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, clusterService.state().metaData().concreteIndices(indicesOptions, request.indices())); + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, indicesOptions, request.indices())); } @Override @@ -67,9 +68,9 @@ public class TransportIndicesExistsAction extends TransportMasterNodeReadAction< boolean exists; try { // Similar as the previous behaviour, but now also aliases and wildcards are supported. - clusterService.state().metaData().concreteIndices(request.indicesOptions(), request.indices()); + indexNameExpressionResolver.concreteIndices(state, request); exists = true; - } catch (IndexMissingException e) { + } catch (IndexNotFoundException e) { exists = false; } listener.onResponse(new IndicesExistsResponse(exists)); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java index ef1dc16b190..ad792ae61dc 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java @@ -25,6 +25,7 @@ import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; @@ -39,8 +40,8 @@ public class TransportTypesExistsAction extends TransportMasterNodeReadAction listener) { - String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices()); + String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request.indicesOptions(), request.indices()); if (concreteIndices.length == 0) { listener.onResponse(new TypesExistsResponse(false)); return; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java index e546d6f616b..c6c3c316f68 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java +++ 
b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.inject.Inject; @@ -50,8 +51,10 @@ public class TransportFlushAction extends TransportBroadcastAction listener) { ClusterState clusterState = clusterService.state(); - String[] concreteIndices = clusterState.metaData().concreteIndices(request.indicesOptions(), request.indices()); + String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request); final AtomicInteger indexCounter = new AtomicInteger(); final AtomicInteger completionCounter = new AtomicInteger(concreteIndices.length); final AtomicReferenceArray indexResponses = new AtomicReferenceArray<>(concreteIndices.length); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java index a65cbc781c5..80b5cf9e6b3 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java @@ -31,6 +31,7 @@ import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.ShardsIterator; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.inject.Inject; @@ -65,11 +66,10 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleCustomO private final IndicesService indicesService; @Inject - public TransportGetFieldMappingsIndexAction(Settings settings, ClusterService clusterService, - TransportService transportService, - IndicesService indicesService, - ThreadPool threadPool, ActionFilters actionFilters) { - super(settings, ACTION_NAME, threadPool, clusterService, transportService, actionFilters, GetFieldMappingsIndexRequest.class, ThreadPool.Names.MANAGEMENT); + public TransportGetFieldMappingsIndexAction(Settings settings, ClusterService clusterService, TransportService transportService, + IndicesService indicesService, ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, ACTION_NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, GetFieldMappingsIndexRequest.class, ThreadPool.Names.MANAGEMENT); this.clusterService = clusterService; this.indicesService = indicesService; } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java index 03eb5ed3595..13336f1a712 100644 --- 
a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; @@ -38,8 +39,9 @@ import org.elasticsearch.transport.TransportService; public class TransportGetMappingsAction extends TransportClusterInfoAction { @Inject - public TransportGetMappingsAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters) { - super(settings, GetMappingsAction.NAME, transportService, clusterService, threadPool, actionFilters, GetMappingsRequest.class); + public TransportGetMappingsAction(Settings settings, TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, GetMappingsAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, GetMappingsRequest.class); } @Override @@ -50,7 +52,7 @@ public class TransportGetMappingsAction extends TransportClusterInfoAction listener) { - final String[] concreteIndices = clusterService.state().metaData().concreteIndices(request.indicesOptions(), request.indices()); + final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); PutMappingClusterStateUpdateRequest updateRequest = new PutMappingClusterStateUpdateRequest() .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout()) .indices(concreteIndices).type(request.type()) diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java index 1df33c56463..1e3abb0257f 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaDataIndexStateService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -45,8 +46,9 @@ public class TransportOpenIndexAction extends TransportMasterNodeAction listener) { - final String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices()); + final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); OpenIndexClusterStateUpdateRequest updateRequest = new OpenIndexClusterStateUpdateRequest() .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout()) .indices(concreteIndices); diff --git 
a/core/src/main/java/org/elasticsearch/action/admin/indices/optimize/TransportOptimizeAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/optimize/TransportOptimizeAction.java index 17a18bae971..6b5416985e7 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/optimize/TransportOptimizeAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/optimize/TransportOptimizeAction.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.inject.Inject; @@ -51,8 +52,9 @@ public class TransportOptimizeAction extends TransportBroadcastAction listener) { - String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices()); + String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); ImmutableOpenMap.Builder indexToSettingsBuilder = ImmutableOpenMap.builder(); for (String concreteIndex : concreteIndices) { IndexMetaData indexMetaData = state.getMetaData().index(concreteIndex); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java index 1278b97934b..8f1c9b09372 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaDataUpdateSettingsService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -43,8 +44,8 @@ public class TransportUpdateSettingsAction extends TransportMasterNodeAction listener) { - final String[] concreteIndices = clusterService.state().metaData().concreteIndices(request.indicesOptions(), request.indices()); + final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); UpdateSettingsClusterStateUpdateRequest clusterStateUpdateRequest = new UpdateSettingsClusterStateUpdateRequest() .indices(concreteIndices) .settings(request.settings()) diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java new file mode 100644 index 00000000000..949e32fbb1e --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java @@ -0,0 +1,62 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.shards; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * Request builder for {@link IndicesShardStoresRequest} + */ +public class IndicesShardStoreRequestBuilder extends MasterNodeReadOperationRequestBuilder { + + public IndicesShardStoreRequestBuilder(ElasticsearchClient client, Action action, String... indices) { + super(client, action, new IndicesShardStoresRequest(indices)); + } + + /** + * Sets the indices for the shard stores request + */ + public IndicesShardStoreRequestBuilder setIndices(String... indices) { + request.indices(indices); + return this; + } + + /** + * Specifies what type of requested indices to ignore and wildcard indices expressions + * By default, expands wildcards to both open and closed indices + */ + public IndicesShardStoreRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) { + request.indicesOptions(indicesOptions); + return this; + } + + /** + * Set statuses to filter shards to get stores info on. + * @param shardStatuses acceptable values are "green", "yellow", "red" and "all" + * see {@link org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus} for details + */ + public IndicesShardStoreRequestBuilder setShardStatuses(String... shardStatuses) { + request.shardStatuses(shardStatuses); + return this; + } +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresAction.java new file mode 100644 index 00000000000..6475d92ccbd --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresAction.java @@ -0,0 +1,50 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.shards; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * Action for {@link TransportIndicesShardStoresAction} + * + * Exposes shard store information for requested indices. + * Shard store information reports which nodes hold shard copies, how recent they are + * and any exceptions on opening the shard index or from previous engine failures + */ +public class IndicesShardStoresAction extends Action { + + public static final IndicesShardStoresAction INSTANCE = new IndicesShardStoresAction(); + public static final String NAME = "indices:monitor/shard_stores"; + + private IndicesShardStoresAction() { + super(NAME); + } + + @Override + public IndicesShardStoresResponse newResponse() { + return new IndicesShardStoresResponse(); + } + + @Override + public IndicesShardStoreRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new IndicesShardStoreRequestBuilder(client, this); + } +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresRequest.java new file mode 100644 index 00000000000..0c0b3386580 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresRequest.java @@ -0,0 +1,132 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action.admin.indices.shards; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.master.MasterNodeReadRequest; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.EnumSet; + +/** + * Request for {@link IndicesShardStoresAction} + */ +public class IndicesShardStoresRequest extends MasterNodeReadRequest implements IndicesRequest.Replaceable { + + private String[] indices = Strings.EMPTY_ARRAY; + private IndicesOptions indicesOptions = IndicesOptions.strictExpand(); + private EnumSet statuses = EnumSet.of(ClusterHealthStatus.YELLOW, ClusterHealthStatus.RED); + + /** + * Create a request for shard stores info for indices + */ + public IndicesShardStoresRequest(String... indices) { + this.indices = indices; + } + + IndicesShardStoresRequest() { + } + + /** + * Set statuses to filter shards to get stores info on. + * see {@link ClusterHealthStatus} for details. 
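+ * Passing "all" selects every status; any other values supplied with it are ignored.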
+ * Defaults to "yellow" and "red" status + * @param shardStatuses acceptable values are "green", "yellow", "red" and "all" + */ + public IndicesShardStoresRequest shardStatuses(String... shardStatuses) { + statuses = EnumSet.noneOf(ClusterHealthStatus.class); + for (String statusString : shardStatuses) { + if ("all".equalsIgnoreCase(statusString)) { + statuses = EnumSet.allOf(ClusterHealthStatus.class); + return this; + } + statuses.add(ClusterHealthStatus.fromString(statusString)); + } + return this; + } + + /** + * Specifies what type of requested indices to ignore and wildcard indices expressions + * By default, expands wildcards to both open and closed indices + */ + public IndicesShardStoresRequest indicesOptions(IndicesOptions indicesOptions) { + this.indicesOptions = indicesOptions; + return this; + } + + /** + * Sets the indices for the shard stores request + */ + @Override + public IndicesShardStoresRequest indices(String... indices) { + this.indices = indices; + return this; + } + + /** + * Returns the shard criteria to get store information on + */ + public EnumSet shardStatuses() { + return statuses; + } + + @Override + public String[] indices() { + return indices; + } + + @Override + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArrayNullable(indices); + out.writeVInt(statuses.size()); + for (ClusterHealthStatus status : statuses) { + out.writeByte(status.value()); + } + indicesOptions.writeIndicesOptions(out); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + indices = in.readStringArray(); + int nStatus = in.readVInt(); + statuses = EnumSet.noneOf(ClusterHealthStatus.class); + for (int i = 0; i < nStatus; i++) { + statuses.add(ClusterHealthStatus.fromValue(in.readByte())); + } + indicesOptions = IndicesOptions.readIndicesOptions(in); + } +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java new file mode 100644 index 00000000000..50d305efe90 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java @@ -0,0 +1,385 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.shards; + +import com.carrotsearch.hppc.cursors.IntObjectCursor; +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import com.google.common.collect.ImmutableList; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.collect.ImmutableOpenIntMap; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentBuilderString; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse.StoreStatus.*; + +/** + * Response for {@link IndicesShardStoresAction} + * + * Consists of {@link StoreStatus}s for requested indices grouped by + * indices and shard ids and a list of encountered node {@link Failure}s + */ +public class IndicesShardStoresResponse extends ActionResponse implements ToXContent { + + /** + * Shard store information from a node + */ + public static class StoreStatus implements Streamable, ToXContent, Comparable { + private DiscoveryNode node; + private long version; + private Throwable storeException; + private Allocation allocation; + + /** + * The status of the shard store with respect to the cluster + */ + public enum Allocation { + + /** + * Allocated as primary + */ + PRIMARY((byte) 0), + + /** + * Allocated as a replica + */ + REPLICA((byte) 1), + + /** + * Not allocated + */ + UNUSED((byte) 2); + + private final byte id; + + Allocation(byte id) { + this.id = id; + } + + private static Allocation fromId(byte id) { + switch (id) { + case 0: return PRIMARY; + case 1: return REPLICA; + case 2: return UNUSED; + default: throw new IllegalArgumentException("unknown id for allocation [" + id + "]"); + } + } + + public String value() { + switch (id) { + case 0: return "primary"; + case 1: return "replica"; + case 2: return "unused"; + default: throw new IllegalArgumentException("unknown id for allocation [" + id + "]"); + } + } + + private static Allocation readFrom(StreamInput in) throws IOException { + return fromId(in.readByte()); + } + + private void writeTo(StreamOutput out) throws IOException { + out.writeByte(id); + } + } + + private StoreStatus() { + } + + public StoreStatus(DiscoveryNode node, long version, Allocation allocation, Throwable storeException) { + this.node = node; + this.version = version; + this.allocation = allocation; + this.storeException = storeException; + } + + /** + * Node the store belongs to + */ + public DiscoveryNode getNode() { + return node; + } + + /** + * Version of the store, used to select the store that will be + * used as a primary. + */ + public long getVersion() { + return version; + } + + /** + * Exception while trying to open the + * shard index or from when the shard failed + */ + public Throwable getStoreException() { + return storeException; + } + + /** + * The allocation status of the store. 
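+ * The allocation is resolved from the node's current shard routing: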
+ * {@link Allocation#PRIMARY} indicates a primary shard copy + * {@link Allocation#REPLICA} indicates a replica shard copy + * {@link Allocation#UNUSED} indicates an unused shard copy + */ + public Allocation getAllocation() { + return allocation; + } + + static StoreStatus readStoreStatus(StreamInput in) throws IOException { + StoreStatus storeStatus = new StoreStatus(); + storeStatus.readFrom(in); + return storeStatus; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + node = DiscoveryNode.readNode(in); + version = in.readLong(); + allocation = Allocation.readFrom(in); + if (in.readBoolean()) { + storeException = in.readThrowable(); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + node.writeTo(out); + out.writeLong(version); + allocation.writeTo(out); + if (storeException != null) { + out.writeBoolean(true); + out.writeThrowable(storeException); + } else { + out.writeBoolean(false); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + node.toXContent(builder, params); + builder.field(Fields.VERSION, version); + builder.field(Fields.ALLOCATED, allocation.value()); + if (storeException != null) { + builder.startObject(Fields.STORE_EXCEPTION); + ElasticsearchException.toXContent(builder, params, storeException); + builder.endObject(); + } + return builder; + } + + @Override + public int compareTo(StoreStatus other) { + if (storeException != null && other.storeException == null) { + return 1; + } else if (other.storeException != null && storeException == null) { + return -1; + } else { + int compare = Long.compare(other.version, version); + if (compare == 0) { + return Integer.compare(allocation.id, other.allocation.id); + } + return compare; + } + } + } + + /** + * Single node failure while retrieving shard store information + */ + public static class Failure extends DefaultShardOperationFailedException { + private String nodeId; + + public Failure(String nodeId, String index, int shardId, Throwable reason) { + super(index, shardId, reason); + this.nodeId = nodeId; + } + + private Failure() { + } + + public String nodeId() { + return nodeId; + } + + public static Failure readFailure(StreamInput in) throws IOException { + Failure failure = new Failure(); + failure.readFrom(in); + return failure; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + nodeId = in.readString(); + super.readFrom(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(nodeId); + super.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field("node", nodeId()); + super.toXContent(builder, params); + return builder; + } + } + + private ImmutableOpenMap>> storeStatuses; + private ImmutableList failures; + + public IndicesShardStoresResponse(ImmutableOpenMap>> storeStatuses, ImmutableList failures) { + this.storeStatuses = storeStatuses; + this.failures = failures; + } + + IndicesShardStoresResponse() { + this(ImmutableOpenMap.>>of(), ImmutableList.of()); + } + + /** + * Returns {@link StoreStatus}s + * grouped by their index names and shard ids. 
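+ * Only shards that matched the requested statuses appear in the result.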
+ */ + public ImmutableOpenMap>> getStoreStatuses() { + return storeStatuses; + } + + /** + * Returns node {@link Failure}s encountered + * while executing the request + */ + public ImmutableList getFailures() { + return failures; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + int numResponse = in.readVInt(); + ImmutableOpenMap.Builder>> storeStatusesBuilder = ImmutableOpenMap.builder(); + for (int i = 0; i < numResponse; i++) { + String index = in.readString(); + int indexEntries = in.readVInt(); + ImmutableOpenIntMap.Builder> shardEntries = ImmutableOpenIntMap.builder(); + for (int shardCount = 0; shardCount < indexEntries; shardCount++) { + int shardID = in.readInt(); + int nodeEntries = in.readVInt(); + List storeStatuses = new ArrayList<>(nodeEntries); + for (int nodeCount = 0; nodeCount < nodeEntries; nodeCount++) { + storeStatuses.add(readStoreStatus(in)); + } + shardEntries.put(shardID, storeStatuses); + } + storeStatusesBuilder.put(index, shardEntries.build()); + } + int numFailure = in.readVInt(); + ImmutableList.Builder failureBuilder = ImmutableList.builder(); + for (int i = 0; i < numFailure; i++) { + failureBuilder.add(Failure.readFailure(in)); + } + storeStatuses = storeStatusesBuilder.build(); + failures = failureBuilder.build(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVInt(storeStatuses.size()); + for (ObjectObjectCursor>> indexShards : storeStatuses) { + out.writeString(indexShards.key); + out.writeVInt(indexShards.value.size()); + for (IntObjectCursor> shardStatusesEntry : indexShards.value) { + out.writeInt(shardStatusesEntry.key); + out.writeVInt(shardStatusesEntry.value.size()); + for (StoreStatus storeStatus : shardStatusesEntry.value) { + storeStatus.writeTo(out); + } + } + } + out.writeVInt(failures.size()); + for (ShardOperationFailedException failure : failures) { + failure.writeTo(out); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (failures.size() > 0) { + builder.startArray(Fields.FAILURES); + for (ShardOperationFailedException failure : failures) { + builder.startObject(); + failure.toXContent(builder, params); + builder.endObject(); + } + builder.endArray(); + } + + builder.startObject(Fields.INDICES); + for (ObjectObjectCursor>> indexShards : storeStatuses) { + builder.startObject(indexShards.key); + + builder.startObject(Fields.SHARDS); + for (IntObjectCursor> shardStatusesEntry : indexShards.value) { + builder.startObject(String.valueOf(shardStatusesEntry.key)); + builder.startArray(Fields.STORES); + for (StoreStatus storeStatus : shardStatusesEntry.value) { + builder.startObject(); + storeStatus.toXContent(builder, params); + builder.endObject(); + } + builder.endArray(); + + builder.endObject(); + } + builder.endObject(); + + builder.endObject(); + } + builder.endObject(); + return builder; + } + + static final class Fields { + static final XContentBuilderString INDICES = new XContentBuilderString("indices"); + static final XContentBuilderString SHARDS = new XContentBuilderString("shards"); + static final XContentBuilderString FAILURES = new XContentBuilderString("failures"); + static final XContentBuilderString STORES = new XContentBuilderString("stores"); + // StoreStatus fields + static final XContentBuilderString VERSION = new XContentBuilderString("version"); + static final XContentBuilderString STORE_EXCEPTION = new 
XContentBuilderString("store_exception"); + static final XContentBuilderString ALLOCATED = new XContentBuilderString("allocation"); + } +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java new file mode 100644 index 00000000000..099ba54598d --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java @@ -0,0 +1,229 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action.admin.indices.shards; + +import com.google.common.collect.ImmutableList; +import org.apache.lucene.util.CollectionUtil; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus; +import org.elasticsearch.action.admin.cluster.health.ClusterShardHealth; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.*; +import org.elasticsearch.common.collect.ImmutableOpenIntMap; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.CountDown; +import org.elasticsearch.gateway.AsyncShardFetch; +import org.elasticsearch.gateway.TransportNodesListGatewayStartedShards; +import org.elasticsearch.gateway.TransportNodesListGatewayStartedShards.NodeGatewayStartedShards; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.util.*; +import java.util.concurrent.ConcurrentLinkedQueue; + +/** + * Transport action that reads the cluster state for shards with the requested criteria (see {@link ClusterHealthStatus}) of specific indices + * and fetches store information from all the nodes using {@link TransportNodesListGatewayStartedShards} + */ +public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAction { + + private final 
TransportNodesListGatewayStartedShards listShardStoresInfo; + + @Inject + public TransportIndicesShardStoresAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, TransportNodesListGatewayStartedShards listShardStoresInfo) { + super(settings, IndicesShardStoresAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, IndicesShardStoresRequest.class); + this.listShardStoresInfo = listShardStoresInfo; + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected IndicesShardStoresResponse newResponse() { + return new IndicesShardStoresResponse(); + } + + @Override + protected void masterOperation(IndicesShardStoresRequest request, ClusterState state, ActionListener listener) { + final RoutingTable routingTables = state.routingTable(); + final RoutingNodes routingNodes = state.routingNodes(); + final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); + final Set shardIdsToFetch = new HashSet<>(); + + // collect relevant shard ids of the requested indices for fetching store infos + for (String index : concreteIndices) { + IndexRoutingTable indexShardRoutingTables = routingTables.index(index); + if (indexShardRoutingTables == null) { + continue; + } + for (IndexShardRoutingTable routing : indexShardRoutingTables) { + ClusterShardHealth shardHealth = new ClusterShardHealth(routing.shardId().id(), routing); + if (request.shardStatuses().contains(shardHealth.getStatus())) { + shardIdsToFetch.add(routing.shardId()); + } + } + } + + // async fetch store infos from all the nodes + // NOTE: instead of fetching shard store info one by one from every node (nShards * nNodes requests) + // we could fetch all shard store info from every node once (nNodes requests) + // we have to implement a TransportNodesAction instead of using TransportNodesListGatewayStartedShards + // for fetching shard stores info, that operates on a list of shards instead of a single shard + new AsyncShardStoresInfoFetches(state.nodes(), routingNodes, state.metaData(), shardIdsToFetch, listener).start(); + } + + @Override + protected ClusterBlockException checkBlock(IndicesShardStoresRequest request, ClusterState state) { + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, request)); + } + + private class AsyncShardStoresInfoFetches { + private final DiscoveryNodes nodes; + private final RoutingNodes routingNodes; + private final MetaData metaData; + private final Set shardIds; + private final ActionListener listener; + private CountDown expectedOps; + private final Queue fetchResponses; + + AsyncShardStoresInfoFetches(DiscoveryNodes nodes, RoutingNodes routingNodes, MetaData metaData, Set shardIds, ActionListener listener) { + this.nodes = nodes; + this.routingNodes = routingNodes; + this.metaData = metaData; + this.shardIds = shardIds; + this.listener = listener; + this.fetchResponses = new ConcurrentLinkedQueue<>(); + this.expectedOps = new CountDown(shardIds.size()); + } + + void start() { + if (shardIds.isEmpty()) { + listener.onResponse(new IndicesShardStoresResponse()); + } else { + for (ShardId shardId : shardIds) { + InternalAsyncFetch fetch = new InternalAsyncFetch(logger, "shard_stores", shardId, listShardStoresInfo); + fetch.fetchData(nodes, metaData, 
Collections.emptySet()); + } + } + } + + private class InternalAsyncFetch extends AsyncShardFetch { + + InternalAsyncFetch(ESLogger logger, String type, ShardId shardId, TransportNodesListGatewayStartedShards action) { + super(logger, type, shardId, action); + } + + @Override + protected synchronized void processAsyncFetch(ShardId shardId, NodeGatewayStartedShards[] responses, FailedNodeException[] failures) { + fetchResponses.add(new Response(shardId, responses, failures)); + if (expectedOps.countDown()) { + finish(); + } + } + + void finish() { + ImmutableOpenMap.Builder>> indicesStoreStatusesBuilder = ImmutableOpenMap.builder(); + ImmutableList.Builder failureBuilder = ImmutableList.builder(); + for (Response fetchResponse : fetchResponses) { + ImmutableOpenIntMap> indexStoreStatuses = indicesStoreStatusesBuilder.get(fetchResponse.shardId.getIndex()); + final ImmutableOpenIntMap.Builder> indexShardsBuilder; + if (indexStoreStatuses == null) { + indexShardsBuilder = ImmutableOpenIntMap.builder(); + } else { + indexShardsBuilder = ImmutableOpenIntMap.builder(indexStoreStatuses); + } + java.util.List storeStatuses = indexShardsBuilder.get(fetchResponse.shardId.id()); + if (storeStatuses == null) { + storeStatuses = new ArrayList<>(); + } + for (NodeGatewayStartedShards response : fetchResponse.responses) { + if (shardExistsInNode(response)) { + IndicesShardStoresResponse.StoreStatus.Allocation allocation = getAllocation(fetchResponse.shardId.getIndex(), fetchResponse.shardId.id(), response.getNode()); + storeStatuses.add(new IndicesShardStoresResponse.StoreStatus(response.getNode(), response.version(), allocation, response.storeException())); + } + } + CollectionUtil.timSort(storeStatuses); + indexShardsBuilder.put(fetchResponse.shardId.id(), storeStatuses); + indicesStoreStatusesBuilder.put(fetchResponse.shardId.getIndex(), indexShardsBuilder.build()); + for (FailedNodeException failure : fetchResponse.failures) { + failureBuilder.add(new IndicesShardStoresResponse.Failure(failure.nodeId(), fetchResponse.shardId.getIndex(), fetchResponse.shardId.id(), failure.getCause())); + } + } + listener.onResponse(new IndicesShardStoresResponse(indicesStoreStatusesBuilder.build(), failureBuilder.build())); + } + + private IndicesShardStoresResponse.StoreStatus.Allocation getAllocation(String index, int shardID, DiscoveryNode node) { + for (ShardRouting shardRouting : routingNodes.node(node.id())) { + ShardId shardId = shardRouting.shardId(); + if (shardId.id() == shardID && shardId.getIndex().equals(index)) { + if (shardRouting.primary()) { + return IndicesShardStoresResponse.StoreStatus.Allocation.PRIMARY; + } else if (shardRouting.assignedToNode()) { + return IndicesShardStoresResponse.StoreStatus.Allocation.REPLICA; + } else { + return IndicesShardStoresResponse.StoreStatus.Allocation.UNUSED; + } + } + } + return IndicesShardStoresResponse.StoreStatus.Allocation.UNUSED; + } + + /** + * A shard exists/existed in a node only if shard state file exists in the node + */ + private boolean shardExistsInNode(final NodeGatewayStartedShards response) { + return response.storeException() != null || response.version() != -1; + } + + @Override + protected void reroute(ShardId shardId, String reason) { + // no-op + } + + public class Response { + private final ShardId shardId; + private final NodeGatewayStartedShards[] responses; + private final FailedNodeException[] failures; + + public Response(ShardId shardId, NodeGatewayStartedShards[] responses, FailedNodeException[] failures) { + this.shardId = shardId; 
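+ // raw per-node responses and failures for one shard; finish() reduces them into StoreStatus entries once every shard has reported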
+ this.responses = responses; + this.failures = failures; + } + } + } + } +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java index c6cf00dfbc0..b298cdff75b 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java @@ -30,6 +30,7 @@ import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.inject.Inject; @@ -37,9 +38,9 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.IndexShardMissingException; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.shard.ShardNotFoundException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -57,9 +58,10 @@ public class TransportIndicesStatsAction extends TransportBroadcastAction { @Inject - public TransportGetIndexTemplatesAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters) { - super(settings, GetIndexTemplatesAction.NAME, transportService, clusterService, threadPool, actionFilters, GetIndexTemplatesRequest.class); + public TransportGetIndexTemplatesAction(Settings settings, TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, GetIndexTemplatesAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, GetIndexTemplatesRequest.class); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java index 492dbf352c7..186d2288841 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java @@ -25,6 +25,7 @@ import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -40,8 +41,9 @@ public class TransportPutIndexTemplateAction extends TransportMasterNodeAction

> routingMap = clusterState.metaData().resolveSearchRouting(Integer.toString(ThreadLocalRandom.current().nextInt(1000)), request.indices()); - return clusterService.operationRouting().searchShards(clusterState, request.indices(), concreteIndices, routingMap, "_local"); + Map> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, Integer.toString(ThreadLocalRandom.current().nextInt(1000)), request.indices()); + return clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, "_local"); } @Override @@ -168,7 +173,8 @@ public class TransportValidateQueryAction extends TransportBroadcastAction { @Inject - public TransportDeleteWarmerAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters) { - super(settings, DeleteWarmerAction.NAME, transportService, clusterService, threadPool, actionFilters, DeleteWarmerRequest.class); + public TransportDeleteWarmerAction(Settings settings, TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, DeleteWarmerAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, DeleteWarmerRequest.class); } @Override @@ -67,12 +68,12 @@ public class TransportDeleteWarmerAction extends TransportMasterNodeAction listener) { - final String[] concreteIndices = clusterService.state().metaData().concreteIndices(request.indicesOptions(), request.indices()); + final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); clusterService.submitStateUpdateTask("delete_warmer [" + Arrays.toString(request.names()) + "]", new AckedClusterStateUpdateTask(request, listener) { @Override @@ -94,7 +95,7 @@ public class TransportDeleteWarmerAction extends TransportMasterNodeAction { @Inject - public TransportGetWarmersAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters) { - super(settings, GetWarmersAction.NAME, transportService, clusterService, threadPool, actionFilters, GetWarmersRequest.class); + public TransportGetWarmersAction(Settings settings, TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, GetWarmersAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, GetWarmersRequest.class); } @Override @@ -54,7 +56,7 @@ public class TransportGetWarmersAction extends TransportClusterInfoAction 0) { @@ -127,7 +127,7 @@ public class TransportPutWarmerAction extends TransportMasterNodeAction indices = new HashMap<>(); - private final MetaData metaData; - ConcreteIndices(MetaData metaData) { - this.metaData = metaData; + ConcreteIndices(ClusterState state, IndexNameExpressionResolver indexNameExpressionResolver) { + this.state = state; + this.indexNameExpressionResolver = indexNameExpressionResolver; } String getConcreteIndex(String indexOrAlias) { return indices.get(indexOrAlias); } - String resolveIfAbsent(String indexOrAlias, IndicesOptions indicesOptions) { - String concreteIndex = indices.get(indexOrAlias); + String resolveIfAbsent(DocumentRequest request) { + String concreteIndex = indices.get(request.index()); if (concreteIndex == null) { - concreteIndex = 
metaData.concreteSingleIndex(indexOrAlias, indicesOptions); - indices.put(indexOrAlias, concreteIndex); + concreteIndex = indexNameExpressionResolver.concreteSingleIndex(state, request); + indices.put(request.index(), concreteIndex); } return concreteIndex; } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 09157f7ba4c..70b3e1d7ac5 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -36,6 +36,7 @@ import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.common.bytes.BytesReference; @@ -78,9 +79,10 @@ public class TransportShardBulkAction extends TransportReplicationAction> routingMap = clusterState.metaData().resolveSearchRouting(request.routing(), request.indices()); - return clusterService.operationRouting().searchShards(clusterState, request.indices(), concreteIndices, routingMap, request.preference()); + Map> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, request.routing(), request.indices()); + return clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, request.preference()); } @Override @@ -151,7 +154,9 @@ public class TransportExistsAction extends TransportBroadcastAction, ToXContent { @@ -40,17 +43,17 @@ public class MultiGetResponse extends ActionResponse implements Iterablenull if there was error. - */ - @Nullable - public PercolateResponse response() { - return response; - } - - /** - * @return An error description if there was an error or null if the percolate request was successful - */ - @Nullable - public String errorMessage() { - return errorMessage; - } /** * @return The percolator response or null if there was error. @@ -154,7 +138,7 @@ public class MultiPercolateResponse extends ActionResponse implements Iterablefalse is returned. 
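* When this returns true, {@link #getFailure()} returns the cause.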
*/ public boolean isFailure() { - return errorMessage != null; + return throwable != null; + } + + public Throwable getFailure() { + return throwable; } @Override @@ -171,7 +159,7 @@ public class MultiPercolateResponse extends ActionResponse implements Iterable> routing = clusterState.metaData().resolveSearchRouting(percolateRequest.routing(), percolateRequest.indices()); + Map> routing = indexNameExpressionResolver.resolveSearchRouting(clusterState, percolateRequest.routing(), percolateRequest.indices()); // TODO: I only need shardIds, ShardIterator(ShardRouting) is only needed in TransportShardMultiPercolateAction GroupShardsIterator shards = clusterService.operationRouting().searchShards( - clusterState, percolateRequest.indices(), concreteIndices, routing, percolateRequest.preference() + clusterState, concreteIndices, routing, percolateRequest.preference() ); if (shards.size() == 0) { reducedResponses.set(slot, new UnavailableShardsException(null, "No shards available")); @@ -184,7 +184,7 @@ public class TransportMultiPercolateAction extends HandledTransportAction> routingMap = clusterState.metaData().resolveSearchRouting(request.routing(), request.indices()); - return clusterService.operationRouting().searchShards(clusterState, request.indices(), concreteIndices, routingMap, request.preference()); + Map> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, request.routing(), request.indices()); + return clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, request.preference()); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java b/core/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java index adca1883470..036197d483d 100644 --- a/core/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java +++ b/core/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java @@ -30,6 +30,7 @@ import org.elasticsearch.action.support.single.shard.SingleShardRequest; import org.elasticsearch.action.support.single.shard.TransportSingleShardAction; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; @@ -56,8 +57,10 @@ public class TransportShardMultiPercolateAction extends TransportSingleShardActi private static final String ACTION_NAME = MultiPercolateAction.NAME + "[shard]"; @Inject - public TransportShardMultiPercolateAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, PercolatorService percolatorService, ActionFilters actionFilters) { - super(settings, ACTION_NAME, threadPool, clusterService, transportService, actionFilters, + public TransportShardMultiPercolateAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, + TransportService transportService, PercolatorService percolatorService, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, ACTION_NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, Request.class, ThreadPool.Names.PERCOLATE); this.percolatorService = percolatorService; } diff --git 
a/core/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java b/core/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java index 0d7ae75d66f..80745652be3 100644 --- a/core/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java +++ b/core/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java @@ -154,27 +154,13 @@ public class MultiSearchResponse extends ActionResponse implements Iterable> routingMap = clusterState.metaData().resolveSearchRouting(searchRequest.routing(), searchRequest.indices()); - int shardCount = clusterService.operationRouting().searchShardsCount(clusterState, searchRequest.indices(), concreteIndices, routingMap, searchRequest.preference()); + String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, searchRequest); + Map> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, searchRequest.routing(), searchRequest.indices()); + int shardCount = clusterService.operationRouting().searchShardsCount(clusterState, concreteIndices, routingMap); if (shardCount == 1) { // if we only have one group, then we always want Q_A_F, no need for DFS, and no need to do THEN since we hit one shard searchRequest.searchType(QUERY_AND_FETCH); } - } catch (IndexMissingException|IndexClosedException e) { + } catch (IndexNotFoundException | IndexClosedException e) { // ignore these failures, we will notify the search response if its really the case from the actual action } catch (Exception e) { logger.debug("failed to optimize search type, continue as normal", e); diff --git a/core/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java b/core/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java index 0a36b6664e8..97b9f6e8082 100644 --- a/core/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.search.type.TransportSearchScrollQueryThenFetchA import org.elasticsearch.action.search.type.TransportSearchScrollScanAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; @@ -47,8 +48,9 @@ public class TransportSearchScrollAction extends HandledTransportAction> routingMap = clusterState.metaData().resolveSearchRouting(request.routing(), request.indices()); + Map> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, request.routing(), request.indices()); - shardsIts = clusterService.operationRouting().searchShards(clusterState, request.indices(), concreteIndices, routingMap, request.preference()); + shardsIts = clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, request.preference()); expectedSuccessfulOps = shardsIts.size(); // we need to add 1 for non active partition, since we count it in the total! 
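The resolution pattern above repeats across every transport action in this change: expressions are resolved against the cluster state first, and routing only ever sees concrete indices. A condensed sketch of the new call sequence (variable names assumed; the method signatures are the ones introduced in this diff):

    // resolve aliases and wildcards to concrete index names up front
    String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request);
    // routing resolution moved off MetaData onto the same resolver
    Map<String, Set<String>> routingMap =
            indexNameExpressionResolver.resolveSearchRouting(clusterState, request.routing(), request.indices());
    // searchShards no longer takes the raw index expressions, only the resolved names
    GroupShardsIterator shardsIts =
            clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, request.preference());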
expectedTotalOps = shardsIts.totalSizeWith1ForEmpty(); @@ -157,7 +159,7 @@ public abstract class TransportSearchTypeAction extends TransportAction() { @Override public void onResponse(FirstResult result) { diff --git a/core/src/main/java/org/elasticsearch/action/suggest/TransportSuggestAction.java b/core/src/main/java/org/elasticsearch/action/suggest/TransportSuggestAction.java index 27caf314b0d..2269856c072 100644 --- a/core/src/main/java/org/elasticsearch/action/suggest/TransportSuggestAction.java +++ b/core/src/main/java/org/elasticsearch/action/suggest/TransportSuggestAction.java @@ -29,6 +29,7 @@ import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.bytes.BytesReference; @@ -65,8 +66,9 @@ public class TransportSuggestAction extends TransportBroadcastAction> routingMap = clusterState.metaData().resolveSearchRouting(request.routing(), request.indices()); - return clusterService.operationRouting().searchShards(clusterState, request.indices(), concreteIndices, routingMap, request.preference()); + Map> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, request.routing(), request.indices()); + return clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, request.preference()); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java b/core/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java index 313692d75b1..17d8bcfb998 100644 --- a/core/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java +++ b/core/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java @@ -24,9 +24,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.shard.IndexShardException; import org.elasticsearch.rest.RestStatus; import java.io.IOException; @@ -46,12 +44,12 @@ public class DefaultShardOperationFailedException implements ShardOperationFaile private RestStatus status; - private DefaultShardOperationFailedException() { + protected DefaultShardOperationFailedException() { } - public DefaultShardOperationFailedException(IndexShardException e) { - this.index = e.shardId().index().name(); - this.shardId = e.shardId().id(); + public DefaultShardOperationFailedException(ElasticsearchException e) { + this.index = e.getIndex(); + this.shardId = e.getShardId().id(); this.reason = e; this.status = e.status(); } diff --git a/core/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java b/core/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java index 001b410842e..e92eff3f29d 100644 --- a/core/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java @@ -21,6 +21,7 @@ package 
org.elasticsearch.action.support; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportChannel; @@ -32,8 +33,8 @@ import org.elasticsearch.transport.TransportService; */ public abstract class HandledTransportAction extends TransportAction{ - protected HandledTransportAction(Settings settings, String actionName, ThreadPool threadPool, TransportService transportService, ActionFilters actionFilters, Class request) { - super(settings, actionName, threadPool, actionFilters); + protected HandledTransportAction(Settings settings, String actionName, ThreadPool threadPool, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Class request) { + super(settings, actionName, threadPool, actionFilters, indexNameExpressionResolver); transportService.registerRequestHandler(actionName, request, ThreadPool.Names.SAME, new TransportHandler()); } diff --git a/core/src/main/java/org/elasticsearch/action/support/TransportAction.java b/core/src/main/java/org/elasticsearch/action/support/TransportAction.java index cc349332ac9..d278a992e93 100644 --- a/core/src/main/java/org/elasticsearch/action/support/TransportAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/TransportAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.support; import org.elasticsearch.action.*; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.logging.ESLogger; @@ -39,13 +40,16 @@ public abstract class TransportAction execute(Request request) { diff --git a/core/src/main/java/org/elasticsearch/action/support/TransportActions.java b/core/src/main/java/org/elasticsearch/action/support/TransportActions.java index 0b4b72befb5..1de3ef1aea8 100644 --- a/core/src/main/java/org/elasticsearch/action/support/TransportActions.java +++ b/core/src/main/java/org/elasticsearch/action/support/TransportActions.java @@ -21,9 +21,9 @@ package org.elasticsearch.action.support; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.NoShardAvailableActionException; -import org.elasticsearch.index.IndexShardMissingException; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.shard.IllegalIndexShardStateException; -import org.elasticsearch.indices.IndexMissingException; +import org.elasticsearch.index.shard.ShardNotFoundException; /** */ @@ -31,16 +31,10 @@ public class TransportActions { public static boolean isShardNotAvailableException(Throwable t) { Throwable actual = ExceptionsHelper.unwrapCause(t); - if (actual instanceof IllegalIndexShardStateException) { - return true; - } - if (actual instanceof IndexMissingException) { - return true; - } - if (actual instanceof IndexShardMissingException) { - return true; - } - if (actual instanceof NoShardAvailableActionException) { + if (actual instanceof ShardNotFoundException || + actual instanceof IndexNotFoundException || + actual instanceof IllegalIndexShardStateException || + actual instanceof NoShardAvailableActionException) { return true; } return false; diff --git 
a/core/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardOperationFailedException.java b/core/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardOperationFailedException.java index ca761470e21..fb42c7a6e42 100644 --- a/core/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardOperationFailedException.java +++ b/core/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardOperationFailedException.java @@ -19,9 +19,9 @@ package org.elasticsearch.action.support.broadcast; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchWrapperException; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.index.shard.IndexShardException; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; @@ -31,18 +31,19 @@ import java.io.IOException; * * */ -public class BroadcastShardOperationFailedException extends IndexShardException implements ElasticsearchWrapperException { +public class BroadcastShardOperationFailedException extends ElasticsearchException implements ElasticsearchWrapperException { public BroadcastShardOperationFailedException(ShardId shardId, String msg) { - super(shardId, msg, null); + this(shardId, msg, null); } public BroadcastShardOperationFailedException(ShardId shardId, Throwable cause) { - super(shardId, "", cause); + this(shardId, "", cause); } public BroadcastShardOperationFailedException(ShardId shardId, String msg, Throwable cause) { - super(shardId, msg, cause); + super(msg, cause); + setShard(shardId); } public BroadcastShardOperationFailedException(StreamInput in) throws IOException{ diff --git a/core/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java b/core/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java index c77f3ec766b..dcb6952dc35 100644 --- a/core/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java @@ -27,6 +27,7 @@ import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.GroupShardsIterator; @@ -52,9 +53,10 @@ public abstract class TransportBroadcastAction request, Class shardRequest, String shardExecutor) { - super(settings, actionName, threadPool, transportService, actionFilters, request); + super(settings, actionName, threadPool, transportService, actionFilters, indexNameExpressionResolver, request); this.clusterService = clusterService; this.transportService = transportService; this.threadPool = threadPool; @@ -108,7 +110,7 @@ public abstract class TransportBroadcastAction request) { - super(settings, actionName, threadPool, transportService, actionFilters, request); + protected TransportMasterNodeAction(Settings settings, String actionName, TransportService transportService, + ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, Class request) { + super(settings, actionName, threadPool, transportService, actionFilters, indexNameExpressionResolver, 
request); this.transportService = transportService; this.clusterService = clusterService; this.executor = executor(); diff --git a/core/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeReadAction.java b/core/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeReadAction.java index c33b9fde774..3faeb50bba2 100644 --- a/core/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeReadAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeReadAction.java @@ -22,6 +22,7 @@ package org.elasticsearch.action.support.master; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -36,8 +37,10 @@ public abstract class TransportMasterNodeReadAction request) { - super(settings, actionName, transportService, clusterService, threadPool, actionFilters,request); + protected TransportMasterNodeReadAction(Settings settings, String actionName, TransportService transportService, + ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, Class request) { + super(settings, actionName, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver,request); this.forceLocal = settings.getAsBoolean(FORCE_LOCAL_SETTING, null); } diff --git a/core/src/main/java/org/elasticsearch/action/support/master/info/TransportClusterInfoAction.java b/core/src/main/java/org/elasticsearch/action/support/master/info/TransportClusterInfoAction.java index d1bdb86e1bb..560a699ddf1 100644 --- a/core/src/main/java/org/elasticsearch/action/support/master/info/TransportClusterInfoAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/master/info/TransportClusterInfoAction.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -32,8 +33,10 @@ import org.elasticsearch.transport.TransportService; */ public abstract class TransportClusterInfoAction extends TransportMasterNodeReadAction { - public TransportClusterInfoAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, Class request) { - super(settings, actionName, transportService, clusterService, threadPool, actionFilters, request); + public TransportClusterInfoAction(Settings settings, String actionName, TransportService transportService, + ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, Class request) { + super(settings, actionName, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, request); } @Override @@ -44,7 +47,7 @@ public abstract class TransportClusterInfoAction listener) { - String[] 
concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices()); + String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); doMasterOperation(request, concreteIndices, state, listener); } diff --git a/core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java b/core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java index 50e84379059..8383189f3ef 100644 --- a/core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java @@ -27,6 +27,7 @@ import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.settings.Settings; @@ -49,8 +50,9 @@ public abstract class TransportNodesAction request, Class nodeRequest, String nodeExecutor) { - super(settings, actionName, threadPool, transportService, actionFilters, request); + IndexNameExpressionResolver indexNameExpressionResolver, Class request, Class nodeRequest, + String nodeExecutor) { + super(settings, actionName, threadPool, transportService, actionFilters, indexNameExpressionResolver, request); this.clusterName = clusterName; this.clusterService = clusterService; this.transportService = transportService; diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index cac18813cfd..2e538385cb2 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.support.replication; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionWriteResponse; @@ -39,6 +40,7 @@ import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.*; import org.elasticsearch.common.Nullable; @@ -57,7 +59,6 @@ import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.IndexShardException; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.IndicesService; @@ -92,8 +93,9 @@ public abstract class TransportReplicationAction request, Class replicaRequest, String executor) { - super(settings, actionName, threadPool, actionFilters); + IndexNameExpressionResolver indexNameExpressionResolver, Class request, + Class replicaRequest, String 
executor) { + super(settings, actionName, threadPool, actionFilters, indexNameExpressionResolver); this.transportService = transportService; this.clusterService = clusterService; this.indicesService = indicesService; @@ -240,10 +242,11 @@ public abstract class TransportReplicationAction request, String executor) { - super(settings, actionName, threadPool, transportService, actionFilters, request); + super(settings, actionName, threadPool, transportService, actionFilters, indexNameExpressionResolver, request); this.clusterService = clusterService; this.transportService = transportService; @@ -110,7 +113,7 @@ public abstract class TransportSingleCustomOperationAction request) { - super(settings, actionName, threadPool, transportService, actionFilters, request); + protected TransportInstanceSingleOperationAction(Settings settings, String actionName, ThreadPool threadPool, + ClusterService clusterService, TransportService transportService, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Class request) { + super(settings, actionName, threadPool, transportService, actionFilters, indexNameExpressionResolver, request); this.clusterService = clusterService; this.transportService = transportService; this.executor = executor(); @@ -128,7 +131,7 @@ public abstract class TransportInstanceSingleOperationAction request, String executor) { - super(settings, actionName, threadPool, actionFilters); + super(settings, actionName, threadPool, actionFilters, indexNameExpressionResolver); this.clusterService = clusterService; this.transportService = transportService; @@ -125,7 +127,7 @@ public abstract class TransportSingleShardAction> routingMap = clusterState.metaData().resolveSearchRouting(request.routing(), request.indices()); - return clusterService.operationRouting().searchShards(clusterState, request.indices(), concreteIndices, routingMap, request.preference()); + Map> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, request.routing(), request.indices()); + return clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, request.preference()); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java index e14d687a860..1bc6ddd5a0e 100644 --- a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java +++ b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java @@ -39,6 +39,7 @@ import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.action.support.single.instance.TransportInstanceSingleOperationAction; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.PlainShardIterator; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.routing.ShardRouting; @@ -73,8 +74,9 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio @Inject public TransportUpdateAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, TransportIndexAction indexAction, TransportDeleteAction deleteAction, TransportCreateIndexAction createIndexAction, - UpdateHelper updateHelper, ActionFilters actionFilters, IndicesService indicesService) { - super(settings, UpdateAction.NAME, 
threadPool, clusterService, transportService, actionFilters, UpdateRequest.class); + UpdateHelper updateHelper, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + IndicesService indicesService) { + super(settings, UpdateAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, UpdateRequest.class); this.indexAction = indexAction; this.deleteAction = deleteAction; this.createIndexAction = createIndexAction; diff --git a/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java b/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java index c27d81abbeb..dd3ea21c5ef 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java @@ -19,6 +19,7 @@ package org.elasticsearch.bootstrap; +import org.elasticsearch.Version; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.logging.ESLogger; @@ -27,7 +28,6 @@ import org.elasticsearch.common.logging.Loggers; import java.io.IOException; import java.net.URL; import java.net.URLClassLoader; -import java.net.URLDecoder; import java.nio.file.FileVisitResult; import java.nio.file.Files; import java.nio.file.Path; @@ -90,7 +90,7 @@ public class JarHell { logger.debug("excluding system resource: {}", path); continue; } - if (path.endsWith(".jar")) { + if (path.toString().endsWith(".jar")) { if (!seenJars.add(path)) { logger.debug("excluding duplicate classpath element: {}", path); continue; // we can't fail because of sheistiness with joda-time @@ -99,23 +99,7 @@ public class JarHell { try (JarFile file = new JarFile(path.toString())) { Manifest manifest = file.getManifest(); if (manifest != null) { - // inspect Manifest: give a nice error if jar requires a newer java version - String systemVersion = System.getProperty("java.specification.version"); - String targetVersion = manifest.getMainAttributes().getValue("X-Compile-Target-JDK"); - if (targetVersion != null) { - float current = Float.POSITIVE_INFINITY; - float target = Float.NEGATIVE_INFINITY; - try { - current = Float.parseFloat(systemVersion); - target = Float.parseFloat(targetVersion); - } catch (NumberFormatException e) { - // some spec changed, time for a more complex parser - } - if (current < target) { - throw new IllegalStateException(path + " requires Java " + targetVersion - + ", your system: " + systemVersion); - } - } + checkManifest(manifest, path); } // inspect entries Enumeration elements = file.entries(); @@ -149,6 +133,35 @@ public class JarHell { } } + /** inspect manifest for sure incompatibilities */ + static void checkManifest(Manifest manifest, Path jar) { + // give a nice error if jar requires a newer java version + String systemVersion = System.getProperty("java.specification.version"); + String targetVersion = manifest.getMainAttributes().getValue("X-Compile-Target-JDK"); + if (targetVersion != null) { + float current = Float.POSITIVE_INFINITY; + float target = Float.NEGATIVE_INFINITY; + try { + current = Float.parseFloat(systemVersion); + target = Float.parseFloat(targetVersion); + } catch (NumberFormatException e) { + // some spec changed, time for a more complex parser + } + if (current < target) { + throw new IllegalStateException(jar + " requires Java " + targetVersion + + ", your system: " + systemVersion); + } + } + + // give a nice error if jar is compiled against different es version + String systemESVersion = 
Version.CURRENT.toString(); + String targetESVersion = manifest.getMainAttributes().getValue("X-Compile-Elasticsearch-Version"); + if (targetESVersion != null && targetESVersion.equals(systemESVersion) == false) { + throw new IllegalStateException(jar + " requires Elasticsearch " + targetESVersion + + ", your system: " + systemESVersion); + } + } + static void checkClass(Map clazzes, String clazz, Path jarpath) { Path previous = clazzes.put(clazz, jarpath); if (previous != null) { diff --git a/core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java b/core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java index 7ab5078b04b..755bf333e59 100644 --- a/core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java +++ b/core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java @@ -81,6 +81,9 @@ import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequestBuilder; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse; +import org.elasticsearch.action.admin.indices.shards.IndicesShardStoreRequestBuilder; +import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; +import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; @@ -221,6 +224,29 @@ public interface IndicesAdminClient extends ElasticsearchClient { */ IndicesSegmentsRequestBuilder prepareSegments(String... indices); + /** + * The shard stores info of one or more indices. + * + * @param request The indices shard stores request + * @return The result future + * @see Requests#indicesShardStoresRequest(String...) + */ + ActionFuture shardStores(IndicesShardStoresRequest request); + + /** + * The shard stores info of one or more indices. + * + * @param request The indices shard stores request + * @param listener A listener to be notified with a result + * @see Requests#indicesShardStoresRequest(String...) + */ + void shardStores(IndicesShardStoresRequest request, ActionListener listener); + + /** + * The shard stores info of one or more indices. + */ + IndicesShardStoreRequestBuilder prepareShardStores(String... indices); + /** * Creates an index using an explicit request allowing to specify the settings of the index. 
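A usage sketch for the shard-stores methods just declared (the index name is hypothetical; the builder's get() comes from the standard ActionRequestBuilder surface rather than from this hunk):

    // builder style
    IndicesShardStoresResponse stores = client.admin().indices()
            .prepareShardStores("my-index")
            .get();

    // listener style
    client.admin().indices().shardStores(new IndicesShardStoresRequest("my-index"),
            new ActionListener<IndicesShardStoresResponse>() {
                @Override
                public void onResponse(IndicesShardStoresResponse response) {
                    // inspect per-shard store metadata, e.g. when debugging allocation
                }

                @Override
                public void onFailure(Throwable t) {
                    // handle resolution or transport failures
                }
            });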
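Stepping back to the JarHell change above: extracting checkManifest makes the version gate exercisable in isolation. A sketch, assuming a caller in the same package (the method is package-private) and a hypothetical jar path:

    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.util.jar.Attributes;
    import java.util.jar.Manifest;

    Manifest manifest = new Manifest();
    Attributes attributes = manifest.getMainAttributes();
    attributes.put(Attributes.Name.MANIFEST_VERSION, "1.0");
    // declare a version other than Version.CURRENT to trip the new check
    attributes.putValue("X-Compile-Elasticsearch-Version", "0.90.0");
    Path jar = Paths.get("plugins", "example-plugin.jar"); // hypothetical path
    try {
        JarHell.checkManifest(manifest, jar);
    } catch (IllegalStateException e) {
        // expected: the jar declares a different Elasticsearch version than this node runs
    }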
* diff --git a/core/src/main/java/org/elasticsearch/client/Requests.java b/core/src/main/java/org/elasticsearch/client/Requests.java index 8a70c18b374..e36c26923d8 100644 --- a/core/src/main/java/org/elasticsearch/client/Requests.java +++ b/core/src/main/java/org/elasticsearch/client/Requests.java @@ -49,6 +49,7 @@ import org.elasticsearch.action.admin.indices.optimize.OptimizeRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest; import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.count.CountRequest; @@ -184,6 +185,15 @@ public class Requests { return new IndicesSegmentsRequest(indices); } + /** + * Creates an indices shard stores info request. + * @param indices The indices to get shard store information on + * @return The indices shard stores request + * @see org.elasticsearch.client.IndicesAdminClient#shardStores(IndicesShardStoresRequest) + */ + public static IndicesShardStoresRequest indicesShardStoresRequest(String... indices) { + return new IndicesShardStoresRequest(indices); + } /** * Creates an indices exists request. * diff --git a/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java index a4d271dc599..f9abf2f0437 100644 --- a/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java +++ b/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java @@ -176,6 +176,10 @@ import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequestBuilder; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse; +import org.elasticsearch.action.admin.indices.shards.IndicesShardStoreRequestBuilder; +import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; +import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction; +import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest; import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; @@ -1498,6 +1502,21 @@ public abstract class AbstractClient extends AbstractComponent implements Client return new IndicesSegmentsRequestBuilder(this, IndicesSegmentsAction.INSTANCE).setIndices(indices); } + @Override + public ActionFuture shardStores(IndicesShardStoresRequest request) { + return execute(IndicesShardStoresAction.INSTANCE, request); + } + + @Override + public void shardStores(IndicesShardStoresRequest request, ActionListener listener) { + execute(IndicesShardStoresAction.INSTANCE, request, listener); + } + + @Override + public IndicesShardStoreRequestBuilder prepareShardStores(String... 
indices) { + return new IndicesShardStoreRequestBuilder(this, IndicesShardStoresAction.INSTANCE, indices); + } + @Override public ActionFuture updateSettings(final UpdateSettingsRequest request) { return execute(UpdateSettingsAction.INSTANCE, request); diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java index d8e81d5a90a..b0b3c120c95 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -79,6 +79,7 @@ public class ClusterModule extends AbstractModule implements SpawnModules { bind(MetaDataIndexAliasesService.class).asEagerSingleton(); bind(MetaDataUpdateSettingsService.class).asEagerSingleton(); bind(MetaDataIndexTemplateService.class).asEagerSingleton(); + bind(IndexNameExpressionResolver.class).asEagerSingleton(); bind(RoutingService.class).asEagerSingleton(); diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java index ab44768d5a4..8fb86677487 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -76,7 +76,7 @@ import java.util.Map; * to a node if this node was present in the previous version of the cluster state. If a node was * not present in the previous version of the cluster state, such a node is unlikely to have the previous cluster * state version and should be sent a complete version. In order to make sure that the differences are applied to - * correct version of the cluster state, each cluster state version update generates {@link #uuid} that uniquely + * correct version of the cluster state, each cluster state version update generates {@link #stateUUID} that uniquely * identifies this version of the state. This uuid is verified by the {@link ClusterStateDiff#apply} method to * make sure that the correct diffs are applied.
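The javadoc hunk above begins the uuid -> stateUUID rename; at call sites it lines up as follows (a sketch only: ClusterState.builder(...) is assumed from the wider codebase, while the renamed accessor and builder method appear in the hunks below):

    ClusterState state = clusterService.state();
    String uuid = state.stateUUID();          // formerly state.uuid()
    ClusterState copy = ClusterState.builder(state)
            .stateUUID(uuid)                  // formerly builder.uuid(uuid)
            .build();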
If uuids don’t match, the {@link ClusterStateDiff#apply} method * throws the {@link IncompatibleClusterStateVersionException}, which should cause the publishing mechanism to send @@ -144,7 +144,7 @@ public class ClusterState implements ToXContent, Diffable { private final long version; - private final String uuid; + private final String stateUUID; private final RoutingTable routingTable; @@ -165,13 +165,13 @@ public class ClusterState implements ToXContent, Diffable { private volatile ClusterStateStatus status; - public ClusterState(long version, String uuid, ClusterState state) { - this(state.clusterName, version, uuid, state.metaData(), state.routingTable(), state.nodes(), state.blocks(), state.customs(), false); + public ClusterState(long version, String stateUUID, ClusterState state) { + this(state.clusterName, version, stateUUID, state.metaData(), state.routingTable(), state.nodes(), state.blocks(), state.customs(), false); } - public ClusterState(ClusterName clusterName, long version, String uuid, MetaData metaData, RoutingTable routingTable, DiscoveryNodes nodes, ClusterBlocks blocks, ImmutableOpenMap customs, boolean wasReadFromDiff) { + public ClusterState(ClusterName clusterName, long version, String stateUUID, MetaData metaData, RoutingTable routingTable, DiscoveryNodes nodes, ClusterBlocks blocks, ImmutableOpenMap customs, boolean wasReadFromDiff) { this.version = version; - this.uuid = uuid; + this.stateUUID = stateUUID; this.clusterName = clusterName; this.metaData = metaData; this.routingTable = routingTable; @@ -200,11 +200,11 @@ public class ClusterState implements ToXContent, Diffable { } /** - * This uuid is automatically generated for for each version of cluster state. It is used to make sure that + * This stateUUID is automatically generated for each version of cluster state. It is used to make sure that * we are applying diffs to the right previous state.
*/ - public String uuid() { - return this.uuid; + public String stateUUID() { + return this.stateUUID; } public DiscoveryNodes nodes() { @@ -283,7 +283,7 @@ public class ClusterState implements ToXContent, Diffable { public String prettyPrint() { StringBuilder sb = new StringBuilder(); sb.append("version: ").append(version).append("\n"); - sb.append("uuid: ").append(uuid).append("\n"); + sb.append("state uuid: ").append(stateUUID).append("\n"); sb.append("from_diff: ").append(wasReadFromDiff).append("\n"); sb.append("meta data version: ").append(metaData.version()).append("\n"); sb.append(nodes().prettyPrint()); @@ -362,7 +362,7 @@ public class ClusterState implements ToXContent, Diffable { if (metrics.contains(Metric.VERSION)) { builder.field("version", version); - builder.field("uuid", uuid); + builder.field("state_uuid", stateUUID); } if (metrics.contains(Metric.MASTER_NODE)) { @@ -398,18 +398,8 @@ public class ClusterState implements ToXContent, Diffable { // nodes if (metrics.contains(Metric.NODES)) { builder.startObject("nodes"); - for (DiscoveryNode node : nodes()) { - builder.startObject(node.id(), XContentBuilder.FieldCaseConversion.NONE); - builder.field("name", node.name()); - builder.field("transport_address", node.address().toString()); - - builder.startObject("attributes"); - for (Map.Entry attr : node.attributes().entrySet()) { - builder.field(attr.getKey(), attr.getValue()); - } - builder.endObject(); - - builder.endObject(); + for (DiscoveryNode node : nodes) { + node.toXContent(builder, params); } builder.endObject(); } @@ -417,7 +407,7 @@ public class ClusterState implements ToXContent, Diffable { // meta data if (metrics.contains(Metric.METADATA)) { builder.startObject("metadata"); - + builder.field("cluster_uuid", metaData().clusterUUID()); builder.startObject("templates"); for (ObjectCursor cursor : metaData().templates().values()) { IndexTemplateMetaData templateMetaData = cursor.value; @@ -571,7 +561,7 @@ public class ClusterState implements ToXContent, Diffable { public Builder(ClusterState state) { this.clusterName = state.clusterName; this.version = state.version(); - this.uuid = state.uuid(); + this.uuid = state.stateUUID(); this.nodes = state.nodes(); this.routingTable = state.routingTable(); this.metaData = state.metaData(); @@ -637,7 +627,7 @@ public class ClusterState implements ToXContent, Diffable { return this; } - public Builder uuid(String uuid) { + public Builder stateUUID(String uuid) { this.uuid = uuid; return this; } @@ -734,7 +724,7 @@ public class ClusterState implements ToXContent, Diffable { public void writeTo(StreamOutput out) throws IOException { clusterName.writeTo(out); out.writeLong(version); - out.writeString(uuid); + out.writeString(stateUUID); metaData.writeTo(out); routingTable.writeTo(out); nodes.writeTo(out); @@ -767,8 +757,8 @@ public class ClusterState implements ToXContent, Diffable { private final Diff> customs; public ClusterStateDiff(ClusterState before, ClusterState after) { - fromUuid = before.uuid; - toUuid = after.uuid; + fromUuid = before.stateUUID; + toUuid = after.stateUUID; toVersion = after.version; clusterName = after.clusterName; routingTable = after.routingTable.diff(before.routingTable); @@ -816,14 +806,14 @@ public class ClusterState implements ToXContent, Diffable { @Override public ClusterState apply(ClusterState state) { Builder builder = new Builder(clusterName); - if (toUuid.equals(state.uuid)) { + if (toUuid.equals(state.stateUUID)) { // no need to read the rest - cluster state didn't change return state; } 
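On a receiving node this uuid handshake reduces diff application to a single guarded call against the locally held state, as the remainder of apply() below shows. A hedged sketch of the receive side (how the diff arrives over transport is not shown here):

    ClusterState current = clusterService.state();
    // returns `current` unchanged when it is already at the diff's target uuid;
    // throws IncompatibleClusterStateVersionException when it does not match the
    // diff's base (fromUuid), prompting the publisher to resend the full state
    ClusterState next = receivedDiff.apply(current);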
- if (fromUuid.equals(state.uuid) == false) { - throw new IncompatibleClusterStateVersionException(state.version, state.uuid, toVersion, fromUuid); + if (fromUuid.equals(state.stateUUID) == false) { + throw new IncompatibleClusterStateVersionException(state.version, state.stateUUID, toVersion, fromUuid); } - builder.uuid(toUuid); + builder.stateUUID(toUuid); builder.version(toVersion); builder.routingTable(routingTable.apply(state.routingTable)); builder.nodes(nodes.apply(state.nodes)); diff --git a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index 80420e325eb..41ddb49bb65 100644 --- a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.action.shard; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; @@ -30,11 +31,13 @@ import org.elasticsearch.cluster.routing.*; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.threadpool.ThreadPool; @@ -76,22 +79,22 @@ public class ShardStateAction extends AbstractComponent { transportService.registerRequestHandler(SHARD_FAILED_ACTION_NAME, ShardRoutingEntry.class, ThreadPool.Names.SAME, new ShardFailedTransportHandler()); } - public void shardFailed(final ShardRouting shardRouting, final String indexUUID, final String reason) { + public void shardFailed(final ShardRouting shardRouting, final String indexUUID, final String message, @Nullable final Throwable failure) { DiscoveryNode masterNode = clusterService.state().nodes().masterNode(); if (masterNode == null) { logger.warn("can't send shard failed for {}, no master known.", shardRouting); return; } - innerShardFailed(shardRouting, indexUUID, reason, masterNode); + innerShardFailed(shardRouting, indexUUID, masterNode, message, failure); } - public void resendShardFailed(final ShardRouting shardRouting, final String indexUUID, final String reason, final DiscoveryNode masterNode) { - logger.trace("{} re-sending failed shard for {}, indexUUID [{}], reason [{}]", shardRouting.shardId(), shardRouting, indexUUID, reason); - innerShardFailed(shardRouting, indexUUID, reason, masterNode); + public void resendShardFailed(final ShardRouting shardRouting, final String indexUUID, final DiscoveryNode masterNode, final String message, @Nullable final Throwable failure) { + logger.trace("{} re-sending failed shard for {}, indexUUID [{}], reason [{}]", failure, shardRouting.shardId(), shardRouting, indexUUID, message); + innerShardFailed(shardRouting, indexUUID, masterNode, message, failure); } - private void innerShardFailed(final 
ShardRouting shardRouting, final String indexUUID, final String reason, final DiscoveryNode masterNode) { - ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, reason); + private void innerShardFailed(final ShardRouting shardRouting, final String indexUUID, final DiscoveryNode masterNode, final String message, final Throwable failure) { + ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, message, failure); transportService.sendRequest(masterNode, SHARD_FAILED_ACTION_NAME, shardRoutingEntry, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { @Override @@ -104,20 +107,17 @@ public class ShardStateAction extends AbstractComponent { public void shardStarted(final ShardRouting shardRouting, String indexUUID, final String reason) { DiscoveryNode masterNode = clusterService.state().nodes().masterNode(); if (masterNode == null) { - logger.warn("can't send shard started for {}. no master known.", shardRouting); + logger.warn("{} can't send shard started for {}, no master known.", shardRouting.shardId(), shardRouting); return; } shardStarted(shardRouting, indexUUID, reason, masterNode); } public void shardStarted(final ShardRouting shardRouting, String indexUUID, final String reason, final DiscoveryNode masterNode) { - - ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, reason); - - logger.debug("sending shard started for {}", shardRoutingEntry); - + ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, reason, null); + logger.debug("{} sending shard started for {}", shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry); transportService.sendRequest(masterNode, - SHARD_STARTED_ACTION_NAME, new ShardRoutingEntry(shardRouting, indexUUID, reason), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { + SHARD_STARTED_ACTION_NAME, new ShardRoutingEntry(shardRouting, indexUUID, reason, null), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { @Override public void handleException(TransportException exp) { logger.warn("failed to send shard started to [{}]", exp, masterNode); @@ -127,9 +127,9 @@ public class ShardStateAction extends AbstractComponent { } private void handleShardFailureOnMaster(final ShardRoutingEntry shardRoutingEntry) { - logger.warn("{} received shard failed for {}", shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry); + logger.warn("{} received shard failed for {}", shardRoutingEntry.failure, shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry); failedShardQueue.add(shardRoutingEntry); - clusterService.submitStateUpdateTask("shard-failed (" + shardRoutingEntry.shardRouting + "), reason [" + shardRoutingEntry.reason + "]", Priority.HIGH, new ProcessedClusterStateUpdateTask() { + clusterService.submitStateUpdateTask("shard-failed (" + shardRoutingEntry.shardRouting + "), message [" + shardRoutingEntry.message + "]", Priority.HIGH, new ProcessedClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { @@ -145,26 +145,12 @@ public class ShardStateAction extends AbstractComponent { return currentState; } - MetaData metaData = currentState.getMetaData(); - List shardRoutingsToBeApplied = new ArrayList<>(shardRoutingEntries.size()); - for (int i = 0; i < shardRoutingEntries.size(); i++) { - ShardRoutingEntry shardRoutingEntry = shardRoutingEntries.get(i); - shardRoutingEntry.processed = true; - ShardRouting shardRouting = shardRoutingEntry.shardRouting; - IndexMetaData indexMetaData 
= metaData.index(shardRouting.index()); - // if there is no metadata or the current index is not of the right uuid, the index has been deleted while it was being allocated - // which is fine, we should just ignore this - if (indexMetaData == null) { - continue; - } - if (!indexMetaData.isSameUUID(shardRoutingEntry.indexUUID)) { - logger.debug("{} ignoring shard failed, different index uuid, current {}, got {}", shardRouting.shardId(), indexMetaData.getUUID(), shardRoutingEntry); - continue; - } - logger.debug("{} will apply shard failed {}", shardRouting.shardId(), shardRoutingEntry); - shardRoutingsToBeApplied.add(new FailedRerouteAllocation.FailedShard(shardRouting, shardRoutingEntry.reason)); + // mark all entries as processed + for (ShardRoutingEntry entry : shardRoutingEntries) { + entry.processed = true; + shardRoutingsToBeApplied.add(new FailedRerouteAllocation.FailedShard(entry.shardRouting, entry.message, entry.failure)); } RoutingAllocation.Result routingResult = allocationService.applyFailedShards(currentState, shardRoutingsToBeApplied); @@ -197,7 +183,7 @@ public class ShardStateAction extends AbstractComponent { // process started events as fast as possible, to make shards available startedShardsQueue.add(shardRoutingEntry); - clusterService.submitStateUpdateTask("shard-started (" + shardRoutingEntry.shardRouting + "), reason [" + shardRoutingEntry.reason + "]", Priority.URGENT, + clusterService.submitStateUpdateTask("shard-started (" + shardRoutingEntry.shardRouting + "), reason [" + shardRoutingEntry.message + "]", Priority.URGENT, new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { @@ -214,59 +200,12 @@ public class ShardStateAction extends AbstractComponent { return currentState; } - RoutingTable routingTable = currentState.routingTable(); - MetaData metaData = currentState.getMetaData(); - List shardRoutingToBeApplied = new ArrayList<>(shardRoutingEntries.size()); - for (int i = 0; i < shardRoutingEntries.size(); i++) { - ShardRoutingEntry shardRoutingEntry = shardRoutingEntries.get(i); - shardRoutingEntry.processed = true; - ShardRouting shardRouting = shardRoutingEntry.shardRouting; - try { - IndexMetaData indexMetaData = metaData.index(shardRouting.index()); - IndexRoutingTable indexRoutingTable = routingTable.index(shardRouting.index()); - // if there is no metadata, no routing table or the current index is not of the right uuid, the index has been deleted while it was being allocated - // which is fine, we should just ignore this - if (indexMetaData == null) { - continue; - } - if (indexRoutingTable == null) { - continue; - } - - if (!indexMetaData.isSameUUID(shardRoutingEntry.indexUUID)) { - logger.debug("{} ignoring shard started, different index uuid, current {}, got {}", shardRouting.shardId(), indexMetaData.getUUID(), shardRoutingEntry); - continue; - } - - // find the one that maps to us, if its already started, no need to do anything... 
- // the shard might already be started since the nodes that is starting the shards might get cluster events - // with the shard still initializing, and it will try and start it again (until the verification comes) - - IndexShardRoutingTable indexShardRoutingTable = indexRoutingTable.shard(shardRouting.id()); - - boolean applyShardEvent = true; - - for (ShardRouting entry : indexShardRoutingTable) { - if (shardRouting.currentNodeId().equals(entry.currentNodeId())) { - // we found the same shard that exists on the same node id - if (!entry.initializing()) { - // shard is in initialized state, skipping event (probable already started) - logger.debug("{} ignoring shard started event for {}, current state: {}", shardRouting.shardId(), shardRoutingEntry, entry.state()); - applyShardEvent = false; - } - } - } - - if (applyShardEvent) { - shardRoutingToBeApplied.add(shardRouting); - logger.debug("{} will apply shard started {}", shardRouting.shardId(), shardRoutingEntry); - } - - } catch (Throwable t) { - logger.error("{} unexpected failure while processing shard started [{}]", t, shardRouting.shardId(), shardRouting); - } + // mark all entries as processed + for (ShardRoutingEntry entry : shardRoutingEntries) { + entry.processed = true; + shardRoutingToBeApplied.add(entry.shardRouting); } if (shardRoutingToBeApplied.isEmpty()) { @@ -307,42 +246,44 @@ public class ShardStateAction extends AbstractComponent { static class ShardRoutingEntry extends TransportRequest { - private ShardRouting shardRouting; - - private String indexUUID = IndexMetaData.INDEX_UUID_NA_VALUE; - - private String reason; + ShardRouting shardRouting; + String indexUUID = IndexMetaData.INDEX_UUID_NA_VALUE; + String message; + Throwable failure; volatile boolean processed; // state field, no need to serialize ShardRoutingEntry() { } - private ShardRoutingEntry(ShardRouting shardRouting, String indexUUID, String reason) { + ShardRoutingEntry(ShardRouting shardRouting, String indexUUID, String message, @Nullable Throwable failure) { this.shardRouting = shardRouting; - this.reason = reason; this.indexUUID = indexUUID; + this.message = message; + this.failure = failure; } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); shardRouting = readShardRoutingEntry(in); - reason = in.readString(); indexUUID = in.readString(); + message = in.readString(); + failure = in.readThrowable(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); shardRouting.writeTo(out); - out.writeString(reason); out.writeString(indexUUID); + out.writeString(message); + out.writeThrowable(failure); } @Override public String toString() { - return "" + shardRouting + ", indexUUID [" + indexUUID + "], reason [" + reason + "]"; + return "" + shardRouting + ", indexUUID [" + indexUUID + "], message [" + message + "], failure [" + ExceptionsHelper.detailedMessage(failure) + "]"; } } } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java b/core/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java index c5d603ccca6..f12824de4ee 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java @@ -145,7 +145,6 @@ public class AliasValidator extends AbstractComponent { QueryParseContext context = indexQueryParserService.getParseContext(); try { context.reset(parser); - context.setAllowUnmappedFields(false); context.parseInnerFilter(); } finally { 
context.reset(null); diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index 4e7998ca381..deea1cec308 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -33,6 +33,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodeFilters; import org.elasticsearch.cluster.routing.HashFunction; import org.elasticsearch.cluster.routing.Murmur3HashFunction; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.compress.CompressedXContent; @@ -40,10 +41,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.loader.SettingsLoader; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.*; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.warmer.IndexWarmersMetaData; @@ -64,7 +62,7 @@ import static org.elasticsearch.common.settings.Settings.*; /** * */ -public class IndexMetaData implements Diffable { +public class IndexMetaData implements Diffable, FromXContentBuilder, ToXContent { public static final IndexMetaData PROTO = IndexMetaData.builder("") .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) @@ -169,7 +167,7 @@ public class IndexMetaData implements Diffable { public static final String SETTING_CREATION_DATE = "index.creation_date"; public static final String SETTING_PRIORITY = "index.priority"; public static final String SETTING_CREATION_DATE_STRING = "index.creation_date_string"; - public static final String SETTING_UUID = "index.uuid"; + public static final String SETTING_INDEX_UUID = "index.uuid"; public static final String SETTING_LEGACY_ROUTING_HASH_FUNCTION = "index.legacy.routing.hash.type"; public static final String SETTING_LEGACY_ROUTING_USE_TYPE = "index.legacy.routing.use_type"; public static final String SETTING_DATA_PATH = "index.data_path"; @@ -268,12 +266,12 @@ public class IndexMetaData implements Diffable { return index(); } - public String uuid() { - return settings.get(SETTING_UUID, INDEX_UUID_NA_VALUE); + public String indexUUID() { + return settings.get(SETTING_INDEX_UUID, INDEX_UUID_NA_VALUE); } - public String getUUID() { - return uuid(); + public String getIndexUUID() { + return indexUUID(); } /** @@ -281,11 +279,11 @@ public class IndexMetaData implements Diffable { */ public boolean isSameUUID(String otherUUID) { assert otherUUID != null; - assert uuid() != null; - if (INDEX_UUID_NA_VALUE.equals(otherUUID) || INDEX_UUID_NA_VALUE.equals(uuid())) { + assert indexUUID() != null; + if (INDEX_UUID_NA_VALUE.equals(otherUUID) || INDEX_UUID_NA_VALUE.equals(indexUUID())) { return true; } - return otherUUID.equals(getUUID()); + return otherUUID.equals(getIndexUUID()); } public long version() { @@ -515,6 +513,17 @@ public class IndexMetaData implements Diffable { return new IndexMetaDataDiff(in); } + 
@Override + public IndexMetaData fromXContent(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException { + return Builder.fromXContent(parser); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + Builder.toXContent(this, builder, params); + return builder; + } + private static class IndexMetaDataDiff implements Diff { private final String index; diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java new file mode 100644 index 00000000000..77c35fdcf50 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -0,0 +1,618 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster.metadata; + +import com.carrotsearch.hppc.cursors.ObjectCursor; +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.indices.IndexClosedException; + +import java.util.*; + +import static com.google.common.collect.Lists.newArrayList; +import static com.google.common.collect.Maps.newHashMap; + +public class IndexNameExpressionResolver { + + private final ImmutableList expressionResolvers; + + @Inject + public IndexNameExpressionResolver() { + expressionResolvers = ImmutableList.of(new WildcardExpressionResolver()); + } + + /** + * Same as {@link #concreteIndices(ClusterState, IndicesOptions, String...)}, but the index expressions and options + * are encapsulated in the specified request. + */ + public String[] concreteIndices(ClusterState state, IndicesRequest request) { + Context context = new Context(state, request.indicesOptions()); + return concreteIndices(context, request.indices()); + } + + /** + * Translates the provided index expression into actual concrete indices. + * + * @param state the cluster state containing all the data to resolve to expressions to concrete indices + * @param options defines how the aliases or indices need to be resolved to concrete indices + * @param indexExpressions expressions that can be resolved to alias or index names. 
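+ * For example, given a hypothetical alias logs that points at the indices logs-1 and logs-2, resolving the + * single expression logs returns both concrete index names.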
+ * @return the resolved concrete indices based on the cluster state, indices options and index expressions + * @throws IndexNotFoundException if one of the index expressions is pointing to a missing index or alias and the + * provided indices options in the context don't allow such a case, or if the final result of the indices resolution + * contains no indices and the indices options in the context don't allow such a case. + * @throws IllegalArgumentException if one of the aliases resolve to multiple indices and the provided + * indices options in the context don't allow such a case. + */ + public String[] concreteIndices(ClusterState state, IndicesOptions options, String... indexExpressions) { + Context context = new Context(state, options); + return concreteIndices(context, indexExpressions); + } + + String[] concreteIndices(Context context, String... indexExpressions) { + if (indexExpressions == null || indexExpressions.length == 0) { + indexExpressions = new String[]{MetaData.ALL}; + } + MetaData metaData = context.getState().metaData(); + IndicesOptions options = context.getOptions(); + boolean failClosed = options.forbidClosedIndices() && options.ignoreUnavailable() == false; + boolean failNoIndices = options.ignoreUnavailable() == false; + // If only one index is specified then whether we fail a request if an index is missing depends on the allow_no_indices + // option. At some point we should change this, because there shouldn't be a reason why whether a single index + // or multiple indices are specified yield different behaviour. + if (indexExpressions.length == 1) { + failNoIndices = options.allowNoIndices() == false; + } + + List expressions = Arrays.asList(indexExpressions); + for (ExpressionResolver expressionResolver : expressionResolvers) { + expressions = expressionResolver.resolve(context, expressions); + } + + if (expressions.isEmpty()) { + if (!options.allowNoIndices()) { + IndexNotFoundException infe = new IndexNotFoundException((String)null); + infe.setResources("index_expression", indexExpressions); + throw infe; + } else { + return Strings.EMPTY_ARRAY; + } + } + + List concreteIndices = new ArrayList<>(expressions.size()); + for (String expression : expressions) { + List indexMetaDatas; + IndexMetaData indexMetaData = metaData.getIndices().get(expression); + if (indexMetaData == null) { + ImmutableOpenMap indexAliasMap = metaData.aliases().get(expression); + if (indexAliasMap == null) { + if (failNoIndices) { + IndexNotFoundException infe = new IndexNotFoundException(expression); + infe.setResources("index_expression", expression); + throw infe; + + } else { + continue; + } + } + if (indexAliasMap.size() > 1 && !options.allowAliasesToMultipleIndices()) { + throw new IllegalArgumentException("Alias [" + expression + "] has more than one indices associated with it [" + Arrays.toString(indexAliasMap.keys().toArray(String.class)) + "], can't execute a single index op"); + } + indexMetaDatas = new ArrayList<>(indexAliasMap.size()); + for (ObjectObjectCursor cursor : indexAliasMap) { + indexMetaDatas.add(metaData.getIndices().get(cursor.key)); + } + } else { + indexMetaDatas = Collections.singletonList(indexMetaData); + } + + for (IndexMetaData found : indexMetaDatas) { + if (found.getState() == IndexMetaData.State.CLOSE) { + if (failClosed) { + throw new IndexClosedException(new Index(found.getIndex())); + } else { + if (options.forbidClosedIndices() == false) { + concreteIndices.add(found.getIndex()); + } + } + } else if (found.getState() == IndexMetaData.State.OPEN) 
{ + concreteIndices.add(found.getIndex()); + } else { + throw new IllegalStateException("index state [" + found.getState() + "] not supported"); + } + } + } + + if (options.allowNoIndices() == false && concreteIndices.isEmpty()) { + IndexNotFoundException infe = new IndexNotFoundException((String)null); + infe.setResources("index_expression", indexExpressions); + throw infe; + } + return concreteIndices.toArray(new String[concreteIndices.size()]); + } + + /** + * Utility method that allows to resolve an index expression to its corresponding single concrete index. + * Callers should make sure they provide proper {@link org.elasticsearch.action.support.IndicesOptions} + * that require a single index as a result. The indices resolution must in fact return a single index when + * using this method, an {@link IllegalArgumentException} gets thrown otherwise. + * + * @param request request containing the index or alias to be resolved to concrete index and + * the indices options to be used for the index resolution + * @throws IndexNotFoundException if the resolved index or alias provided doesn't exist + * @throws IllegalArgumentException if the index resolution lead to more than one index + * @return the concrete index obtained as a result of the index resolution + */ + public String concreteSingleIndex(ClusterState state, IndicesRequest request) { + String indexOrAlias = request.indices() != null && request.indices().length > 0 ? request.indices()[0] : null; + String[] indices = concreteIndices(state, request.indicesOptions(), indexOrAlias); + if (indices.length != 1) { + throw new IllegalArgumentException("unable to return a single index as the index and options provided got resolved to multiple indices"); + } + return indices[0]; + } + + /** + * Iterates through the list of indices and selects the effective list of filtering aliases for the + * given index. + *
+ * Only aliases with filters are returned. If the indices list contains a non-filtering reference to + * the index itself - null is returned. Returns null if no filtering is required.
+ */ + public String[] filteringAliases(ClusterState state, String index, String... expressions) { + // expand the aliases wildcard + List resolvedExpressions = expressions != null ? Arrays.asList(expressions) : Collections.emptyList(); + Context context = new Context(state, IndicesOptions.lenientExpandOpen()); + for (ExpressionResolver expressionResolver : expressionResolvers) { + resolvedExpressions = expressionResolver.resolve(context, resolvedExpressions); + } + + if (isAllIndices(resolvedExpressions)) { + return null; + } + // optimize for the most common single index/alias scenario + if (resolvedExpressions.size() == 1) { + String alias = resolvedExpressions.get(0); + IndexMetaData indexMetaData = state.metaData().getIndices().get(index); + if (indexMetaData == null) { + // Shouldn't happen + throw new IndexNotFoundException(index); + } + AliasMetaData aliasMetaData = indexMetaData.aliases().get(alias); + boolean filteringRequired = aliasMetaData != null && aliasMetaData.filteringRequired(); + if (!filteringRequired) { + return null; + } + return new String[]{alias}; + } + List filteringAliases = null; + for (String alias : resolvedExpressions) { + if (alias.equals(index)) { + return null; + } + + IndexMetaData indexMetaData = state.metaData().getIndices().get(index); + if (indexMetaData == null) { + // Shouldn't happen + throw new IndexNotFoundException(index); + } + + AliasMetaData aliasMetaData = indexMetaData.aliases().get(alias); + // Check that this is an alias for the current index + // Otherwise - skip it + if (aliasMetaData != null) { + boolean filteringRequired = aliasMetaData.filteringRequired(); + if (filteringRequired) { + // If filtering required - add it to the list of filters + if (filteringAliases == null) { + filteringAliases = newArrayList(); + } + filteringAliases.add(alias); + } else { + // If not, we have a non filtering alias for this index - no filtering needed + return null; + } + } + } + if (filteringAliases == null) { + return null; + } + return filteringAliases.toArray(new String[filteringAliases.size()]); + } + + /** + * Resolves the search routing if in the expression aliases are used. If expressions point to concrete indices + * or aliases with no routing defined the specified routing is used. + * + * @return routing values grouped by concrete index + */ + public Map> resolveSearchRouting(ClusterState state, @Nullable String routing, String... expressions) { + List resolvedExpressions = expressions != null ? 
Arrays.asList(expressions) : Collections.emptyList(); + Context context = new Context(state, IndicesOptions.lenientExpandOpen()); + for (ExpressionResolver expressionResolver : expressionResolvers) { + resolvedExpressions = expressionResolver.resolve(context, resolvedExpressions); + } + + if (isAllIndices(resolvedExpressions)) { + return resolveSearchRoutingAllIndices(state.metaData(), routing); + } + + if (resolvedExpressions.size() == 1) { + return resolveSearchRoutingSingleValue(state.metaData(), routing, resolvedExpressions.get(0)); + } + + Map> routings = null; + Set paramRouting = null; + // List of indices that don't require any routing + Set norouting = new HashSet<>(); + if (routing != null) { + paramRouting = Strings.splitStringByCommaToSet(routing); + } + + for (String expression : resolvedExpressions) { + ImmutableOpenMap indexToRoutingMap = state.metaData().getAliases().get(expression); + if (indexToRoutingMap != null && !indexToRoutingMap.isEmpty()) { + for (ObjectObjectCursor indexRouting : indexToRoutingMap) { + if (!norouting.contains(indexRouting.key)) { + if (!indexRouting.value.searchRoutingValues().isEmpty()) { + // Routing alias + if (routings == null) { + routings = newHashMap(); + } + Set r = routings.get(indexRouting.key); + if (r == null) { + r = new HashSet<>(); + routings.put(indexRouting.key, r); + } + r.addAll(indexRouting.value.searchRoutingValues()); + if (paramRouting != null) { + r.retainAll(paramRouting); + } + if (r.isEmpty()) { + routings.remove(indexRouting.key); + } + } else { + // Non-routing alias + if (!norouting.contains(indexRouting.key)) { + norouting.add(indexRouting.key); + if (paramRouting != null) { + Set r = new HashSet<>(paramRouting); + if (routings == null) { + routings = newHashMap(); + } + routings.put(indexRouting.key, r); + } else { + if (routings != null) { + routings.remove(indexRouting.key); + } + } + } + } + } + } + } else { + // Index + if (!norouting.contains(expression)) { + norouting.add(expression); + if (paramRouting != null) { + Set r = new HashSet<>(paramRouting); + if (routings == null) { + routings = newHashMap(); + } + routings.put(expression, r); + } else { + if (routings != null) { + routings.remove(expression); + } + } + } + } + + } + if (routings == null || routings.isEmpty()) { + return null; + } + return routings; + } + + private Map> resolveSearchRoutingSingleValue(MetaData metaData, @Nullable String routing, String aliasOrIndex) { + Map> routings = null; + Set paramRouting = null; + if (routing != null) { + paramRouting = Strings.splitStringByCommaToSet(routing); + } + + ImmutableOpenMap indexToRoutingMap = metaData.getAliases().get(aliasOrIndex); + if (indexToRoutingMap != null && !indexToRoutingMap.isEmpty()) { + // It's an alias + for (ObjectObjectCursor indexRouting : indexToRoutingMap) { + if (!indexRouting.value.searchRoutingValues().isEmpty()) { + // Routing alias + Set r = new HashSet<>(indexRouting.value.searchRoutingValues()); + if (paramRouting != null) { + r.retainAll(paramRouting); + } + if (!r.isEmpty()) { + if (routings == null) { + routings = newHashMap(); + } + routings.put(indexRouting.key, r); + } + } else { + // Non-routing alias + if (paramRouting != null) { + Set r = new HashSet<>(paramRouting); + if (routings == null) { + routings = newHashMap(); + } + routings.put(indexRouting.key, r); + } + } + } + } else { + // It's an index + if (paramRouting != null) { + routings = ImmutableMap.of(aliasOrIndex, paramRouting); + } + } + return routings; + } + + /** + * Sets the same routing for all 
indices + */ + private Map> resolveSearchRoutingAllIndices(MetaData metaData, String routing) { + if (routing != null) { + Set r = Strings.splitStringByCommaToSet(routing); + Map> routings = newHashMap(); + String[] concreteIndices = metaData.concreteAllIndices(); + for (String index : concreteIndices) { + routings.put(index, r); + } + return routings; + } + return null; + } + + /** + * Identifies whether the array containing index names given as argument refers to all indices + * The empty or null array identifies all indices + * + * @param aliasesOrIndices the array containing index names + * @return true if the provided array maps to all indices, false otherwise + */ + public static boolean isAllIndices(List aliasesOrIndices) { + return aliasesOrIndices == null || aliasesOrIndices.isEmpty() || isExplicitAllPattern(aliasesOrIndices); + } + + /** + * Identifies whether the array containing index names given as argument explicitly refers to all indices + * The empty or null array doesn't explicitly map to all indices + * + * @param aliasesOrIndices the array containing index names + * @return true if the provided array explicitly maps to all indices, false otherwise + */ + static boolean isExplicitAllPattern(List aliasesOrIndices) { + return aliasesOrIndices != null && aliasesOrIndices.size() == 1 && MetaData.ALL.equals(aliasesOrIndices.get(0)); + } + + /** + * Identifies whether the first argument (an array containing index names) is a pattern that matches all indices + * + * @param indicesOrAliases the array containing index names + * @param concreteIndices array containing the concrete indices that the first argument refers to + * @return true if the first argument is a pattern that maps to all available indices, false otherwise + */ + boolean isPatternMatchingAllIndices(MetaData metaData, String[] indicesOrAliases, String[] concreteIndices) { + // if we end up matching on all indices, check, if its a wildcard parameter, or a "-something" structure + if (concreteIndices.length == metaData.concreteAllIndices().length && indicesOrAliases.length > 0) { + + //we might have something like /-test1,+test1 that would identify all indices + //or something like /-test1 with test1 index missing and IndicesOptions.lenient() + if (indicesOrAliases[0].charAt(0) == '-') { + return true; + } + + //otherwise we check if there's any simple regex + for (String indexOrAlias : indicesOrAliases) { + if (Regex.isSimpleMatchPattern(indexOrAlias)) { + return true; + } + } + } + return false; + } + + final static class Context { + + private final ClusterState state; + private final IndicesOptions options; + + Context(ClusterState state, IndicesOptions options) { + this.state = state; + this.options = options; + } + + public ClusterState getState() { + return state; + } + + public IndicesOptions getOptions() { + return options; + } + } + + private interface ExpressionResolver { + + /** + * Resolves the list of expressions into other expressions if possible (possible concrete indices and aliases, but + * that isn't required). The provided implementations can also be left untouched. 
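+ * For example (a hypothetical cluster with open indices logs-1 and logs-2), the wildcard resolver below + * turns the expressions [logs-*, -logs-1] into [logs-2].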
+ * + * @return a new list with expressions based on the provided expressions + */ + List resolve(Context context, List expressions); + + } + + /** + * Resolves alias/index name expressions with wildcards into the corresponding concrete indices/aliases + */ + final static class WildcardExpressionResolver implements ExpressionResolver { + + @Override + public List resolve(Context context, List expressions) { + IndicesOptions options = context.getOptions(); + MetaData metaData = context.getState().metaData(); + if (options.expandWildcardsClosed() == false && options.expandWildcardsOpen() == false) { + return expressions; + } + + if (expressions.isEmpty() || (expressions.size() == 1 && MetaData.ALL.equals(expressions.get(0)))) { + if (options.expandWildcardsOpen() && options.expandWildcardsClosed()) { + return Arrays.asList(metaData.concreteAllIndices()); + } else if (options.expandWildcardsOpen()) { + return Arrays.asList(metaData.concreteAllOpenIndices()); + } else if (options.expandWildcardsClosed()) { + return Arrays.asList(metaData.concreteAllClosedIndices()); + } else { + return Collections.emptyList(); + } + } + + Set result = null; + for (int i = 0; i < expressions.size(); i++) { + String aliasOrIndex = expressions.get(i); + if (metaData.getAliasAndIndexMap().containsKey(aliasOrIndex)) { + if (result != null) { + result.add(aliasOrIndex); + } + continue; + } + boolean add = true; + if (aliasOrIndex.charAt(0) == '+') { + // if its the first, add empty result set + if (i == 0) { + result = new HashSet<>(); + } + add = true; + aliasOrIndex = aliasOrIndex.substring(1); + } else if (aliasOrIndex.charAt(0) == '-') { + // if its the first, fill it with all the indices... + if (i == 0) { + String[] concreteIndices; + if (options.expandWildcardsOpen() && options.expandWildcardsClosed()) { + concreteIndices = metaData.concreteAllIndices(); + } else if (options.expandWildcardsOpen()) { + concreteIndices = metaData.concreteAllOpenIndices(); + } else if (options.expandWildcardsClosed()) { + concreteIndices = metaData.concreteAllClosedIndices(); + } else { + assert false : "Shouldn't end up here"; + concreteIndices = Strings.EMPTY_ARRAY; + } + result = new HashSet<>(Arrays.asList(concreteIndices)); + } + add = false; + aliasOrIndex = aliasOrIndex.substring(1); + } + if (!Regex.isSimpleMatchPattern(aliasOrIndex)) { + if (!options.ignoreUnavailable() && !metaData.getAliasAndIndexMap().containsKey(aliasOrIndex)) { + IndexNotFoundException infe = new IndexNotFoundException(aliasOrIndex); + infe.setResources("index_or_alias", aliasOrIndex); + throw infe; + } + if (result != null) { + if (add) { + result.add(aliasOrIndex); + } else { + result.remove(aliasOrIndex); + } + } + continue; + } + if (result == null) { + // add all the previous ones... 
+ result = new HashSet<>(); + result.addAll(expressions.subList(0, i)); + } + String[] indices; + if (options.expandWildcardsOpen() && options.expandWildcardsClosed()) { + indices = metaData.concreteAllIndices(); + } else if (options.expandWildcardsOpen()) { + indices = metaData.concreteAllOpenIndices(); + } else if (options.expandWildcardsClosed()) { + indices = metaData.concreteAllClosedIndices(); + } else { + assert false : "this shouldn't get called if wildcards expand to none"; + indices = Strings.EMPTY_ARRAY; + } + boolean found = false; + // iterating over all concrete indices and see if there is a wildcard match + for (String index : indices) { + if (Regex.simpleMatch(aliasOrIndex, index)) { + found = true; + if (add) { + result.add(index); + } else { + result.remove(index); + } + } + } + // iterating over all aliases and see if there is a wildcard match + for (ObjectCursor cursor : metaData.getAliases().keys()) { + String alias = cursor.value; + if (Regex.simpleMatch(aliasOrIndex, alias)) { + found = true; + if (add) { + result.add(alias); + } else { + result.remove(alias); + } + } + } + if (!found && !options.allowNoIndices()) { + IndexNotFoundException infe = new IndexNotFoundException(aliasOrIndex); + infe.setResources("index_or_alias", aliasOrIndex); + throw infe; + } + } + if (result == null) { + return expressions; + } + if (result.isEmpty() && !options.allowNoIndices()) { + IndexNotFoundException infe = new IndexNotFoundException((String)null); + infe.setResources("index_or_alias", expressions.toArray(new String[0])); + throw infe; + } + return new ArrayList<>(result); + } + } + +} diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java index 741d173d8f9..4e3c19430e9 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java @@ -167,10 +167,10 @@ public class MappingMetaData extends AbstractDiffable { Version version) throws TimestampParsingException { try { // no need for unix timestamp parsing in 2.x - FormatDateTimeFormatter formatter = version.onOrAfter(Version.V_2_0_0) ? dateTimeFormatter : EPOCH_MILLIS_PARSER; + FormatDateTimeFormatter formatter = version.onOrAfter(Version.V_2_0_0_beta1) ? 
dateTimeFormatter : EPOCH_MILLIS_PARSER; return Long.toString(formatter.parser().parseMillis(timestampAsString)); } catch (RuntimeException e) { - if (version.before(Version.V_2_0_0)) { + if (version.before(Version.V_2_0_0_beta1)) { try { return Long.toString(dateTimeFormatter.parser().parseMillis(timestampAsString)); } catch (RuntimeException e1) { diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index 341e659e142..3715e64f7b1 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -25,16 +25,18 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.base.Predicate; import com.google.common.collect.*; - import org.apache.lucene.util.CollectionUtil; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.cluster.*; +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.Diffable; +import org.elasticsearch.cluster.DiffableUtils; import org.elasticsearch.cluster.DiffableUtils.KeyedReader; +import org.elasticsearch.cluster.InternalClusterInfoService; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider; import org.elasticsearch.cluster.service.InternalClusterService; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.HppcMaps; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -47,8 +49,7 @@ import org.elasticsearch.common.settings.loader.SettingsLoader; import org.elasticsearch.common.xcontent.*; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.index.Index; -import org.elasticsearch.indices.IndexClosedException; -import org.elasticsearch.indices.IndexMissingException; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.indices.store.IndicesStore; import org.elasticsearch.indices.ttl.IndicesTTLService; @@ -58,11 +59,9 @@ import org.elasticsearch.search.warmer.IndexWarmersMetaData; import java.io.IOException; import java.util.*; -import static com.google.common.collect.Lists.newArrayList; -import static com.google.common.collect.Maps.newHashMap; import static org.elasticsearch.common.settings.Settings.*; -public class MetaData implements Iterable, Diffable { +public class MetaData implements Iterable, Diffable, FromXContentBuilder, ToXContent { public static final MetaData PROTO = builder().build(); @@ -134,7 +133,7 @@ public class MetaData implements Iterable, Diffable { public static final String CONTEXT_MODE_GATEWAY = XContentContext.GATEWAY.toString(); - private final String uuid; + private final String clusterUUID; private final long version; private final Settings transientSettings; @@ -156,8 +155,8 @@ public class MetaData implements Iterable, Diffable { private final ImmutableOpenMap aliasAndIndexToIndexMap; @SuppressWarnings("unchecked") - MetaData(String uuid, long version, Settings transientSettings, Settings persistentSettings, ImmutableOpenMap indices, ImmutableOpenMap templates, ImmutableOpenMap customs) { - this.uuid = uuid; + MetaData(String clusterUUID, long version, 
Settings transientSettings, Settings persistentSettings, ImmutableOpenMap indices, ImmutableOpenMap templates, ImmutableOpenMap customs) { + this.clusterUUID = clusterUUID; this.version = version; this.transientSettings = transientSettings; this.persistentSettings = persistentSettings; @@ -254,8 +253,8 @@ public class MetaData implements Iterable, Diffable { return this.version; } - public String uuid() { - return this.uuid; + public String clusterUUID() { + return this.clusterUUID; } /** @@ -281,6 +280,10 @@ public class MetaData implements Iterable, Diffable { return aliases(); } + public ImmutableOpenMap getAliasAndIndexMap() { + return aliasAndIndexToIndexMap; + } + /** * Finds the specific index aliases that match with the specified aliases directly or partially via wildcards and * that point to the specified concrete indices or match partially with the indices via wildcards. @@ -471,6 +474,8 @@ public class MetaData implements Iterable, Diffable { /** * Returns indexing routing for the given index. */ + // TODO: This can be moved to IndexNameExpressionResolver too, but this means that we will support wildcards and other expressions + // in the index,bulk,update and delete apis. public String resolveIndexRouting(@Nullable String routing, String aliasOrIndex) { // Check if index is specified by an alias ImmutableOpenMap indexAliases = aliases.get(aliasOrIndex); @@ -497,441 +502,6 @@ public class MetaData implements Iterable, Diffable { return routing; } - public Map> resolveSearchRouting(@Nullable String routing, String aliasOrIndex) { - return resolveSearchRouting(routing, convertFromWildcards(new String[]{aliasOrIndex}, IndicesOptions.lenientExpandOpen())); - } - - public Map> resolveSearchRouting(@Nullable String routing, String[] aliasesOrIndices) { - if (isAllIndices(aliasesOrIndices)) { - return resolveSearchRoutingAllIndices(routing); - } - - aliasesOrIndices = convertFromWildcards(aliasesOrIndices, IndicesOptions.lenientExpandOpen()); - - if (aliasesOrIndices.length == 1) { - return resolveSearchRoutingSingleValue(routing, aliasesOrIndices[0]); - } - - Map> routings = null; - Set paramRouting = null; - // List of indices that don't require any routing - Set norouting = new HashSet<>(); - if (routing != null) { - paramRouting = Strings.splitStringByCommaToSet(routing); - } - - for (String aliasOrIndex : aliasesOrIndices) { - ImmutableOpenMap indexToRoutingMap = aliases.get(aliasOrIndex); - if (indexToRoutingMap != null && !indexToRoutingMap.isEmpty()) { - for (ObjectObjectCursor indexRouting : indexToRoutingMap) { - if (!norouting.contains(indexRouting.key)) { - if (!indexRouting.value.searchRoutingValues().isEmpty()) { - // Routing alias - if (routings == null) { - routings = newHashMap(); - } - Set r = routings.get(indexRouting.key); - if (r == null) { - r = new HashSet<>(); - routings.put(indexRouting.key, r); - } - r.addAll(indexRouting.value.searchRoutingValues()); - if (paramRouting != null) { - r.retainAll(paramRouting); - } - if (r.isEmpty()) { - routings.remove(indexRouting.key); - } - } else { - // Non-routing alias - if (!norouting.contains(indexRouting.key)) { - norouting.add(indexRouting.key); - if (paramRouting != null) { - Set r = new HashSet<>(paramRouting); - if (routings == null) { - routings = newHashMap(); - } - routings.put(indexRouting.key, r); - } else { - if (routings != null) { - routings.remove(indexRouting.key); - } - } - } - } - } - } - } else { - // Index - if (!norouting.contains(aliasOrIndex)) { - norouting.add(aliasOrIndex); - if (paramRouting != 
null) { - Set r = new HashSet<>(paramRouting); - if (routings == null) { - routings = newHashMap(); - } - routings.put(aliasOrIndex, r); - } else { - if (routings != null) { - routings.remove(aliasOrIndex); - } - } - } - } - - } - if (routings == null || routings.isEmpty()) { - return null; - } - return routings; - } - - private Map> resolveSearchRoutingSingleValue(@Nullable String routing, String aliasOrIndex) { - Map> routings = null; - Set paramRouting = null; - if (routing != null) { - paramRouting = Strings.splitStringByCommaToSet(routing); - } - - ImmutableOpenMap indexToRoutingMap = aliases.get(aliasOrIndex); - if (indexToRoutingMap != null && !indexToRoutingMap.isEmpty()) { - // It's an alias - for (ObjectObjectCursor indexRouting : indexToRoutingMap) { - if (!indexRouting.value.searchRoutingValues().isEmpty()) { - // Routing alias - Set r = new HashSet<>(indexRouting.value.searchRoutingValues()); - if (paramRouting != null) { - r.retainAll(paramRouting); - } - if (!r.isEmpty()) { - if (routings == null) { - routings = newHashMap(); - } - routings.put(indexRouting.key, r); - } - } else { - // Non-routing alias - if (paramRouting != null) { - Set r = new HashSet<>(paramRouting); - if (routings == null) { - routings = newHashMap(); - } - routings.put(indexRouting.key, r); - } - } - } - } else { - // It's an index - if (paramRouting != null) { - routings = ImmutableMap.of(aliasOrIndex, paramRouting); - } - } - return routings; - } - - /** - * Sets the same routing for all indices - */ - private Map> resolveSearchRoutingAllIndices(String routing) { - if (routing != null) { - Set r = Strings.splitStringByCommaToSet(routing); - Map> routings = newHashMap(); - String[] concreteIndices = concreteAllIndices(); - for (String index : concreteIndices) { - routings.put(index, r); - } - return routings; - } - return null; - } - - - /** - * Translates the provided indices or aliases, eventually containing wildcard expressions, into actual indices. - * - * @param indicesOptions how the aliases or indices need to be resolved to concrete indices - * @param aliasesOrIndices the aliases or indices to be resolved to concrete indices - * @return the obtained concrete indices - * @throws IndexMissingException if one of the aliases or indices is missing and the provided indices options - * don't allow such a case, or if the final result of the indices resolution is no indices and the indices options - * don't allow such a case. - * @throws IllegalArgumentException if one of the aliases resolve to multiple indices and the provided - * indices options don't allow such a case. - */ - public String[] concreteIndices(IndicesOptions indicesOptions, String... 
aliasesOrIndices) throws IndexMissingException, IllegalArgumentException { - if (indicesOptions.expandWildcardsOpen() || indicesOptions.expandWildcardsClosed()) { - if (isAllIndices(aliasesOrIndices)) { - String[] concreteIndices; - if (indicesOptions.expandWildcardsOpen() && indicesOptions.expandWildcardsClosed()) { - concreteIndices = concreteAllIndices(); - } else if (indicesOptions.expandWildcardsOpen()) { - concreteIndices = concreteAllOpenIndices(); - } else { - concreteIndices = concreteAllClosedIndices(); - } - - if (!indicesOptions.allowNoIndices() && concreteIndices.length == 0) { - throw new IndexMissingException(new Index("_all")); - } - return concreteIndices; - } - - aliasesOrIndices = convertFromWildcards(aliasesOrIndices, indicesOptions); - } - - if (aliasesOrIndices == null || aliasesOrIndices.length == 0) { - if (!indicesOptions.allowNoIndices()) { - throw new IllegalArgumentException("no indices were specified and wildcard expansion is disabled."); - } else { - return Strings.EMPTY_ARRAY; - } - } - - boolean failClosed = indicesOptions.forbidClosedIndices() && !indicesOptions.ignoreUnavailable(); - - // optimize for single element index (common case) - if (aliasesOrIndices.length == 1) { - return concreteIndices(aliasesOrIndices[0], indicesOptions, !indicesOptions.allowNoIndices()); - } - - // check if its a possible aliased index, if not, just return the passed array - boolean possiblyAliased = false; - boolean closedIndices = false; - for (String index : aliasesOrIndices) { - IndexMetaData indexMetaData = indices.get(index); - if (indexMetaData == null) { - possiblyAliased = true; - break; - } else { - if (indicesOptions.forbidClosedIndices() && indexMetaData.getState() == IndexMetaData.State.CLOSE) { - if (failClosed) { - throw new IndexClosedException(new Index(index)); - } else { - closedIndices = true; - } - } - } - } - if (!possiblyAliased) { - if (closedIndices) { - Set actualIndices = new HashSet<>(Arrays.asList(aliasesOrIndices)); - actualIndices.retainAll(new HashSet(Arrays.asList(allOpenIndices))); - return actualIndices.toArray(new String[actualIndices.size()]); - } else { - return aliasesOrIndices; - } - } - - Set actualIndices = new HashSet<>(); - for (String aliasOrIndex : aliasesOrIndices) { - String[] indices = concreteIndices(aliasOrIndex, indicesOptions, !indicesOptions.ignoreUnavailable()); - Collections.addAll(actualIndices, indices); - } - - if (!indicesOptions.allowNoIndices() && actualIndices.isEmpty()) { - throw new IndexMissingException(new Index(Arrays.toString(aliasesOrIndices))); - } - return actualIndices.toArray(new String[actualIndices.size()]); - } - - /** - * Utility method that allows to resolve an index or alias to its corresponding single concrete index. - * Callers should make sure they provide proper {@link org.elasticsearch.action.support.IndicesOptions} - * that require a single index as a result. The indices resolution must in fact return a single index when - * using this method, an {@link IllegalArgumentException} gets thrown otherwise. 
- * - * @param indexOrAlias the index or alias to be resolved to concrete index - * @param indicesOptions the indices options to be used for the index resolution - * @return the concrete index obtained as a result of the index resolution - * @throws IndexMissingException if the index or alias provided doesn't exist - * @throws IllegalArgumentException if the index resolution lead to more than one index - */ - public String concreteSingleIndex(String indexOrAlias, IndicesOptions indicesOptions) throws IndexMissingException, IllegalArgumentException { - String[] indices = concreteIndices(indicesOptions, indexOrAlias); - if (indices.length != 1) { - throw new IllegalArgumentException("unable to return a single index as the index and options provided got resolved to multiple indices"); - } - return indices[0]; - } - - private String[] concreteIndices(String aliasOrIndex, IndicesOptions options, boolean failNoIndices) throws IndexMissingException, IllegalArgumentException { - boolean failClosed = options.forbidClosedIndices() && !options.ignoreUnavailable(); - - // a quick check, if this is an actual index, if so, return it - IndexMetaData indexMetaData = indices.get(aliasOrIndex); - if (indexMetaData != null) { - if (indexMetaData.getState() == IndexMetaData.State.CLOSE) { - if (failClosed) { - throw new IndexClosedException(new Index(aliasOrIndex)); - } else { - return options.forbidClosedIndices() ? Strings.EMPTY_ARRAY : new String[]{aliasOrIndex}; - } - } else { - return new String[]{aliasOrIndex}; - } - } - // not an actual index, fetch from an alias - String[] indices = aliasAndIndexToIndexMap.getOrDefault(aliasOrIndex, Strings.EMPTY_ARRAY); - if (indices.length == 0 && failNoIndices) { - throw new IndexMissingException(new Index(aliasOrIndex)); - } - if (indices.length > 1 && !options.allowAliasesToMultipleIndices()) { - throw new IllegalArgumentException("Alias [" + aliasOrIndex + "] has more than one indices associated with it [" + Arrays.toString(indices) + "], can't execute a single index op"); - } - - // No need to check whether indices referred by aliases are closed, because there are no closed indices. - if (allClosedIndices.length == 0) { - return indices; - } - - switch (indices.length) { - case 0: - return indices; - case 1: - indexMetaData = this.indices.get(indices[0]); - if (indexMetaData != null && indexMetaData.getState() == IndexMetaData.State.CLOSE) { - if (failClosed) { - throw new IndexClosedException(new Index(indexMetaData.getIndex())); - } else { - if (options.forbidClosedIndices()) { - return Strings.EMPTY_ARRAY; - } - } - } - return indices; - default: - ObjectArrayList concreteIndices = new ObjectArrayList<>(); - for (String index : indices) { - indexMetaData = this.indices.get(index); - if (indexMetaData != null) { - if (indexMetaData.getState() == IndexMetaData.State.CLOSE) { - if (failClosed) { - throw new IndexClosedException(new Index(indexMetaData.getIndex())); - } else if (!options.forbidClosedIndices()) { - concreteIndices.add(index); - } - } else if (indexMetaData.getState() == IndexMetaData.State.OPEN) { - concreteIndices.add(index); - } else { - throw new IllegalStateException("index state [" + indexMetaData.getState() + "] not supported"); - } - } - } - return concreteIndices.toArray(String.class); - } - } - - /** - * Converts a list of indices or aliases wildcards, and special +/- signs, into their respective full matches. It - * won't convert only to indices, but also to aliases. 
For example, alias_* will expand to alias_1 and alias_2, not - * to the respective indices those aliases point to. - */ - public String[] convertFromWildcards(String[] aliasesOrIndices, IndicesOptions indicesOptions) { - if (aliasesOrIndices == null) { - return null; - } - Set result = null; - for (int i = 0; i < aliasesOrIndices.length; i++) { - String aliasOrIndex = aliasesOrIndices[i]; - if (aliasAndIndexToIndexMap.containsKey(aliasOrIndex)) { - if (result != null) { - result.add(aliasOrIndex); - } - continue; - } - boolean add = true; - if (aliasOrIndex.charAt(0) == '+') { - // if its the first, add empty result set - if (i == 0) { - result = new HashSet<>(); - } - add = true; - aliasOrIndex = aliasOrIndex.substring(1); - } else if (aliasOrIndex.charAt(0) == '-') { - // if its the first, fill it with all the indices... - if (i == 0) { - String[] concreteIndices; - if (indicesOptions.expandWildcardsOpen() && indicesOptions.expandWildcardsClosed()) { - concreteIndices = concreteAllIndices(); - } else if (indicesOptions.expandWildcardsOpen()) { - concreteIndices = concreteAllOpenIndices(); - } else if (indicesOptions.expandWildcardsClosed()) { - concreteIndices = concreteAllClosedIndices(); - } else { - assert false : "Shouldn't end up here"; - concreteIndices = Strings.EMPTY_ARRAY; - } - result = new HashSet<>(Arrays.asList(concreteIndices)); - } - add = false; - aliasOrIndex = aliasOrIndex.substring(1); - } - if (!Regex.isSimpleMatchPattern(aliasOrIndex)) { - if (!indicesOptions.ignoreUnavailable() && !aliasAndIndexToIndexMap.containsKey(aliasOrIndex)) { - throw new IndexMissingException(new Index(aliasOrIndex)); - } - if (result != null) { - if (add) { - result.add(aliasOrIndex); - } else { - result.remove(aliasOrIndex); - } - } - continue; - } - if (result == null) { - // add all the previous ones... 
- result = new HashSet<>(); - result.addAll(Arrays.asList(aliasesOrIndices).subList(0, i)); - } - String[] indices; - if (indicesOptions.expandWildcardsOpen() && indicesOptions.expandWildcardsClosed()) { - indices = concreteAllIndices(); - } else if (indicesOptions.expandWildcardsOpen()) { - indices = concreteAllOpenIndices(); - } else if (indicesOptions.expandWildcardsClosed()) { - indices = concreteAllClosedIndices(); - } else { - assert false : "convertFromWildcards shouldn't get called if wildcards expansion is disabled"; - indices = Strings.EMPTY_ARRAY; - } - boolean found = false; - // iterating over all concrete indices and see if there is a wildcard match - for (String index : indices) { - if (Regex.simpleMatch(aliasOrIndex, index)) { - found = true; - if (add) { - result.add(index); - } else { - result.remove(index); - } - } - } - // iterating over all aliases and see if there is a wildcard match - for (ObjectCursor cursor : aliases.keys()) { - String alias = cursor.value; - if (Regex.simpleMatch(aliasOrIndex, alias)) { - found = true; - if (add) { - result.add(alias); - } else { - result.remove(alias); - } - } - } - if (!found && !indicesOptions.allowNoIndices()) { - throw new IndexMissingException(new Index(aliasOrIndex)); - } - } - if (result == null) { - return aliasesOrIndices; - } - if (result.isEmpty() && !indicesOptions.allowNoIndices()) { - throw new IndexMissingException(new Index(Arrays.toString(aliasesOrIndices))); - } - return result.toArray(new String[result.size()]); - } - public boolean hasIndex(String index) { return indices.containsKey(index); } @@ -988,129 +558,26 @@ public class MetaData implements Iterable, Diffable { return numberOfShards(); } - - /** - * Iterates through the list of indices and selects the effective list of filtering aliases for the - * given index. - *
- * Only aliases with filters are returned. If the indices list contains a non-filtering reference to - * the index itself - null is returned. Returns null if no filtering is required.
- */ - public String[] filteringAliases(String index, String... indicesOrAliases) { - // expand the aliases wildcard - indicesOrAliases = convertFromWildcards(indicesOrAliases, IndicesOptions.lenientExpandOpen()); - - if (isAllIndices(indicesOrAliases)) { - return null; - } - // optimize for the most common single index/alias scenario - if (indicesOrAliases.length == 1) { - String alias = indicesOrAliases[0]; - IndexMetaData indexMetaData = this.indices.get(index); - if (indexMetaData == null) { - // Shouldn't happen - throw new IndexMissingException(new Index(index)); - } - AliasMetaData aliasMetaData = indexMetaData.aliases().get(alias); - boolean filteringRequired = aliasMetaData != null && aliasMetaData.filteringRequired(); - if (!filteringRequired) { - return null; - } - return new String[]{alias}; - } - List filteringAliases = null; - for (String alias : indicesOrAliases) { - if (alias.equals(index)) { - return null; - } - - IndexMetaData indexMetaData = this.indices.get(index); - if (indexMetaData == null) { - // Shouldn't happen - throw new IndexMissingException(new Index(index)); - } - - AliasMetaData aliasMetaData = indexMetaData.aliases().get(alias); - // Check that this is an alias for the current index - // Otherwise - skip it - if (aliasMetaData != null) { - boolean filteringRequired = aliasMetaData.filteringRequired(); - if (filteringRequired) { - // If filtering required - add it to the list of filters - if (filteringAliases == null) { - filteringAliases = newArrayList(); - } - filteringAliases.add(alias); - } else { - // If not, we have a non filtering alias for this index - no filtering needed - return null; - } - } - } - if (filteringAliases == null) { - return null; - } - return filteringAliases.toArray(new String[filteringAliases.size()]); - } - - /** - * Identifies whether the array containing index names given as argument refers to all indices - * The empty or null array identifies all indices - * - * @param aliasesOrIndices the array containing index names - * @return true if the provided array maps to all indices, false otherwise - */ - public static boolean isAllIndices(String[] aliasesOrIndices) { - return aliasesOrIndices == null || aliasesOrIndices.length == 0 || isExplicitAllPattern(aliasesOrIndices); - } - /** * Identifies whether the array containing type names given as argument refers to all types * The empty or null array identifies all types * - * @param types the array containing index names - * @return true if the provided array maps to all indices, false otherwise + * @param types the array containing types + * @return true if the provided array maps to all types, false otherwise */ public static boolean isAllTypes(String[] types) { - return types == null || types.length == 0 || isExplicitAllPattern(types); + return types == null || types.length == 0 || isExplicitAllType(types); } /** - * Identifies whether the array containing index names given as argument explicitly refers to all indices - * The empty or null array doesn't explicitly map to all indices + * Identifies whether the array containing type names given as argument explicitly refers to all types + * The empty or null array doesn't explicitly map to all types * - * @param aliasesOrIndices the array containing index names - * @return true if the provided array explicitly maps to all indices, false otherwise + * @param types the array containing index names + * @return true if the provided array explicitly maps to all types, false otherwise */ - public static boolean 
isExplicitAllPattern(String[] aliasesOrIndices) { - return aliasesOrIndices != null && aliasesOrIndices.length == 1 && ALL.equals(aliasesOrIndices[0]); - } - - /** - * Identifies whether the first argument (an array containing index names) is a pattern that matches all indices - * - * @param indicesOrAliases the array containing index names - * @param concreteIndices array containing the concrete indices that the first argument refers to - * @return true if the first argument is a pattern that maps to all available indices, false otherwise - */ - public boolean isPatternMatchingAllIndices(String[] indicesOrAliases, String[] concreteIndices) { - // if we end up matching on all indices, check, if its a wildcard parameter, or a "-something" structure - if (concreteIndices.length == concreteAllIndices().length && indicesOrAliases.length > 0) { - - //we might have something like /-test1,+test1 that would identify all indices - //or something like /-test1 with test1 index missing and IndicesOptions.lenient() - if (indicesOrAliases[0].charAt(0) == '-') { - return true; - } - - //otherwise we check if there's any simple regex - for (String indexOrAlias : indicesOrAliases) { - if (Regex.isSimpleMatchPattern(indexOrAlias)) { - return true; - } - } - } - return false; + public static boolean isExplicitAllType(String[] types) { + return types != null && types.length == 1 && ALL.equals(types[0]); } /** @@ -1169,11 +636,22 @@ public class MetaData implements Iterable, Diffable { return new MetaDataDiff(in); } + @Override + public MetaData fromXContent(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException { + return Builder.fromXContent(parser); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + Builder.toXContent(this, builder, params); + return builder; + } + private static class MetaDataDiff implements Diff { private long version; - private String uuid; + private String clusterUUID; private Settings transientSettings; private Settings persistentSettings; @@ -1183,7 +661,7 @@ public class MetaData implements Iterable, Diffable { public MetaDataDiff(MetaData before, MetaData after) { - uuid = after.uuid; + clusterUUID = after.clusterUUID; version = after.version; transientSettings = after.transientSettings; persistentSettings = after.persistentSettings; @@ -1193,7 +671,7 @@ public class MetaData implements Iterable, Diffable { } public MetaDataDiff(StreamInput in) throws IOException { - uuid = in.readString(); + clusterUUID = in.readString(); version = in.readLong(); transientSettings = Settings.readSettingsFromStream(in); persistentSettings = Settings.readSettingsFromStream(in); @@ -1214,7 +692,7 @@ public class MetaData implements Iterable, Diffable { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeString(uuid); + out.writeString(clusterUUID); out.writeLong(version); Settings.writeSettingsToStream(transientSettings, out); Settings.writeSettingsToStream(persistentSettings, out); @@ -1226,7 +704,7 @@ public class MetaData implements Iterable, Diffable { @Override public MetaData apply(MetaData part) { Builder builder = builder(); - builder.uuid(uuid); + builder.clusterUUID(clusterUUID); builder.version(version); builder.transientSettings(transientSettings); builder.persistentSettings(persistentSettings); @@ -1241,7 +719,7 @@ public class MetaData implements Iterable, Diffable { public MetaData readFrom(StreamInput in) throws IOException { Builder builder = new Builder(); 
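// read back in the same order writeTo emits: version, cluster UUID, transient and persistent settings, then the indices, templates and customs maps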
builder.version = in.readLong(); - builder.uuid = in.readString(); + builder.clusterUUID = in.readString(); builder.transientSettings(readSettingsFromStream(in)); builder.persistentSettings(readSettingsFromStream(in)); int size = in.readVInt(); @@ -1264,7 +742,7 @@ public class MetaData implements Iterable, Diffable { @Override public void writeTo(StreamOutput out) throws IOException { out.writeLong(version); - out.writeString(uuid); + out.writeString(clusterUUID); writeSettingsToStream(transientSettings, out); writeSettingsToStream(persistentSettings, out); out.writeVInt(indices.size()); @@ -1351,7 +829,7 @@ public class MetaData implements Iterable, Diffable { } if (newPersistentSettings != null) { - return new MetaData(metaData.uuid(), + return new MetaData(metaData.clusterUUID(), metaData.version(), metaData.transientSettings(), newPersistentSettings.build(), @@ -1366,7 +844,7 @@ public class MetaData implements Iterable, Diffable { public static class Builder { - private String uuid; + private String clusterUUID; private long version; private Settings transientSettings = Settings.Builder.EMPTY_SETTINGS; @@ -1377,14 +855,14 @@ public class MetaData implements Iterable, Diffable { private final ImmutableOpenMap.Builder customs; public Builder() { - uuid = "_na_"; + clusterUUID = "_na_"; indices = ImmutableOpenMap.builder(); templates = ImmutableOpenMap.builder(); customs = ImmutableOpenMap.builder(); } public Builder(MetaData metaData) { - this.uuid = metaData.uuid; + this.clusterUUID = metaData.clusterUUID; this.transientSettings = metaData.transientSettings; this.persistentSettings = metaData.persistentSettings; this.version = metaData.version; @@ -1477,7 +955,7 @@ public class MetaData implements Iterable, Diffable { for (String index : indices) { IndexMetaData indexMetaData = this.indices.get(index); if (indexMetaData == null) { - throw new IndexMissingException(new Index(index)); + throw new IndexNotFoundException(index); } put(IndexMetaData.builder(indexMetaData) .settings(settingsBuilder().put(indexMetaData.settings()).put(settings))); @@ -1492,7 +970,7 @@ public class MetaData implements Iterable, Diffable { for (String index : indices) { IndexMetaData indexMetaData = this.indices.get(index); if (indexMetaData == null) { - throw new IndexMissingException(new Index(index)); + throw new IndexNotFoundException(index); } put(IndexMetaData.builder(indexMetaData).numberOfReplicas(numberOfReplicas)); } @@ -1522,20 +1000,20 @@ public class MetaData implements Iterable, Diffable { return this; } - public Builder uuid(String uuid) { - this.uuid = uuid; + public Builder clusterUUID(String clusterUUID) { + this.clusterUUID = clusterUUID; return this; } - public Builder generateUuidIfNeeded() { - if (uuid.equals("_na_")) { - uuid = Strings.randomBase64UUID(); + public Builder generateClusterUuidIfNeeded() { + if (clusterUUID.equals("_na_")) { + clusterUUID = Strings.randomBase64UUID(); } return this; } public MetaData build() { - return new MetaData(uuid, version, transientSettings, persistentSettings, indices.build(), templates.build(), customs.build()); + return new MetaData(clusterUUID, version, transientSettings, persistentSettings, indices.build(), templates.build(), customs.build()); } public static String toXContent(MetaData metaData) throws IOException { @@ -1552,7 +1030,7 @@ public class MetaData implements Iterable, Diffable { builder.startObject("meta-data"); builder.field("version", metaData.version()); - builder.field("uuid", metaData.uuid); + builder.field("cluster_uuid", 
metaData.clusterUUID); if (!metaData.persistentSettings().getAsMap().isEmpty()) { builder.startObject("settings"); @@ -1644,8 +1122,8 @@ public class MetaData implements Iterable, Diffable { } else if (token.isValue()) { if ("version".equals(currentFieldName)) { builder.version = parser.longValue(); - } else if ("uuid".equals(currentFieldName)) { - builder.uuid = parser.text(); + } else if ("cluster_uuid".equals(currentFieldName) || "uuid".equals(currentFieldName)) { + builder.clusterUUID = parser.text(); } } } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 87dd5e76010..1b97802a449 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -332,7 +332,7 @@ public class MetaDataCreateIndexService extends AbstractComponent { indexSettingsBuilder.put(SETTING_CREATION_DATE, new DateTime(DateTimeZone.UTC).getMillis()); } - indexSettingsBuilder.put(SETTING_UUID, Strings.randomBase64UUID()); + indexSettingsBuilder.put(SETTING_INDEX_UUID, Strings.randomBase64UUID()); Settings actualIndexSettings = indexSettingsBuilder.build(); diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java index 3d5d938bde4..10bc32190a2 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java @@ -34,8 +34,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.FutureUtils; -import org.elasticsearch.index.Index; -import org.elasticsearch.indices.IndexMissingException; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.threadpool.ThreadPool; import java.util.concurrent.ScheduledFuture; @@ -116,7 +115,7 @@ public class MetaDataDeleteIndexService extends AbstractComponent { @Override public ClusterState execute(final ClusterState currentState) { if (!currentState.metaData().hasConcreteIndex(request.index)) { - throw new IndexMissingException(new Index(request.index)); + throw new IndexNotFoundException(request.index); } logger.info("[{}] deleting index", request.index); diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java index 7ab2c08f56d..fb2e933dcfb 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java @@ -33,10 +33,9 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.IndexService; -import org.elasticsearch.indices.IndexMissingException; import org.elasticsearch.indices.IndicesService; import java.util.List; @@ -76,7 +75,7 
@@ public class MetaDataIndexAliasesService extends AbstractComponent { for (AliasAction aliasAction : request.actions()) { aliasValidator.validateAliasAction(aliasAction, currentState.metaData()); if (!currentState.metaData().hasIndex(aliasAction.index())) { - throw new IndexMissingException(new Index(aliasAction.index())); + throw new IndexNotFoundException(aliasAction.index()); } } @@ -85,7 +84,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent { for (AliasAction aliasAction : request.actions()) { IndexMetaData indexMetaData = builder.get(aliasAction.index()); if (indexMetaData == null) { - throw new IndexMissingException(new Index(aliasAction.index())); + throw new IndexNotFoundException(aliasAction.index()); } // TODO: not copy (putAll) IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(indexMetaData); diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java index c1113f8e688..f680ac1d5e5 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java @@ -39,8 +39,7 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexException; -import org.elasticsearch.indices.IndexMissingException; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.indices.IndexPrimaryShardNotAllocatedException; import org.elasticsearch.rest.RestStatus; @@ -87,7 +86,7 @@ public class MetaDataIndexStateService extends AbstractComponent { for (String index : request.indices()) { IndexMetaData indexMetaData = currentState.metaData().index(index); if (indexMetaData == null) { - throw new IndexMissingException(new Index(index)); + throw new IndexNotFoundException(index); } if (indexMetaData.state() != IndexMetaData.State.CLOSE) { @@ -147,7 +146,7 @@ public class MetaDataIndexStateService extends AbstractComponent { for (String index : request.indices()) { IndexMetaData indexMetaData = currentState.metaData().index(index); if (indexMetaData == null) { - throw new IndexMissingException(new Index(index)); + throw new IndexNotFoundException(index); } if (indexMetaData.state() != IndexMetaData.State.OPEN) { indicesToOpen.add(index); @@ -167,11 +166,7 @@ public class MetaDataIndexStateService extends AbstractComponent { IndexMetaData indexMetaData = IndexMetaData.builder(currentState.metaData().index(index)).state(IndexMetaData.State.OPEN).build(); // The index might be closed because we couldn't import it due to old incompatible version // We need to check that this index can be upgraded to the current version - try { - indexMetaData = metaDataIndexUpgradeService.upgradeIndexMetaData(indexMetaData); - } catch (Exception ex) { - throw new IndexException(new Index(index), "cannot open the index due to upgrade failure", ex); - } + indexMetaData = metaDataIndexUpgradeService.upgradeIndexMetaData(indexMetaData); mdBuilder.put(indexMetaData, true); blocksBuilder.removeIndexBlock(index, INDEX_CLOSED_BLOCK); } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java index 21d92df4bd7..6d02325bfca 100644 --- 
a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java @@ -81,7 +81,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent { * If the index does not need an upgrade, it returns the index metadata unchanged; otherwise it returns a modified index metadata. If the index * cannot be updated, the method throws an exception. */ - public IndexMetaData upgradeIndexMetaData(IndexMetaData indexMetaData) throws Exception { + public IndexMetaData upgradeIndexMetaData(IndexMetaData indexMetaData) { // Throws an exception if there are too-old segments: checkSupportedVersion(indexMetaData); IndexMetaData newMetaData = upgradeLegacyRoutingSettings(indexMetaData); @@ -94,7 +94,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent { * that were created before Elasticsearch v0.90.0 should be upgraded using the upgrade plugin before they can * be opened by this version of Elasticsearch. */ - private void checkSupportedVersion(IndexMetaData indexMetaData) throws Exception { + private void checkSupportedVersion(IndexMetaData indexMetaData) { if (indexMetaData.getState() == IndexMetaData.State.OPEN && isSupportedVersion(indexMetaData) == false) { throw new IllegalStateException("The index [" + indexMetaData.getIndex() + "] was created before v0.90.0 and wasn't upgraded." + " This index should be opened using a version before " + Version.CURRENT.minimumCompatibilityVersion() @@ -122,9 +122,9 @@ public class MetaDataIndexUpgradeService extends AbstractComponent { * Elasticsearch 2.0 deprecated custom routing hash functions. So what we do here is that for old indices, we * move this old and deprecated node setting to an index setting so that we can keep things backward compatible.
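// ---- Editor's aside: a minimal, self-contained sketch, not part of the diff. ----
// It restates the version gate used by upgradeLegacyRoutingSettings() below: indices
// created before 2.0.0-beta1 get the legacy routing settings copied into their index
// settings, while indices created on or after it must not carry them at all. Versions
// are modeled as plain ints purely for illustration; the real code compares
// org.elasticsearch.Version instances.
class LegacyRoutingGateSketch {
    static final int V_2_0_0_BETA1 = 2000001; // hypothetical numeric id, for illustration only

    static boolean needsUpgrade(int creationVersion, String legacyHashFunctionSetting) {
        // mirrors the condition in the hunk below, under the assumptions above
        return legacyHashFunctionSetting == null && creationVersion < V_2_0_0_BETA1;
    }

    static void validatePost20(int creationVersion, String legacyHashFunctionSetting) {
        if (creationVersion >= V_2_0_0_BETA1 && legacyHashFunctionSetting != null) {
            throw new IllegalStateException("index created on or after 2.0 must not carry legacy routing settings");
        }
    }
}
// ---- end of aside ----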
*/ - private IndexMetaData upgradeLegacyRoutingSettings(IndexMetaData indexMetaData) throws Exception { + private IndexMetaData upgradeLegacyRoutingSettings(IndexMetaData indexMetaData) { if (indexMetaData.settings().get(IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION) == null - && indexMetaData.getCreationVersion().before(Version.V_2_0_0)) { + && indexMetaData.getCreationVersion().before(Version.V_2_0_0_beta1)) { // these settings need an upgrade Settings indexSettings = Settings.builder().put(indexMetaData.settings()) .put(IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION, pre20HashFunction) @@ -134,11 +134,11 @@ public class MetaDataIndexUpgradeService extends AbstractComponent { .version(indexMetaData.version()) .settings(indexSettings) .build(); - } else if (indexMetaData.getCreationVersion().onOrAfter(Version.V_2_0_0)) { + } else if (indexMetaData.getCreationVersion().onOrAfter(Version.V_2_0_0_beta1)) { if (indexMetaData.getSettings().get(IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION) != null || indexMetaData.getSettings().get(IndexMetaData.SETTING_LEGACY_ROUTING_USE_TYPE) != null) { - throw new IllegalStateException("Indices created on or after 2.0 should NOT contain [" + IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION - + "] + or [" + IndexMetaData.SETTING_LEGACY_ROUTING_USE_TYPE + "] in their index settings"); + throw new IllegalStateException("Index [" + indexMetaData.getIndex() + "] created on or after 2.0 should NOT contain [" + IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION + + "] + or [" + IndexMetaData.SETTING_LEGACY_ROUTING_USE_TYPE + "] in its index settings"); } } return indexMetaData; @@ -188,7 +188,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent { * missing units. */ private IndexMetaData addDefaultUnitsIfNeeded(IndexMetaData indexMetaData) { - if (indexMetaData.getCreationVersion().before(Version.V_2_0_0)) { + if (indexMetaData.getCreationVersion().before(Version.V_2_0_0_beta1)) { // TODO: can we somehow only do this *once* for a pre-2.0 index? Maybe we could stuff a "fake marker setting" here? Seems hackish... 
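// ---- Editor's aside: a hedged sketch, not part of the diff. addDefaultUnitsIfNeeded()
// rewrites settings of pre-2.0.0-beta1 indices whose values were stored as bare numbers,
// appending a default unit. The real code operates on Settings objects and knows which
// keys are time- vs byte-sized; this sketch assumes a time setting and assumes "ms" as
// the default unit.
class DefaultUnitsSketch {
    static String withDefaultTimeUnit(String value) {
        // "30" (legacy, unit-less) becomes "30ms"; "30s" already has a unit and is kept as-is
        return value.chars().allMatch(Character::isDigit) ? value + "ms" : value;
    }
}
// ---- end of aside ----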
// Created lazily if we find any settings that are missing units: Settings settings = indexMetaData.settings(); diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java index 46ee0cdf736..04d52b0ea55 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java @@ -37,13 +37,12 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.MergeResult; -import org.elasticsearch.indices.IndexMissingException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.InvalidTypeNameException; import org.elasticsearch.percolator.PercolatorService; @@ -347,7 +346,7 @@ public class MetaDataMappingService extends AbstractComponent { try { for (String index : request.indices()) { if (!currentState.metaData().hasIndex(index)) { - throw new IndexMissingException(new Index(index)); + throw new IndexNotFoundException(index); } } @@ -396,7 +395,7 @@ public class MetaDataMappingService extends AbstractComponent { // For example in MapperService we can't distinguish between a create index api call // and a put mapping api call, so we don't know which type existed before. // Also the order of the mappings may be backwards.
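// ---- Editor's aside: a sketch, not part of the diff. The same refactor repeats across
// the metadata services in the hunks above: instead of wrapping the name in an Index just
// to build an IndexMissingException, callers now throw IndexNotFoundException(name)
// directly. Shown with a stand-in exception type so the sketch is self-contained.
class MissingIndexCheckSketch {
    static class IndexNotFoundException extends RuntimeException { // stand-in, not the ES class
        IndexNotFoundException(String index) {
            super("no such index [" + index + "]");
        }
    }

    static void ensureIndexExists(java.util.Set<String> knownIndices, String index) {
        if (knownIndices.contains(index) == false) {
            throw new IndexNotFoundException(index); // new style: pass the name, not new Index(name)
        }
    }
}
// ---- end of aside ----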
- if (Version.indexCreated(indexService.getIndexSettings()).onOrAfter(Version.V_2_0_0) && newMapper.parentFieldMapper().active()) { + if (Version.indexCreated(indexService.getIndexSettings()).onOrAfter(Version.V_2_0_0_beta1) && newMapper.parentFieldMapper().active()) { IndexMetaData indexMetaData = currentState.metaData().index(index); for (ObjectCursor mapping : indexMetaData.mappings().values()) { if (newMapper.parentFieldMapper().type().equals(mapping.value.type())) { @@ -472,7 +471,7 @@ public class MetaDataMappingService extends AbstractComponent { for (String indexName : request.indices()) { IndexMetaData indexMetaData = currentState.metaData().index(indexName); if (indexMetaData == null) { - throw new IndexMissingException(new Index(indexName)); + throw new IndexNotFoundException(indexName); } MappingMetaData mappingMd = mappings.get(indexName); if (mappingMd != null) { diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java index 07e67f11e54..d23c571faa7 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java @@ -58,10 +58,13 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements private final DynamicSettings dynamicSettings; + private final IndexNameExpressionResolver indexNameExpressionResolver; + @Inject - public MetaDataUpdateSettingsService(Settings settings, ClusterService clusterService, AllocationService allocationService, @IndexDynamicSettings DynamicSettings dynamicSettings) { + public MetaDataUpdateSettingsService(Settings settings, ClusterService clusterService, AllocationService allocationService, @IndexDynamicSettings DynamicSettings dynamicSettings, IndexNameExpressionResolver indexNameExpressionResolver) { super(settings); this.clusterService = clusterService; + this.indexNameExpressionResolver = indexNameExpressionResolver; this.clusterService.add(this); this.allocationService = allocationService; this.dynamicSettings = dynamicSettings; @@ -215,7 +218,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements @Override public ClusterState execute(ClusterState currentState) { - String[] actualIndices = currentState.metaData().concreteIndices(IndicesOptions.strictExpand(), request.indices()); + String[] actualIndices = indexNameExpressionResolver.concreteIndices(currentState, IndicesOptions.strictExpand(), request.indices()); RoutingTable.Builder routingTableBuilder = RoutingTable.builder(currentState.routingTable()); MetaData.Builder metaDataBuilder = MetaData.builder(currentState.metaData()); diff --git a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java index 4b4c765b14a..8d63654e07e 100644 --- a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java +++ b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java @@ -29,6 +29,8 @@ import org.elasticsearch.common.network.NetworkUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.transport.TransportAddressSerializers; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; import java.util.Map; @@ 
-38,7 +40,7 @@ import static org.elasticsearch.common.transport.TransportAddressSerializers.add /** * A discovery node represents a node that is part of the cluster. */ -public class DiscoveryNode implements Streamable { +public class DiscoveryNode implements Streamable, ToXContent { /** * Minimum version of a node to communicate with. This version corresponds to the minimum compatibility version @@ -372,4 +374,20 @@ public class DiscoveryNode implements Streamable { } return sb.toString(); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(id(), XContentBuilder.FieldCaseConversion.NONE); + builder.field("name", name()); + builder.field("transport_address", address().toString()); + + builder.startObject("attributes"); + for (Map.Entry attr : attributes().entrySet()) { + builder.field(attr.getKey(), attr.getValue()); + } + builder.endObject(); + + builder.endObject(); + return builder; + } } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/AllocationId.java b/core/src/main/java/org/elasticsearch/cluster/routing/AllocationId.java new file mode 100644 index 00000000000..ffcf9f1e80c --- /dev/null +++ b/core/src/main/java/org/elasticsearch/cluster/routing/AllocationId.java @@ -0,0 +1,152 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster.routing; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + * Uniquely identifies an allocation. An allocation is a shard moving from unassigned to initializing, + * or relocation. + *

+ * Relocation is a special case, where the origin shard is relocating with a relocationId and the same id, and + * the target shard (only materialized in RoutingNodes) is initializing with the id set to the origin shard's + * relocationId. Once relocation is done, the new allocation id is set to the relocationId. This is similar + * behavior to how ShardRouting#currentNodeId is used. + */ +public class AllocationId implements ToXContent { + + private final String id; + private final String relocationId; + + AllocationId(StreamInput in) throws IOException { + this.id = in.readString(); + this.relocationId = in.readOptionalString(); + } + + public void writeTo(StreamOutput out) throws IOException { + out.writeString(this.id); + out.writeOptionalString(this.relocationId); + } + + private AllocationId(String id, String relocationId) { + this.id = id; + this.relocationId = relocationId; + } + + /** + * Creates a new allocation id for an initializing allocation. + */ + public static AllocationId newInitializing() { + return new AllocationId(Strings.randomBase64UUID(), null); + } + + /** + * Creates a new allocation id for the target initializing shard that is the result + * of a relocation. + */ + public static AllocationId newTargetRelocation(AllocationId allocationId) { + assert allocationId.getRelocationId() != null; + return new AllocationId(allocationId.getRelocationId(), allocationId.getId()); + } + + /** + * Creates a new allocation id for a shard that moves to be relocated, populating + * the transient holder for relocationId. + */ + public static AllocationId newRelocation(AllocationId allocationId) { + assert allocationId.getRelocationId() == null; + return new AllocationId(allocationId.getId(), Strings.randomBase64UUID()); + } + + /** + * Creates a new allocation id representing a cancelled relocation. + * + * Note that this is expected to be called on the allocation id + * of the *source* shard. + */ + public static AllocationId cancelRelocation(AllocationId allocationId) { + assert allocationId.getRelocationId() != null; + return new AllocationId(allocationId.getId(), null); + } + + /** + * Creates a new allocation id finalizing a relocation. + * + * Note that this is expected to be called on the allocation id + * of the *target* shard and thus it only needs to clear the relocating id. + */ + public static AllocationId finishRelocation(AllocationId allocationId) { + assert allocationId.getRelocationId() != null; + return new AllocationId(allocationId.getId(), null); + } + + /** + * The allocation id uniquely identifying an allocation. Note that if it is a relocation, + * the {@link #getRelocationId()} needs to be taken into account as well. + */ + public String getId() { + return id; + } + + /** + * The transient relocation id holding the unique id that is used for relocation. + */ + public String getRelocationId() { + return relocationId; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + AllocationId that = (AllocationId) o; + if (!id.equals(that.id)) return false; + return !(relocationId != null ? !relocationId.equals(that.relocationId) : that.relocationId != null); + } + + @Override + public int hashCode() { + int result = id.hashCode(); + result = 31 * result + (relocationId != null ? relocationId.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "[id=" + id + (relocationId == null ?
"" : ", rId=" + relocationId) + "]"; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject("allocation_id"); + builder.field("id", id); + if (relocationId != null) { + builder.field("relocation_id", relocationId); + } + builder.endObject(); + return builder; + } +} diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java index 55fa01747b3..ef5011f4dbb 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java @@ -53,6 +53,7 @@ public class IndexShardRoutingTable implements Iterable { final ImmutableList shards; final ImmutableList activeShards; final ImmutableList assignedShards; + final static ImmutableList NO_SHARDS = ImmutableList.of(); final boolean allShardsStarted; /** @@ -279,6 +280,16 @@ public class IndexShardRoutingTable implements Iterable { return new PlainShardIterator(shardId, ordered); } + /** + * Returns true if no primaries are active or initializing for this shard + */ + private boolean noPrimariesActive() { + if (!primaryAsList.isEmpty() && !primaryAsList.get(0).active() && !primaryAsList.get(0).initializing()) { + return true; + } + return false; + } + /** * Returns an iterator only on the primary shard. */ @@ -287,9 +298,8 @@ public class IndexShardRoutingTable implements Iterable { } public ShardIterator primaryActiveInitializingShardIt() { - if (!primaryAsList.isEmpty() && !primaryAsList.get(0).active() && !primaryAsList.get(0).initializing()) { - List primaryList = ImmutableList.of(); - return new PlainShardIterator(shardId, primaryList); + if (noPrimariesActive()) { + return new PlainShardIterator(shardId, NO_SHARDS); } return primaryShardIt(); } @@ -312,6 +322,49 @@ public class IndexShardRoutingTable implements Iterable { return new PlainShardIterator(shardId, ordered); } + public ShardIterator replicaActiveInitializingShardIt() { + // If the primaries are unassigned, return an empty list (there aren't + // any replicas to query anyway) + if (noPrimariesActive()) { + return new PlainShardIterator(shardId, NO_SHARDS); + } + + LinkedList ordered = new LinkedList<>(); + for (ShardRouting replica : shuffler.shuffle(replicas)) { + if (replica.active()) { + ordered.addFirst(replica); + } else if (replica.initializing()) { + ordered.addLast(replica); + } + } + return new PlainShardIterator(shardId, ordered); + } + + public ShardIterator replicaFirstActiveInitializingShardsIt() { + // If the primaries are unassigned, return an empty list (there aren't + // any replicas to query anyway) + if (noPrimariesActive()) { + return new PlainShardIterator(shardId, NO_SHARDS); + } + + ArrayList ordered = new ArrayList<>(activeShards.size() + allInitializingShards.size()); + // fill it in a randomized fashion with the active replicas + for (ShardRouting replica : shuffler.shuffle(replicas)) { + if (replica.active()) { + ordered.add(replica); + } + } + + // Add the primary shard + ordered.add(primary); + + // Add initializing shards last + if (!allInitializingShards.isEmpty()) { + ordered.addAll(allInitializingShards); + } + return new PlainShardIterator(shardId, ordered); + } + public ShardIterator onlyNodeActiveInitializingShardsIt(String nodeId) { ArrayList ordered = new ArrayList<>(activeShards.size() + allInitializingShards.size()); // fill it in a randomized 
fashion diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java b/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java index 0ea80d18a0d..6db68524992 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java @@ -32,10 +32,9 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.math.MathUtils; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexShardMissingException; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.indices.IndexMissingException; +import org.elasticsearch.index.shard.ShardNotFoundException; import java.util.Collections; import java.util.HashSet; @@ -57,32 +56,32 @@ public class OperationRouting extends AbstractComponent { this.awarenessAllocationDecider = awarenessAllocationDecider; } - public ShardIterator indexShards(ClusterState clusterState, String index, String type, String id, @Nullable String routing) throws IndexMissingException, IndexShardMissingException { + public ShardIterator indexShards(ClusterState clusterState, String index, String type, String id, @Nullable String routing) { return shards(clusterState, index, type, id, routing).shardsIt(); } - public ShardIterator deleteShards(ClusterState clusterState, String index, String type, String id, @Nullable String routing) throws IndexMissingException, IndexShardMissingException { + public ShardIterator deleteShards(ClusterState clusterState, String index, String type, String id, @Nullable String routing) { return shards(clusterState, index, type, id, routing).shardsIt(); } - public ShardIterator getShards(ClusterState clusterState, String index, String type, String id, @Nullable String routing, @Nullable String preference) throws IndexMissingException, IndexShardMissingException { + public ShardIterator getShards(ClusterState clusterState, String index, String type, String id, @Nullable String routing, @Nullable String preference) { return preferenceActiveShardIterator(shards(clusterState, index, type, id, routing), clusterState.nodes().localNodeId(), clusterState.nodes(), preference); } - public ShardIterator getShards(ClusterState clusterState, String index, int shardId, @Nullable String preference) throws IndexMissingException, IndexShardMissingException { + public ShardIterator getShards(ClusterState clusterState, String index, int shardId, @Nullable String preference) { return preferenceActiveShardIterator(shards(clusterState, index, shardId), clusterState.nodes().localNodeId(), clusterState.nodes(), preference); } - public GroupShardsIterator broadcastDeleteShards(ClusterState clusterState, String index) throws IndexMissingException { + public GroupShardsIterator broadcastDeleteShards(ClusterState clusterState, String index) { return indexRoutingTable(clusterState, index).groupByShardsIt(); } - public int searchShardsCount(ClusterState clusterState, String[] indices, String[] concreteIndices, @Nullable Map> routing, @Nullable String preference) throws IndexMissingException { + public int searchShardsCount(ClusterState clusterState, String[] concreteIndices, @Nullable Map> routing) { final Set shards = computeTargetedShards(clusterState, concreteIndices, routing); return shards.size(); } - public 
GroupShardsIterator searchShards(ClusterState clusterState, String[] indices, String[] concreteIndices, @Nullable Map> routing, @Nullable String preference) throws IndexMissingException { + public GroupShardsIterator searchShards(ClusterState clusterState, String[] concreteIndices, @Nullable Map> routing, @Nullable String preference) { final Set shards = computeTargetedShards(clusterState, concreteIndices, routing); final Set set = new HashSet<>(shards.size()); for (IndexShardRoutingTable shard : shards) { @@ -96,7 +95,7 @@ public class OperationRouting extends AbstractComponent { private static final Map> EMPTY_ROUTING = Collections.emptyMap(); - private Set computeTargetedShards(ClusterState clusterState, String[] concreteIndices, @Nullable Map> routing) throws IndexMissingException { + private Set computeTargetedShards(ClusterState clusterState, String[] concreteIndices, @Nullable Map> routing) { routing = routing == null ? EMPTY_ROUTING : routing; // just use an empty map final Set set = new HashSet<>(); // we use set here and not list since we might get duplicates @@ -108,7 +107,7 @@ public class OperationRouting extends AbstractComponent { int shardId = shardId(clusterState, index, null, null, r); IndexShardRoutingTable indexShard = indexRouting.shard(shardId); if (indexShard == null) { - throw new IndexShardMissingException(new ShardId(index, shardId)); + throw new ShardNotFoundException(new ShardId(index, shardId)); } // we might get duplicates, but that's ok, they will override one another set.add(indexShard); @@ -175,8 +174,12 @@ public class OperationRouting extends AbstractComponent { return indexShard.preferNodeActiveInitializingShardsIt(localNodeId); case PRIMARY: return indexShard.primaryActiveInitializingShardIt(); + case REPLICA: + return indexShard.replicaActiveInitializingShardIt(); case PRIMARY_FIRST: return indexShard.primaryFirstActiveInitializingShardsIt(); + case REPLICA_FIRST: + return indexShard.replicaFirstActiveInitializingShardsIt(); case ONLY_LOCAL: return indexShard.onlyNodeActiveInitializingShardsIt(localNodeId); case ONLY_NODE: @@ -202,7 +205,7 @@ public class OperationRouting extends AbstractComponent { public IndexMetaData indexMetaData(ClusterState clusterState, String index) { IndexMetaData indexMetaData = clusterState.metaData().index(index); if (indexMetaData == null) { - throw new IndexMissingException(new Index(index)); + throw new IndexNotFoundException(index); } return indexMetaData; } @@ -210,7 +213,7 @@ public class OperationRouting extends AbstractComponent { protected IndexRoutingTable indexRoutingTable(ClusterState clusterState, String index) { IndexRoutingTable indexRouting = clusterState.routingTable().index(index); if (indexRouting == null) { - throw new IndexMissingException(new Index(index)); + throw new IndexNotFoundException(index); } return indexRouting; } @@ -226,7 +229,7 @@ public class OperationRouting extends AbstractComponent { protected IndexShardRoutingTable shards(ClusterState clusterState, String index, int shardId) { IndexShardRoutingTable indexShard = indexRoutingTable(clusterState, index).shard(shardId); if (indexShard == null) { - throw new IndexShardMissingException(new ShardId(index, shardId)); + throw new ShardNotFoundException(new ShardId(index, shardId)); } return indexShard; } @@ -248,7 +251,7 @@ public class OperationRouting extends AbstractComponent { } else { hash = hash(hashFunction, routing); } - if (createdVersion.onOrAfter(Version.V_2_0_0)) { + if (createdVersion.onOrAfter(Version.V_2_0_0_beta1)) { return 
MathUtils.mod(hash, indexMetaData.numberOfShards()); } else { return Math.abs(hash % indexMetaData.numberOfShards()); diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/Preference.java b/core/src/main/java/org/elasticsearch/cluster/routing/Preference.java index e9057bfe681..6de251b9d52 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/Preference.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/Preference.java @@ -44,11 +44,21 @@ public enum Preference { */ PRIMARY("_primary"), + /** + * Route to replica shards + */ + REPLICA("_replica"), + /** * Route to primary shards first */ PRIMARY_FIRST("_primary_first"), + /** + * Route to replica shards first + */ + REPLICA_FIRST("_replica_first"), + /** * Route to the local shard only */ @@ -96,9 +106,14 @@ public enum Preference { return LOCAL; case "_primary": return PRIMARY; + case "_replica": + return REPLICA; case "_primary_first": case "_primaryFirst": return PRIMARY_FIRST; + case "_replica_first": + case "_replicaFirst": + return REPLICA_FIRST; case "_only_local": case "_onlyLocal": return ONLY_LOCAL; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RestoreSource.java b/core/src/main/java/org/elasticsearch/cluster/routing/RestoreSource.java index d36be1be519..01bbfc33558 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RestoreSource.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RestoreSource.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing; +import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.SnapshotId; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -37,11 +38,14 @@ public class RestoreSource implements Streamable, ToXContent { private String index; + private Version version; + RestoreSource() { } - public RestoreSource(SnapshotId snapshotId, String index) { + public RestoreSource(SnapshotId snapshotId, Version version, String index) { this.snapshotId = snapshotId; + this.version = version; this.index = index; } @@ -53,6 +57,10 @@ public class RestoreSource implements Streamable, ToXContent { return index; } + public Version version() { + return version; + } + public static RestoreSource readRestoreSource(StreamInput in) throws IOException { RestoreSource restoreSource = new RestoreSource(); restoreSource.readFrom(in); @@ -66,12 +74,14 @@ public class RestoreSource implements Streamable, ToXContent { @Override public void readFrom(StreamInput in) throws IOException { snapshotId = SnapshotId.readSnapshotId(in); + version = Version.readVersion(in); index = in.readString(); } @Override public void writeTo(StreamOutput out) throws IOException { snapshotId.writeTo(out); + Version.writeVersion(version, out); out.writeString(index); } @@ -80,6 +90,7 @@ public class RestoreSource implements Streamable, ToXContent { return builder.startObject() .field("repository", snapshotId.getRepository()) .field("snapshot", snapshotId.getSnapshot()) + .field("version", version.toString()) .field("index", index) .endObject(); } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java index bcc355042cf..b348afbc192 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java @@ -363,36 +363,15 @@ public class RoutingNodes implements Iterable { } /** - * Assign a 
shard to a node. This will increment the inactiveShardCount counter - * and the inactivePrimaryCount counter if the shard is the primary. - * In case the shard is already assigned and started, it will be marked as - * relocating, which is accounted for, too, so the number of concurrent relocations - * can be retrieved easily. - * This method can be called several times for the same shard, only the first time - * will change the state. - * - * INITIALIZING => INITIALIZING - * UNASSIGNED => INITIALIZING - * STARTED => RELOCATING - * RELOCATING => RELOCATING - * - * @param shard the shard to be assigned - * @param nodeId the nodeId this shard should initialize on or relocate from + * Moves a shard from unassigned to initializing state */ - public void assign(ShardRouting shard, String nodeId) { - // state will not change if the shard is already initializing. - ShardRoutingState oldState = shard.state(); - shard.assignToNode(nodeId); + public void initialize(ShardRouting shard, String nodeId) { + assert shard.unassigned() : shard; + shard.initialize(nodeId); node(nodeId).add(shard); - if (oldState == ShardRoutingState.UNASSIGNED) { - inactiveShardCount++; - if (shard.primary()) { - inactivePrimaryCount++; - } - } - - if (shard.state() == ShardRoutingState.RELOCATING) { - relocatingShards++; + inactiveShardCount++; + if (shard.primary()) { + inactivePrimaryCount++; } assignedShardsAdd(shard); } @@ -406,7 +385,8 @@ public class RoutingNodes implements Iterable { relocatingShards++; shard.relocate(nodeId); ShardRouting target = shard.buildTargetRelocatingShard(); - assign(target, target.currentNodeId()); + node(target.currentNodeId()).add(target); + assignedShardsAdd(target); return target; } @@ -414,15 +394,14 @@ public class RoutingNodes implements Iterable { * Marks a shard as started and adjusts internal statistics.
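// ---- Editor's aside: a sketch, not part of the diff. The assign() method above is
// narrowed to initialize(): it now only legally moves UNASSIGNED -> INITIALIZING, and the
// counter bookkeeping becomes unconditional. A toy version of that logic, under the
// assumption (visible in the relocate() hunk) that relocation targets are added to the
// node without ever being counted as inactive:
class RoutingCountersSketch {
    int inactiveShardCount, inactivePrimaryCount;

    void onInitialize(boolean primary) {        // shard moves UNASSIGNED -> INITIALIZING
        inactiveShardCount++;
        if (primary) {
            inactivePrimaryCount++;
        }
    }

    void onStarted(boolean primary, boolean relocationTarget) { // INITIALIZING -> STARTED
        if (relocationTarget == false) {        // targets were never counted as inactive
            inactiveShardCount--;
            if (primary) {
                inactivePrimaryCount--;
            }
        }
    }
}
// ---- end of aside ----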
*/ public void started(ShardRouting shard) { - if (!shard.active() && shard.relocatingNodeId() == null) { + assert !shard.active() : "expected an initializing shard " + shard; + if (shard.relocatingNodeId() == null) { + // if this is not a target shard for relocation, we need to update statistics inactiveShardCount--; if (shard.primary()) { inactivePrimaryCount--; } - } else if (shard.relocating()) { - relocatingShards--; } - assert !shard.started(); shard.moveToStarted(); } @@ -777,6 +756,7 @@ public class RoutingNodes implements Iterable { private final RoutingNode iterable; private ShardRouting shard; private final Iterator delegate; + private boolean removed = false; public RoutingNodeIterator(RoutingNode iterable) { this.delegate = iterable.mutableIterator(); @@ -790,6 +770,7 @@ public class RoutingNodes implements Iterable { @Override public ShardRouting next() { + removed = false; return shard = delegate.next(); } @@ -797,6 +778,13 @@ public class RoutingNodes implements Iterable { public void remove() { delegate.remove(); RoutingNodes.this.remove(shard); + removed = true; + } + + + /** returns true if {@link #remove()} or {@link #moveToUnassigned(UnassignedInfo)} were called on the current shard */ + public boolean isRemoved() { + return removed; + } @Override @@ -805,10 +793,16 @@ public class RoutingNodes implements Iterable { } public void moveToUnassigned(UnassignedInfo unassignedInfo) { - remove(); + if (isRemoved() == false) { + remove(); + } ShardRouting unassigned = new ShardRouting(shard); // protective copy of the mutable shard unassigned.moveToUnassigned(unassignedInfo); unassigned().add(unassigned); } + + public ShardRouting current() { + return shard; + } } } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java index 3172ea0bee5..bfc1d93d940 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java @@ -134,7 +134,7 @@ public class RoutingService extends AbstractLifecycleComponent i } // visible for testing - void performReroute(String reason) { + protected void performReroute(String reason) { try { if (lifecycle.stopped()) { return; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java index 1327b556208..0359849a6fb 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java @@ -26,9 +26,8 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.indices.IndexMissingException; import java.io.IOException; import java.util.ArrayList; @@ -124,7 +123,7 @@ public class RoutingTable implements Iterable, Diffable allShards() throws IndexMissingException { + public List allShards() { List shards = Lists.newArrayList(); String[] indices = indicesRouting.keySet().toArray(new String[indicesRouting.keySet().size()]); for (String index : indices) { @@ -139,13 +138,13 @@ public class RoutingTable implements Iterable, Diffable allShards(String index)
throws IndexMissingException { + public List allShards(String index) { List shards = Lists.newArrayList(); IndexRoutingTable indexRoutingTable = index(index); if (indexRoutingTable == null) { - throw new IndexMissingException(new Index(index)); + throw new IndexNotFoundException(index); } for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) { for (ShardRouting shardRouting : indexShardRoutingTable) { @@ -155,7 +154,7 @@ public class RoutingTable implements Iterable, Diffable, Diffableextra shard iterator will be added for relocating shards. The extra * iterator contains a single ShardRouting pointing at the relocating target */ - public GroupShardsIterator allActiveShardsGrouped(String[] indices, boolean includeEmpty, boolean includeRelocationTargets) throws IndexMissingException { + public GroupShardsIterator allActiveShardsGrouped(String[] indices, boolean includeEmpty, boolean includeRelocationTargets) { // use list here since we need to maintain identity across shards ArrayList set = new ArrayList<>(); for (String index : indices) { @@ -191,7 +190,7 @@ public class RoutingTable implements Iterable, Diffable, Diffableextra shard iterator will be added for relocating shards. The extra * iterator contains a single ShardRouting pointing at the relocating target */ - public GroupShardsIterator allAssignedShardsGrouped(String[] indices, boolean includeEmpty, boolean includeRelocationTargets) throws IndexMissingException { + public GroupShardsIterator allAssignedShardsGrouped(String[] indices, boolean includeEmpty, boolean includeRelocationTargets) { // use list here since we need to maintain identity across shards ArrayList set = new ArrayList<>(); for (String index : indices) { @@ -234,16 +233,16 @@ public class RoutingTable implements Iterable, Diffable set = new ArrayList<>(); for (String index : indices) { IndexRoutingTable indexRoutingTable = index(index); if (indexRoutingTable == null) { - throw new IndexMissingException(new Index(index)); + throw new IndexNotFoundException(index); } for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) { ShardRouting primary = indexShardRoutingTable.primaryShard(); @@ -387,7 +386,7 @@ public class RoutingTable implements Iterable, Diffable asList; + private transient ShardId shardIdentifier; private boolean frozen = false; private ShardRouting() { @@ -59,7 +60,7 @@ public final class ShardRouting implements Streamable, ToXContent { } public ShardRouting(ShardRouting copy, long version) { - this(copy.index(), copy.id(), copy.currentNodeId(), copy.relocatingNodeId(), copy.restoreSource(), copy.primary(), copy.state(), version, copy.unassignedInfo(), true); + this(copy.index(), copy.id(), copy.currentNodeId(), copy.relocatingNodeId(), copy.restoreSource(), copy.primary(), copy.state(), version, copy.unassignedInfo(), copy.allocationId(), true); } /** @@ -68,7 +69,7 @@ public final class ShardRouting implements Streamable, ToXContent { */ ShardRouting(String index, int shardId, String currentNodeId, String relocatingNodeId, RestoreSource restoreSource, boolean primary, ShardRoutingState state, long version, - UnassignedInfo unassignedInfo, boolean internal) { + UnassignedInfo unassignedInfo, AllocationId allocationId, boolean internal) { this.index = index; this.shardId = shardId; this.currentNodeId = currentNodeId; @@ -79,11 +80,13 @@ public final class ShardRouting implements Streamable, ToXContent { this.version = version; this.restoreSource = restoreSource; this.unassignedInfo = unassignedInfo; + this.allocationId 
= allocationId; assert !(state == ShardRoutingState.UNASSIGNED && unassignedInfo == null) : "unassigned shard must be created with meta"; if (!internal) { assert state == ShardRoutingState.UNASSIGNED; assert currentNodeId == null; assert relocatingNodeId == null; + assert allocationId == null; } } @@ -91,7 +94,7 @@ public final class ShardRouting implements Streamable, ToXContent { * Creates a new unassigned shard. */ public static ShardRouting newUnassigned(String index, int shardId, RestoreSource restoreSource, boolean primary, UnassignedInfo unassignedInfo) { - return new ShardRouting(index, shardId, null, null, restoreSource, primary, ShardRoutingState.UNASSIGNED, 0, unassignedInfo, true); + return new ShardRouting(index, shardId, null, null, restoreSource, primary, ShardRoutingState.UNASSIGNED, 0, unassignedInfo, null, true); } /** @@ -201,7 +204,8 @@ public final class ShardRouting implements Streamable, ToXContent { */ public ShardRouting buildTargetRelocatingShard() { assert relocating(); - return new ShardRouting(index, shardId, relocatingNodeId, currentNodeId, restoreSource, primary, ShardRoutingState.INITIALIZING, version, unassignedInfo, true); + return new ShardRouting(index, shardId, relocatingNodeId, currentNodeId, restoreSource, primary, ShardRoutingState.INITIALIZING, version, unassignedInfo, + AllocationId.newTargetRelocation(allocationId), true); } /** @@ -220,6 +224,14 @@ public final class ShardRouting implements Streamable, ToXContent { return unassignedInfo; } + /** + * An id that uniquely identifies an allocation. + */ + @Nullable + public AllocationId allocationId() { + return this.allocationId; + } + /** * Returns true iff this shard is a primary. */ @@ -287,6 +299,9 @@ public final class ShardRouting implements Streamable, ToXContent { if (in.readBoolean()) { unassignedInfo = new UnassignedInfo(in); } + if (in.readBoolean()) { + allocationId = new AllocationId(in); + } freeze(); } @@ -332,6 +347,12 @@ public final class ShardRouting implements Streamable, ToXContent { } else { out.writeBoolean(false); } + if (allocationId != null) { + out.writeBoolean(true); + allocationId.writeTo(out); + } else { + out.writeBoolean(false); + } } @Override @@ -350,32 +371,25 @@ public final class ShardRouting implements Streamable, ToXContent { void moveToUnassigned(UnassignedInfo unassignedInfo) { ensureNotFrozen(); version++; - assert state != ShardRoutingState.UNASSIGNED; + assert state != ShardRoutingState.UNASSIGNED : this; state = ShardRoutingState.UNASSIGNED; currentNodeId = null; relocatingNodeId = null; this.unassignedInfo = unassignedInfo; + allocationId = null; } /** - * Assign this shard to a node. - * - * @param nodeId id of the node to assign this shard to + * Initializes an unassigned shard on a node. 
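// ---- Editor's aside: a sketch, not part of the diff. The new AllocationId introduced in
// this change travels with the routing entry: initialize() mints a fresh id, relocate()
// adds a transient relocation id, the relocation target starts out under that id, and
// finishing (or cancelling) the relocation clears it again. Modeled with plain strings,
// following the AllocationId factory methods shown earlier in this change:
class AllocationIdLifecycleSketch {
    public static void main(String[] args) {
        String id = java.util.UUID.randomUUID().toString();           // initialize(): fresh allocation id
        String relocationId = java.util.UUID.randomUUID().toString(); // relocate() on the source

        // buildTargetRelocatingShard(): the target shard is created under the relocation id,
        // remembering the source id (see newTargetRelocation above)
        String targetId = relocationId;
        String targetRelocationId = id;

        // moveToStarted() on the target (finishRelocation): keep the id, drop the pointer
        targetRelocationId = null;
        System.out.println("target started under allocation id " + targetId);

        // cancelRelocation() on the source instead drops relocationId and keeps id
        relocationId = null;
        System.out.println("source keeps allocation id " + id);
    }
}
// ---- end of aside ----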
*/ - void assignToNode(String nodeId) { + void initialize(String nodeId) { ensureNotFrozen(); version++; - if (currentNodeId == null) { - assert state == ShardRoutingState.UNASSIGNED; - state = ShardRoutingState.INITIALIZING; - currentNodeId = nodeId; - relocatingNodeId = null; - } else if (state == ShardRoutingState.STARTED) { - state = ShardRoutingState.RELOCATING; - relocatingNodeId = nodeId; - } else if (state == ShardRoutingState.RELOCATING) { - assert nodeId.equals(relocatingNodeId); - } + assert state == ShardRoutingState.UNASSIGNED : this; + assert relocatingNodeId == null : this; + state = ShardRoutingState.INITIALIZING; + currentNodeId = nodeId; + allocationId = AllocationId.newInitializing(); } /** @@ -386,9 +400,10 @@ public final class ShardRouting implements Streamable, ToXContent { void relocate(String relocatingNodeId) { ensureNotFrozen(); version++; - assert state == ShardRoutingState.STARTED; + assert state == ShardRoutingState.STARTED : this; state = ShardRoutingState.RELOCATING; this.relocatingNodeId = relocatingNodeId; + this.allocationId = AllocationId.newRelocation(allocationId); } /** @@ -398,12 +413,13 @@ public final class ShardRouting implements Streamable, ToXContent { void cancelRelocation() { ensureNotFrozen(); version++; - assert state == ShardRoutingState.RELOCATING; - assert assignedToNode(); - assert relocatingNodeId != null; + assert state == ShardRoutingState.RELOCATING : this; + assert assignedToNode() : this; + assert relocatingNodeId != null : this; state = ShardRoutingState.STARTED; relocatingNodeId = null; + allocationId = AllocationId.cancelRelocation(allocationId); } /** @@ -414,6 +430,7 @@ public final class ShardRouting implements Streamable, ToXContent { assert state == ShardRoutingState.STARTED; version++; state = ShardRoutingState.INITIALIZING; + allocationId = AllocationId.newInitializing(); } /** @@ -424,11 +441,15 @@ public final class ShardRouting implements Streamable, ToXContent { void moveToStarted() { ensureNotFrozen(); version++; - assert state == ShardRoutingState.INITIALIZING || state == ShardRoutingState.RELOCATING; + assert state == ShardRoutingState.INITIALIZING : "expected an initializing shard " + this; relocatingNodeId = null; restoreSource = null; - state = ShardRoutingState.STARTED; unassignedInfo = null; // we keep the unassigned data until the shard is started + if (allocationId.getRelocationId() != null) { + // target relocation + allocationId = AllocationId.finishRelocation(allocationId); + } + state = ShardRoutingState.STARTED; } /** @@ -482,6 +503,9 @@ public final class ShardRouting implements Streamable, ToXContent { if (relocatingNodeId != null ? !relocatingNodeId.equals(that.relocatingNodeId) : that.relocatingNodeId != null) { return false; } + if (allocationId != null ? !allocationId.equals(that.allocationId) : that.allocationId != null) { + return false; + } if (state != that.state) { return false; } @@ -506,6 +530,7 @@ public final class ShardRouting implements Streamable, ToXContent { result = 31 * result + (primary ? 1 : 0); result = 31 * result + (state != null ? state.hashCode() : 0); result = 31 * result + (restoreSource != null ? restoreSource.hashCode() : 0); + result = 31 * result + (allocationId != null ? 
allocationId.hashCode() : 0); return hashCode = result; } @@ -529,10 +554,14 @@ public final class ShardRouting implements Streamable, ToXContent { } else { sb.append("[R]"); } + sb.append(", v[").append(version).append("]"); if (this.restoreSource != null) { sb.append(", restoring[" + restoreSource + "]"); } sb.append(", s[").append(state).append("]"); + if (allocationId != null) { + sb.append(", a").append(allocationId); + } if (this.unassignedInfo != null) { sb.append(", ").append(unassignedInfo.toString()); } @@ -547,11 +576,16 @@ public final class ShardRouting implements Streamable, ToXContent { .field("node", currentNodeId()) .field("relocating_node", relocatingNodeId()) .field("shard", shardId().id()) - .field("index", shardId().index().name()); + .field("index", shardId().index().name()) + .field("version", version); + if (restoreSource() != null) { builder.field("restore_source"); restoreSource().toXContent(builder, params); } + if (allocationId != null) { + allocationId.toXContent(builder, params); + } if (unassignedInfo != null) { unassignedInfo.toXContent(builder, params); } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java b/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java index 2be85e4bc37..6e9c5c88914 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Nullable; @@ -42,7 +43,7 @@ public class UnassignedInfo implements ToXContent, Writeable { public static final FormatDateTimeFormatter DATE_TIME_FORMATTER = Joda.forPattern("dateOptionalTime"); public static final String INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING = "index.unassigned.node_left.delayed_timeout"; - private static final TimeValue DEFAULT_DELAYED_NODE_LEFT_TIMEOUT = TimeValue.timeValueMillis(0); + private static final TimeValue DEFAULT_DELAYED_NODE_LEFT_TIMEOUT = TimeValue.timeValueMinutes(1); /** * Reason why the shard is in unassigned state. 
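// ---- Editor's aside: a sketch, not part of the diff. Note the behavioral change in the
// UnassignedInfo hunk above: DEFAULT_DELAYED_NODE_LEFT_TIMEOUT moves from 0 to 1 minute,
// so shards left behind by a departed node now wait before being reassigned. A hedged
// sketch of the remaining-delay arithmetic (the real logic also honors the per-index
// index.unassigned.node_left.delayed_timeout setting):
class DelayedAllocationSketch {
    static long remainingDelayMillis(long unassignedTimestampMillis, long nowMillis) {
        long delayMillis = java.util.concurrent.TimeUnit.MINUTES.toMillis(1); // new default
        long elapsed = nowMillis - unassignedTimestampMillis;
        return Math.max(0, delayMillis - elapsed); // 0 means "eligible for allocation now"
    }
}
// ---- end of aside ----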
@@ -95,28 +96,37 @@ public class UnassignedInfo implements ToXContent, Writeable { private final Reason reason; private final long timestamp; - private final String details; + private final String message; + private final Throwable failure; - public UnassignedInfo(Reason reason, String details) { - this(reason, System.currentTimeMillis(), details); + public UnassignedInfo(Reason reason, String message) { + this(reason, System.currentTimeMillis(), message, null); } - private UnassignedInfo(Reason reason, long timestamp, String details) { + public UnassignedInfo(Reason reason, @Nullable String message, @Nullable Throwable failure) { + this(reason, System.currentTimeMillis(), message, failure); + } + + private UnassignedInfo(Reason reason, long timestamp, String message, Throwable failure) { this.reason = reason; this.timestamp = timestamp; - this.details = details; + this.message = message; + this.failure = failure; + assert !(message == null && failure != null) : "provide a message if a failure exception is provided"; } UnassignedInfo(StreamInput in) throws IOException { this.reason = Reason.values()[(int) in.readByte()]; this.timestamp = in.readLong(); - this.details = in.readOptionalString(); + this.message = in.readOptionalString(); + this.failure = in.readThrowable(); } public void writeTo(StreamOutput out) throws IOException { out.writeByte((byte) reason.ordinal()); out.writeLong(timestamp); - out.writeOptionalString(details); + out.writeOptionalString(message); + out.writeThrowable(failure); } public UnassignedInfo readFrom(StreamInput in) throws IOException { @@ -144,8 +154,27 @@ public class UnassignedInfo implements ToXContent, Writeable { * Returns optional details explaining the reasons. */ @Nullable + public String getMessage() { + return this.message; + } + + /** + * Returns the additional failure exception details, if one exists. + */ + @Nullable + public Throwable getFailure() { + return failure; + } + + /** + * Builds a string representation of the message and the failure, if one exists. + */ + @Nullable public String getDetails() { - return this.details; + if (message == null) { + return null; + } + return message + (failure == null ? "" : ", failure " + ExceptionsHelper.detailedMessage(failure)); } /** @@ -228,23 +257,28 @@ public class UnassignedInfo implements ToXContent, Writeable { return nextDelay == Long.MAX_VALUE ?
0l : nextDelay; } - @Override - public String toString() { + public String shortSummary() { StringBuilder sb = new StringBuilder(); - sb.append("unassigned_info[[reason=").append(reason).append("]"); + sb.append("[reason=").append(reason).append("]"); sb.append(", at[").append(DATE_TIME_FORMATTER.printer().print(timestamp)).append("]"); + String details = getDetails(); if (details != null) { sb.append(", details[").append(details).append("]"); } - sb.append("]"); return sb.toString(); } + @Override + public String toString() { + return "unassigned_info[" + shortSummary() + "]"; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject("unassigned_info"); builder.field("reason", reason); builder.field("at", DATE_TIME_FORMATTER.printer().print(timestamp)); + String details = getDetails(); if (details != null) { builder.field("details", details); } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index c02ad4ea075..7a560642ec0 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -86,7 +86,7 @@ public class AllocationService extends AbstractComponent { } public RoutingAllocation.Result applyFailedShard(ClusterState clusterState, ShardRouting failedShard) { - return applyFailedShards(clusterState, ImmutableList.of(new FailedRerouteAllocation.FailedShard(failedShard, null))); + return applyFailedShards(clusterState, ImmutableList.of(new FailedRerouteAllocation.FailedShard(failedShard, null, null))); } /** @@ -101,7 +101,7 @@ public class AllocationService extends AbstractComponent { FailedRerouteAllocation allocation = new FailedRerouteAllocation(allocationDeciders, routingNodes, clusterState.nodes(), failedShards, clusterInfoService.getClusterInfo()); boolean changed = false; for (FailedRerouteAllocation.FailedShard failedShard : failedShards) { - changed |= applyFailedShard(allocation, failedShard.shard, true, new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, failedShard.details)); + changed |= applyFailedShard(allocation, failedShard.shard, true, new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, failedShard.message, failedShard.failure)); } if (!changed) { return new RoutingAllocation.Result(false, clusterState.routingTable()); @@ -247,7 +247,7 @@ public class AllocationService extends AbstractComponent { } } for (ShardRouting shardToFail : shardsToFail) { - changed |= applyFailedShard(allocation, shardToFail, false, new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, "primary failed while replica initializing")); + changed |= applyFailedShard(allocation, shardToFail, false, new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, "primary failed while replica initializing")); } // now, go over and elect a new primary if possible, not, from this code block on, if one is elected, @@ -324,41 +324,50 @@ public class AllocationService extends AbstractComponent { for (ShardRouting startedShard : startedShardEntries) { assert startedShard.initializing(); - // retrieve the relocating node id before calling startedShard(). - String relocatingNodeId = null; + // validate index still exists. 
strictly speaking this is not needed but it gives clearer logs + if (routingNodes.routingTable().index(startedShard.index()) == null) { + logger.debug("{} ignoring shard started, unknown index (routing: {})", startedShard.shardId(), startedShard); + continue; + } + RoutingNodes.RoutingNodeIterator currentRoutingNode = routingNodes.routingNodeIter(startedShard.currentNodeId()); - if (currentRoutingNode != null) { - for (ShardRouting shard : currentRoutingNode) { - if (shard.shardId().equals(startedShard.shardId())) { - relocatingNodeId = shard.relocatingNodeId(); - if (!shard.started()) { - dirty = true; - routingNodes.started(shard); - } - break; + if (currentRoutingNode == null) { + logger.debug("{} failed to find shard in order to start it [failed to find node], ignoring (routing: {})", startedShard.shardId(), startedShard); + continue; + } + + for (ShardRouting shard : currentRoutingNode) { + if (shard.allocationId().getId().equals(startedShard.allocationId().getId())) { + if (shard.active()) { + logger.trace("{} shard is already started, ignoring (routing: {})", startedShard.shardId(), startedShard); + } else { + dirty = true; + // override started shard with the latest copy. Capture it now, before starting the shard destroys it... + startedShard = new ShardRouting(shard); + routingNodes.started(shard); + logger.trace("{} marked shard as started (routing: {})", startedShard.shardId(), startedShard); } + break; } } // startedShard is the current state of the shard (post relocation for example) // this means that after relocation, the state will be started and the currentNodeId will be // the node we relocated to - - if (relocatingNodeId == null) { + if (startedShard.relocatingNodeId() == null) { continue; } - RoutingNodes.RoutingNodeIterator sourceRoutingNode = routingNodes.routingNodeIter(relocatingNodeId); + RoutingNodes.RoutingNodeIterator sourceRoutingNode = routingNodes.routingNodeIter(startedShard.relocatingNodeId()); if (sourceRoutingNode != null) { while (sourceRoutingNode.hasNext()) { ShardRouting shard = sourceRoutingNode.next(); - if (shard.shardId().equals(startedShard.shardId())) { - if (shard.relocating()) { - dirty = true; - sourceRoutingNode.remove(); - break; - } + if (shard.allocationId().getId().equals(startedShard.allocationId().getRelocationId())) { + assert shard.relocating() : "source shard for relocation is not marked as relocating. source " + shard + ", started target " + startedShard; + dirty = true; + sourceRoutingNode.remove(); + break; } } } @@ -371,129 +380,105 @@ public class AllocationService extends AbstractComponent { * require relocation.
*/ private boolean applyFailedShard(RoutingAllocation allocation, ShardRouting failedShard, boolean addToIgnoreList, UnassignedInfo unassignedInfo) { - // create a copy of the failed shard, since we assume we can change possible references to it without - // changing the state of failed shard - failedShard = new ShardRouting(failedShard); - IndexRoutingTable indexRoutingTable = allocation.routingTable().index(failedShard.index()); if (indexRoutingTable == null) { + logger.debug("{} ignoring shard failure, unknown index in {} ({})", failedShard.shardId(), failedShard, unassignedInfo.shortSummary()); return false; } RoutingNodes routingNodes = allocation.routingNodes(); - boolean dirty = false; - if (failedShard.relocatingNodeId() != null) { - // the shard is relocating, either in initializing (recovery from another node) or relocating (moving to another node) - if (failedShard.initializing()) { - // the shard is initializing and recovering from another node - // first, we need to cancel the current node that is being initialized - RoutingNodes.RoutingNodeIterator initializingNode = routingNodes.routingNodeIter(failedShard.currentNodeId()); - if (initializingNode != null) { - while (initializingNode.hasNext()) { - ShardRouting shardRouting = initializingNode.next(); - if (shardRouting.equals(failedShard)) { - dirty = true; - initializingNode.remove(); - if (addToIgnoreList) { - // make sure we ignore this shard on the relevant node - allocation.addIgnoreShardForNode(failedShard.shardId(), failedShard.currentNodeId()); - } - break; - } - } - } - if (dirty) { - // now, find the node that we are relocating *from*, and cancel its relocation - RoutingNode relocatingFromNode = routingNodes.node(failedShard.relocatingNodeId()); - if (relocatingFromNode != null) { - for (ShardRouting shardRouting : relocatingFromNode) { - if (shardRouting.shardId().equals(failedShard.shardId()) && shardRouting.relocating()) { - dirty = true; - routingNodes.cancelRelocation(shardRouting); - break; - } - } - } - } else { - logger.debug("failed shard {} not found in routingNodes, ignoring it", failedShard); - } - return dirty; - } else if (failedShard.relocating()) { - // the shard is relocating, meaning its the source the shard is relocating from - // first, we need to cancel the current relocation from the current node - // now, find the node that we are recovering from, cancel the relocation, remove it from the node - // and add it to the unassigned shards list... - RoutingNodes.RoutingNodeIterator relocatingFromNode = routingNodes.routingNodeIter(failedShard.currentNodeId()); - if (relocatingFromNode != null) { - while (relocatingFromNode.hasNext()) { - ShardRouting shardRouting = relocatingFromNode.next(); - if (shardRouting.equals(failedShard)) { - dirty = true; - if (addToIgnoreList) { - // make sure we ignore this shard on the relevant node - allocation.addIgnoreShardForNode(failedShard.shardId(), failedShard.currentNodeId()); - } - relocatingFromNode.moveToUnassigned(unassignedInfo); - break; - } - } - } - if (dirty) { - // next, we need to find the target initializing shard that is recovering from, and remove it... 
- RoutingNodes.RoutingNodeIterator initializingNode = routingNodes.routingNodeIter(failedShard.relocatingNodeId()); - if (initializingNode != null) { - while (initializingNode.hasNext()) { - ShardRouting shardRouting = initializingNode.next(); - if (shardRouting.shardId().equals(failedShard.shardId()) && shardRouting.initializing()) { - dirty = true; - initializingNode.remove(); - } - } - } - } else { - logger.debug("failed shard {} not found in routingNodes, ignoring it", failedShard); - } - } else { - throw new IllegalStateException("illegal state for a failed shard, relocating node id is set, but state does not match: " + failedShard); + RoutingNodes.RoutingNodeIterator matchedNode = routingNodes.routingNodeIter(failedShard.currentNodeId()); + if (matchedNode == null) { + logger.debug("{} ignoring shard failure, unknown node in {} ({})", failedShard.shardId(), failedShard, unassignedInfo.shortSummary()); + return false; + } + + boolean matchedShard = false; + while (matchedNode.hasNext()) { + ShardRouting routing = matchedNode.next(); + if (routing.allocationId().getId().equals(failedShard.allocationId().getId())) { + matchedShard = true; + logger.debug("{} failed shard {} found in routingNodes, failing it ({})", failedShard.shardId(), failedShard, unassignedInfo.shortSummary()); + break; } - } else { - // the shard is not relocating, its either started, or initializing, just cancel it and move on... - RoutingNodes.RoutingNodeIterator node = routingNodes.routingNodeIter(failedShard.currentNodeId()); - if (node != null) { - while (node.hasNext()) { - ShardRouting shardRouting = node.next(); - if (shardRouting.equals(failedShard)) { - dirty = true; - if (addToIgnoreList) { - // make sure we ignore this shard on the relevant node - allocation.addIgnoreShardForNode(failedShard.shardId(), failedShard.currentNodeId()); - } - // move all the shards matching the failed shard to the end of the unassigned list - // so we give a chance for other allocations and won't create poison failed allocations - // that can keep other shards from being allocated (because of limits applied on how many - // shards we can start per node) - List shardsToMove = Lists.newArrayList(); - for (Iterator unassignedIt = routingNodes.unassigned().iterator(); unassignedIt.hasNext(); ) { - ShardRouting unassignedShardRouting = unassignedIt.next(); - if (unassignedShardRouting.shardId().equals(failedShard.shardId())) { - unassignedIt.remove(); - shardsToMove.add(unassignedShardRouting); - } - } - if (!shardsToMove.isEmpty()) { - routingNodes.unassigned().addAll(shardsToMove); - } + } - node.moveToUnassigned(unassignedInfo); + if (matchedShard == false) { + logger.debug("{} ignoring shard failure, unknown allocation id in {} ({})", failedShard.shardId(), failedShard, unassignedInfo.shortSummary()); + return false; + } + + // replace incoming instance to make sure we work on the latest one. Copy it to maintain information during modifications. + failedShard = new ShardRouting(matchedNode.current()); + + // remove the current copy of the shard + matchedNode.remove(); + + if (addToIgnoreList) { + // make sure we ignore this shard on the relevant node + allocation.addIgnoreShardForNode(failedShard.shardId(), failedShard.currentNodeId()); + } + + if (failedShard.relocatingNodeId() != null && failedShard.initializing()) { + // The shard is a target of a relocating shard. In that case we only + // need to remove the target shard and cancel the source relocation. 
+ // No shard is left unassigned + logger.trace("{} is a relocation target, resolving source to cancel relocation ({})", failedShard, unassignedInfo.shortSummary()); + RoutingNode relocatingFromNode = routingNodes.node(failedShard.relocatingNodeId()); + if (relocatingFromNode != null) { + for (ShardRouting shardRouting : relocatingFromNode) { + if (shardRouting.allocationId().getId().equals(failedShard.allocationId().getRelocationId())) { + logger.trace("{}, resolved source to [{}]. canceling relocation ... ({})", failedShard.shardId(), shardRouting, unassignedInfo.shortSummary()); + routingNodes.cancelRelocation(shardRouting); break; } } } + } else { + // The failed shard is the main copy of the current shard routing. Any + // relocation will be cancelled (and the target shard removed as well) + // and the shard copy needs to be marked as unassigned + + if (failedShard.relocatingNodeId() != null) { + // handle relocation source shards. we need to find the target initializing shard that is recovering from, and remove it... + assert failedShard.initializing() == false; // should have been dealt with and returned + assert failedShard.relocating(); + + RoutingNodes.RoutingNodeIterator initializingNode = routingNodes.routingNodeIter(failedShard.relocatingNodeId()); + if (initializingNode != null) { + while (initializingNode.hasNext()) { + ShardRouting shardRouting = initializingNode.next(); + if (shardRouting.allocationId().getId().equals(failedShard.allocationId().getRelocationId())) { + assert shardRouting.initializing() : shardRouting; + assert failedShard.allocationId().getId().equals(shardRouting.allocationId().getRelocationId()) + : "found target shard's allocation relocation id is different than source"; + logger.trace("{} is removed due to the failure of the source shard", shardRouting); + initializingNode.remove(); + } + } + } + } + + // move all the shards matching the failed shard to the end of the unassigned list + // so we give a chance for other allocations and won't create poison failed allocations + // that can keep other shards from being allocated (because of limits applied on how many + // shards we can start per node) + List<ShardRouting> shardsToMove = Lists.newArrayList(); + for (Iterator<ShardRouting> unassignedIt = routingNodes.unassigned().iterator(); unassignedIt.hasNext(); ) { + ShardRouting unassignedShardRouting = unassignedIt.next(); + if (unassignedShardRouting.shardId().equals(failedShard.shardId())) { + unassignedIt.remove(); + shardsToMove.add(unassignedShardRouting); + } + } + if (!shardsToMove.isEmpty()) { + routingNodes.unassigned().addAll(shardsToMove); + } + + matchedNode.moveToUnassigned(unassignedInfo); } - return dirty; + assert matchedNode.isRemoved() : "failedShard " + failedShard + " was matched but wasn't removed"; + return true; } } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/FailedRerouteAllocation.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/FailedRerouteAllocation.java index 305768c8d28..24e38279f4d 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/FailedRerouteAllocation.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/FailedRerouteAllocation.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RoutingNodes; @@ -39,11 +40,18 @@ public class FailedRerouteAllocation extends RoutingAllocation { */ public static class FailedShard { public final ShardRouting shard; - public final String details; + public final String message; + public final Throwable failure; - public FailedShard(ShardRouting shard, String details) { + public FailedShard(ShardRouting shard, String message, Throwable failure) { this.shard = shard; - this.details = details; + this.message = message; + this.failure = failure; + } + + @Override + public String toString() { + return "failed shard, shard " + shard + ", message [" + message + "], failure [" + ExceptionsHelper.detailedMessage(failure) + "]"; } } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 35116dfaa2c..7873d906aa5 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -688,7 +688,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards if (logger.isTraceEnabled()) { logger.trace("Assigned shard [{}] to [{}]", shard, minNode.getNodeId()); } - routingNodes.assign(shard, routingNodes.node(minNode.getNodeId()).nodeId()); + routingNodes.initialize(shard, routingNodes.node(minNode.getNodeId()).nodeId()); changed = true; continue; // don't add to ignoreUnassigned } else { @@ -783,8 +783,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards routingNodes.relocate(candidate, lowRoutingNode.nodeId()); } else { - assert candidate.unassigned(); - routingNodes.assign(candidate, routingNodes.node(minNode.getNodeId()).nodeId()); + routingNodes.initialize(candidate, routingNodes.node(minNode.getNodeId()).nodeId()); } return true; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateAllocationCommand.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateAllocationCommand.java index d7697193222..670ccb52317 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateAllocationCommand.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateAllocationCommand.java @@ -225,7 +225,7 @@ public class AllocateAllocationCommand implements AllocationCommand { continue; } it.remove(); - routingNodes.assign(shardRouting, routingNode.nodeId()); + routingNodes.initialize(shardRouting, routingNode.nodeId()); if (shardRouting.primary()) { // we need to clear the post allocation flag, since its an explicit allocation of the primary shard // and we want to force allocate it (and create a new index for it) diff --git a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java index fe5200e5f40..6303edb3fd4 100644 --- a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java +++ b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java @@ -519,11 +519,11 @@ public class InternalClusterService extends AbstractLifecycleComponent= 4)", coordinates.children.size()); - } else if (!coordinates.children.get(0).coordinate.equals( - 
coordinates.children.get(coordinates.children.size() - 1).coordinate)) { - throw new ElasticsearchParseException("invalid LinearRing found (coordinates are not closed)"); + } + + int numValidPts; + if (coordinates.children.size() < (numValidPts = (coerce) ? 3 : 4)) { + throw new ElasticsearchParseException("invalid number of points in LinearRing (found [{}] - must be >= " + numValidPts + ")", + coordinates.children.size()); + } + + if (!coordinates.children.get(0).coordinate.equals( + coordinates.children.get(coordinates.children.size() - 1).coordinate)) { + if (coerce) { + coordinates.children.add(coordinates.children.get(0)); + } else { + throw new ElasticsearchParseException("invalid LinearRing found (coordinates are not closed)"); + } } return parseLineString(coordinates); } - protected static PolygonBuilder parsePolygon(CoordinateNode coordinates, Orientation orientation) { + protected static PolygonBuilder parsePolygon(CoordinateNode coordinates, final Orientation orientation, final boolean coerce) { if (coordinates.children == null || coordinates.children.isEmpty()) { throw new ElasticsearchParseException("invalid LinearRing provided for type polygon. Linear ring must be an array of coordinates"); } - LineStringBuilder shell = parseLinearRing(coordinates.children.get(0)); + LineStringBuilder shell = parseLinearRing(coordinates.children.get(0), coerce); PolygonBuilder polygon = new PolygonBuilder(shell.points, orientation); for (int i = 1; i < coordinates.children.size(); i++) { - polygon.hole(parseLinearRing(coordinates.children.get(i))); + polygon.hole(parseLinearRing(coordinates.children.get(i), coerce)); } return polygon; } - protected static MultiPolygonBuilder parseMultiPolygon(CoordinateNode coordinates, Orientation orientation) { + protected static MultiPolygonBuilder parseMultiPolygon(CoordinateNode coordinates, final Orientation orientation, + final boolean coerce) { MultiPolygonBuilder polygons = newMultiPolygon(orientation); for (CoordinateNode node : coordinates.children) { - polygons.polygon(parsePolygon(node, orientation)); + polygons.polygon(parsePolygon(node, orientation, coerce)); } return polygons; } @@ -917,13 +931,15 @@ public abstract class ShapeBuilder implements ToXContent { * @return Geometry[] geometries of the GeometryCollection * @throws IOException Thrown if an error occurs while reading from the XContentParser */ - protected static GeometryCollectionBuilder parseGeometries(XContentParser parser, Orientation orientation) throws IOException { + protected static GeometryCollectionBuilder parseGeometries(XContentParser parser, GeoShapeFieldMapper mapper) throws + IOException { if (parser.currentToken() != XContentParser.Token.START_ARRAY) { throw new ElasticsearchParseException("geometries must be an array of geojson objects"); } XContentParser.Token token = parser.nextToken(); - GeometryCollectionBuilder geometryCollection = newGeometryCollection(orientation); + GeometryCollectionBuilder geometryCollection = newGeometryCollection( (mapper == null) ?
Orientation.RIGHT : mapper + .fieldType().orientation()); while (token != XContentParser.Token.END_ARRAY) { ShapeBuilder shapeBuilder = GeoShapeType.parse(parser); geometryCollection.shape(shapeBuilder); diff --git a/core/src/main/java/org/elasticsearch/common/io/PathUtils.java b/core/src/main/java/org/elasticsearch/common/io/PathUtils.java index 103ac19df68..ada11bfd0fa 100644 --- a/core/src/main/java/org/elasticsearch/common/io/PathUtils.java +++ b/core/src/main/java/org/elasticsearch/common/io/PathUtils.java @@ -92,6 +92,15 @@ public final class PathUtils { return null; } + /** + * Tries to resolve the given file uri against the list of available roots. + * + * If the uri starts with one of the listed roots, it is returned by this method, otherwise null is returned. + */ + public static Path get(Path[] roots, URI uri) { + return get(roots, PathUtils.get(uri).normalize().toString()); + } + /** * Returns the default FileSystem. */ diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java index 3cafd299cd9..5ea3444ade5 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java @@ -508,16 +508,26 @@ public abstract class StreamInput extends InputStream { final String name = readString(); return (T) readException(this, name); case 1: - // this sucks it would be nice to have a better way to construct those? - String msg = readOptionalString(); - final int idx = msg.indexOf(" (resource="); - final String resource = msg.substring(idx + " (resource=".length(), msg.length()-1); - msg = msg.substring(0, idx); - return (T) readStackTrace(new CorruptIndexException(msg, resource, readThrowable()), this); // Lucene 5.3 will have getters for all these + String msg1 = readOptionalString(); + String resource1 = readOptionalString(); + return (T) readStackTrace(new CorruptIndexException(msg1, resource1, readThrowable()), this); case 2: - return (T) readStackTrace(new IndexFormatTooNewException(readOptionalString(), -1, -1, -1), this); // Lucene 5.3 will have getters for all these + String resource2 = readOptionalString(); + int version2 = readInt(); + int minVersion2 = readInt(); + int maxVersion2 = readInt(); + return (T) readStackTrace(new IndexFormatTooNewException(resource2, version2, minVersion2, maxVersion2), this); case 3: - return (T) readStackTrace(new IndexFormatTooOldException(readOptionalString(), -1, -1, -1), this); // Lucene 5.3 will have getters for all these + String resource3 = readOptionalString(); + if (readBoolean()) { + int version3 = readInt(); + int minVersion3 = readInt(); + int maxVersion3 = readInt(); + return (T) readStackTrace(new IndexFormatTooOldException(resource3, version3, minVersion3, maxVersion3), this); + } else { + String version3 = readOptionalString(); + return (T) readStackTrace(new IndexFormatTooOldException(resource3, version3), this); + } case 4: return (T) readStackTrace(new NullPointerException(readOptionalString()), this); case 5: diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java index 004bf047770..b9e6a46070f 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java @@ -31,6 +31,7 @@ import org.elasticsearch.ElasticsearchException; import
org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.text.Text; import org.joda.time.ReadableInstant; @@ -43,6 +44,8 @@ import java.util.Date; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; /** * @@ -454,19 +457,100 @@ public abstract class StreamOutput extends OutputStream { } } + static { + assert Version.CURRENT.luceneVersion == org.apache.lucene.util.Version.LUCENE_5_2_1: "Remove these regexes once we upgrade to Lucene 5.3 and get proper getters for these exceptions"; + } + private final static Pattern CORRUPT_INDEX_EXCEPTION_REGEX = Regex.compile("^(.+) \\(resource=(.+)\\)$", ""); + private final static Pattern INDEX_FORMAT_TOO_NEW_EXCEPTION_REGEX = Regex.compile("Format version is not supported \\(resource (.+)\\): (-?\\d+) \\(needs to be between (-?\\d+) and (-?\\d+)\\)", ""); + private final static Pattern INDEX_FORMAT_TOO_OLD_EXCEPTION_REGEX_1 = Regex.compile("Format version is not supported \\(resource (.+)\\): (-?\\d+)(?: \\(needs to be between (-?\\d+) and (-?\\d+)\\)). This version of Lucene only supports indexes created with release 4.0 and later\\.", ""); + private final static Pattern INDEX_FORMAT_TOO_OLD_EXCEPTION_REGEX_2 = Regex.compile("Format version is not supported \\(resource (.+)\\): (.+). This version of Lucene only supports indexes created with release 4.0 and later\\.", ""); + + private static int parseIntSafe(String val, int defaultVal) { + try { + return Integer.parseInt(val); + } catch (NumberFormatException ex) { + return defaultVal; + } + } + public void writeThrowable(Throwable throwable) throws IOException { if (throwable == null) { writeBoolean(false); } else { writeBoolean(true); boolean writeCause = true; + boolean writeMessage = true; if (throwable instanceof CorruptIndexException) { writeVInt(1); + // Lucene 5.3 will have getters for all these + // we should switch to using getters instead of trying to parse the message: + // writeOptionalString(((CorruptIndexException)throwable).getDescription()); + // writeOptionalString(((CorruptIndexException)throwable).getResource()); + Matcher matcher = CORRUPT_INDEX_EXCEPTION_REGEX.matcher(throwable.getMessage()); + if (matcher.find()) { + writeOptionalString(matcher.group(1)); // message + writeOptionalString(matcher.group(2)); // resource + } else { + // didn't match + writeOptionalString("???"); // message + writeOptionalString("???"); // resource + } + writeMessage = false; } else if (throwable instanceof IndexFormatTooNewException) { writeVInt(2); + // Lucene 5.3 will have getters for all these + // we should switch to using getters instead of trying to parse the message: + // writeOptionalString(((CorruptIndexException)throwable).getResource()); + // writeInt(((IndexFormatTooNewException)throwable).getVersion()); + // writeInt(((IndexFormatTooNewException)throwable).getMinVersion()); + // writeInt(((IndexFormatTooNewException)throwable).getMaxVersion()); + Matcher matcher = INDEX_FORMAT_TOO_NEW_EXCEPTION_REGEX.matcher(throwable.getMessage()); + if (matcher.find()) { + writeOptionalString(matcher.group(1)); // resource + writeInt(parseIntSafe(matcher.group(2), -1)); // version + writeInt(parseIntSafe(matcher.group(3), -1)); // min version + writeInt(parseIntSafe(matcher.group(4), -1)); // max version + } else { + // didn't match + writeOptionalString("???"); //
resource + writeInt(-1); // version + writeInt(-1); // min version + writeInt(-1); // max version + } + writeMessage = false; writeCause = false; } else if (throwable instanceof IndexFormatTooOldException) { writeVInt(3); + // Lucene 5.3 will have getters for all these + // we should switch to using getters instead of trying to parse the message: + // writeOptionalString(((CorruptIndexException)throwable).getResource()); + // writeInt(((IndexFormatTooNewException)throwable).getVersion()); + // writeInt(((IndexFormatTooNewException)throwable).getMinVersion()); + // writeInt(((IndexFormatTooNewException)throwable).getMaxVersion()); + Matcher matcher = INDEX_FORMAT_TOO_OLD_EXCEPTION_REGEX_1.matcher(throwable.getMessage()); + if (matcher.find()) { + // version with numeric version in constructor + writeOptionalString(matcher.group(1)); // resource + writeBoolean(true); + writeInt(parseIntSafe(matcher.group(2), -1)); // version + writeInt(parseIntSafe(matcher.group(3), -1)); // min version + writeInt(parseIntSafe(matcher.group(4), -1)); // max version + } else { + matcher = INDEX_FORMAT_TOO_OLD_EXCEPTION_REGEX_2.matcher(throwable.getMessage()); + if (matcher.matches()) { + writeOptionalString(matcher.group(1)); // resource + writeBoolean(false); + writeOptionalString(matcher.group(2)); // version + } else { + // didn't match + writeOptionalString("???"); // resource + writeBoolean(true); + writeInt(-1); // version + writeInt(-1); // min version + writeInt(-1); // max version + } + } + writeMessage = false; writeCause = false; } else if (throwable instanceof NullPointerException) { writeVInt(4); @@ -521,7 +605,9 @@ public abstract class StreamOutput extends OutputStream { return; } - writeOptionalString(throwable.getMessage()); + if (writeMessage) { + writeOptionalString(throwable.getMessage()); + } if (writeCause) { writeThrowable(throwable.getCause()); } diff --git a/core/src/main/java/org/elasticsearch/common/lucene/store/ByteArrayIndexInput.java b/core/src/main/java/org/elasticsearch/common/lucene/store/ByteArrayIndexInput.java new file mode 100644 index 00000000000..6470f97f3f6 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/lucene/store/ByteArrayIndexInput.java @@ -0,0 +1,98 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.common.lucene.store; + +import org.apache.lucene.store.IndexInput; + +import java.io.EOFException; +import java.io.IOException; + +/** + * Wraps an array of bytes into an IndexInput + */ +public class ByteArrayIndexInput extends IndexInput { + private final byte[] bytes; + + private int pos; + + private int offset; + + private int length; + + public ByteArrayIndexInput(String resourceDesc, byte[] bytes) { + this(resourceDesc, bytes, 0, bytes.length); + } + + public ByteArrayIndexInput(String resourceDesc, byte[] bytes, int offset, int length) { + super(resourceDesc); + this.bytes = bytes; + this.offset = offset; + this.length = length; + } + + @Override + public void close() throws IOException { + } + + @Override + public long getFilePointer() { + return pos; + } + + @Override + public void seek(long l) throws IOException { + if (l < 0) { + throw new IllegalArgumentException("Seeking to negative position: " + l); + } else if (l > length) { + throw new EOFException("seek past EOF"); + } + pos = (int)l; + } + + @Override + public long length() { + return length; + } + + @Override + public IndexInput slice(String sliceDescription, long offset, long length) throws IOException { + if (offset >= 0L && length >= 0L && offset + length <= this.length) { + return new ByteArrayIndexInput(sliceDescription, bytes, this.offset + (int)offset, (int)length); + } else { + throw new IllegalArgumentException("slice() " + sliceDescription + " out of bounds: offset=" + offset + ",length=" + length + ",fileLength=" + this.length + ": " + this); + } + } + + @Override + public byte readByte() throws IOException { + if (pos >= length) { + throw new EOFException("seek past EOF"); + } + return bytes[offset + pos++]; + } + + @Override + public void readBytes(final byte[] b, final int offset, int len) throws IOException { + if (pos + len > length) { + throw new EOFException("seek past EOF"); + } + System.arraycopy(bytes, this.offset + pos, b, offset, len); + pos += len; + } +} diff --git a/core/src/main/java/org/elasticsearch/common/lucene/store/OutputStreamIndexOutput.java b/core/src/main/java/org/elasticsearch/common/lucene/store/IndexOutputOutputStream.java similarity index 89% rename from core/src/main/java/org/elasticsearch/common/lucene/store/OutputStreamIndexOutput.java rename to core/src/main/java/org/elasticsearch/common/lucene/store/IndexOutputOutputStream.java index 156ddb5f3fd..a6617b78438 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/store/OutputStreamIndexOutput.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/store/IndexOutputOutputStream.java @@ -25,12 +25,13 @@ import java.io.IOException; import java.io.OutputStream; /** + * {@link OutputStream} that writes into underlying IndexOutput */ -public class OutputStreamIndexOutput extends OutputStream { +public class IndexOutputOutputStream extends OutputStream { private final IndexOutput out; - public OutputStreamIndexOutput(IndexOutput out) { + public IndexOutputOutputStream(IndexOutput out) { this.out = out; } diff --git a/core/src/main/java/org/elasticsearch/common/unit/Fuzziness.java b/core/src/main/java/org/elasticsearch/common/unit/Fuzziness.java index 30b959d25b6..cfcd209b435 100644 --- a/core/src/main/java/org/elasticsearch/common/unit/Fuzziness.java +++ b/core/src/main/java/org/elasticsearch/common/unit/Fuzziness.java @@ -43,29 +43,17 @@ public final class Fuzziness implements ToXContent { public static final Fuzziness AUTO = new Fuzziness("AUTO"); public static final
ParseField FIELD = new ParseField(X_FIELD_NAME.camelCase().getValue()); - private final Object fuzziness; + private final String fuzziness; private Fuzziness(int fuzziness) { Preconditions.checkArgument(fuzziness >= 0 && fuzziness <= 2, "Valid edit distances are [0, 1, 2] but was [" + fuzziness + "]"); - this.fuzziness = fuzziness; - } - - private Fuzziness(float fuzziness) { - Preconditions.checkArgument(fuzziness >= 0.0 && fuzziness < 1.0f, "Valid similarities must be in the interval [0..1] but was [" + fuzziness + "]"); - this.fuzziness = fuzziness; + this.fuzziness = Integer.toString(fuzziness); } private Fuzziness(String fuzziness) { this.fuzziness = fuzziness; } - /** - * Creates a {@link Fuzziness} instance from a similarity. The value must be in the range [0..1) - */ - public static Fuzziness fromSimilarity(float similarity) { - return new Fuzziness(similarity); - } - /** * Creates a {@link Fuzziness} instance from an edit distance. The value must be one of [0, 1, 2] */ @@ -133,19 +121,17 @@ public final class Fuzziness implements ToXContent { } public int asDistance(String text) { - if (fuzziness instanceof String) { - if (this == AUTO) { //AUTO - final int len = termLen(text); - if (len <= 2) { - return 0; - } else if (len > 5) { - return 2; - } else { - return 1; - } + if (this == AUTO) { //AUTO + final int len = termLen(text); + if (len <= 2) { + return 0; + } else if (len > 5) { + return 2; + } else { + return 1; } } - return FuzzyQuery.floatToEdits(asFloat(), termLen(text)); + return Math.min(2, asInt()); } public TimeValue asTimeValue() { @@ -214,37 +200,6 @@ public final class Fuzziness implements ToXContent { return Float.parseFloat(fuzziness.toString()); } - public float asSimilarity() { - return asSimilarity(null); - } - - public float asSimilarity(String text) { - if (this == AUTO) { - final int len = termLen(text); - if (len <= 2) { - return 0.0f; - } else if (len > 5) { - return 0.5f; - } else { - return 0.66f; - } -// return dist == 0 ? dist : Math.min(0.999f, Math.max(0.0f, 1.0f - ((float) dist/ (float) termLen(text)))); - } - if (fuzziness instanceof Float) { // it's a similarity - return ((Float) fuzziness).floatValue(); - } else if (fuzziness instanceof Integer) { // it's an edit! - int dist = Math.min(((Integer) fuzziness).intValue(), - LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE); - return Math.min(0.999f, Math.max(0.0f, 1.0f - ((float) dist / (float) termLen(text)))); - } else { - final float similarity = Float.parseFloat(fuzziness.toString()); - if (similarity >= 0.0f && similarity < 1.0f) { - return similarity; - } - } - throw new IllegalArgumentException("Can't get similarity from fuzziness [" + fuzziness + "]"); - } - private int termLen(String text) { return text == null ? 5 : text.codePointCount(0, text.length()); // 5 avg term length in english } diff --git a/core/src/main/java/org/elasticsearch/common/util/URIPattern.java b/core/src/main/java/org/elasticsearch/common/util/URIPattern.java new file mode 100644 index 00000000000..0b9bb222521 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/util/URIPattern.java @@ -0,0 +1,98 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.util; + +import org.elasticsearch.common.regex.Regex; + +import java.net.URI; +import java.net.URISyntaxException; + +/** + * URI Pattern matcher + * + * The pattern is a URI in which the authority, path, query and fragment can be replaced with simple patterns. + * + * For example: foobar://*.local/some_path/*?*#* will match all URIs with scheme foobar in the local domain + * with any port, with a path that starts with some_path and with any query and fragment. + */ +public class URIPattern { + private final URI uriPattern; + + /** + * Constructs a URI pattern from the given pattern string + */ + public URIPattern(String pattern) { + try { + uriPattern = new URI(pattern); + } catch (URISyntaxException ex) { + throw new IllegalArgumentException("cannot parse URI pattern [" + pattern + "]"); + } + } + + /** + * Returns true if the given uri matches the pattern + */ + public boolean match(URI uri) { + return matchNormalized(uri.normalize()); + } + + public static boolean match(URIPattern[] patterns, URI uri) { + URI normalized = uri.normalize(); + for (URIPattern pattern : patterns) { + if (pattern.matchNormalized(normalized)) { + return true; + } + } + return false; + } + + private boolean matchNormalized(URI uri) { + if (uriPattern.isOpaque()) { + // This URI only has a scheme, scheme-specific part and fragment + return uri.isOpaque() && + match(uriPattern.getScheme(), uri.getScheme()) && + match(uriPattern.getSchemeSpecificPart(), uri.getSchemeSpecificPart()) && + match(uriPattern.getFragment(), uri.getFragment()); + + } else { + return match(uriPattern.getScheme(), uri.getScheme()) && + match(uriPattern.getAuthority(), uri.getAuthority()) && + match(uriPattern.getQuery(), uri.getQuery()) && + match(uriPattern.getPath(), uri.getPath()) && + match(uriPattern.getFragment(), uri.getFragment()); + } + } + + private boolean match(String pattern, String value) { + if (value == null) { + // If the pattern is empty or matches anything - it's a match + if (pattern == null || Regex.isMatchAllPattern(pattern)) { + return true; + } + } + return Regex.simpleMatch(pattern, value); + } + + @Override + public String toString() { + return uriPattern.toString(); + } +}
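The wildcard semantics described in the URIPattern javadoc can be exercised directly. A minimal usage sketch (hypothetical standalone code; the pattern string is the one from the javadoc above):

    import java.net.URI;
    import org.elasticsearch.common.util.URIPattern;

    public class URIPatternExample {
        public static void main(String[] args) throws Exception {
            URIPattern pattern = new URIPattern("foobar://*.local/some_path/*?*#*");
            // scheme, authority, path, query and fragment are each matched with simple wildcards
            System.out.println(pattern.match(new URI("foobar://node1.local/some_path/shards?pretty#top"))); // expected: true
            System.out.println(pattern.match(new URI("http://node1.local/some_path/shards")));              // expected: false (scheme differs)
        }
    }
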
diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedEsThreadPoolExecutor.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedEsThreadPoolExecutor.java index c4ed2375235..fe1b2a2438d 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedEsThreadPoolExecutor.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedEsThreadPoolExecutor.java @@ -105,6 +105,7 @@ public class PrioritizedEsThreadPoolExecutor extends EsThreadPoolExecutor { } else if (!(command instanceof PrioritizedFutureTask)) { // it might be a callable wrapper... command = new TieBreakingPrioritizedRunnable(command, Priority.NORMAL, insertionOrder.incrementAndGet()); } + super.execute(command); if (timeout.nanos() >= 0) { if (command instanceof TieBreakingPrioritizedRunnable) { ((TieBreakingPrioritizedRunnable) command).scheduleTimeout(timer, timeoutCallback, timeout); @@ -114,7 +115,6 @@ public class PrioritizedEsThreadPoolExecutor extends EsThreadPoolExecutor { throw new UnsupportedOperationException("Execute with timeout is not supported for future tasks"); } } - super.execute(command); } @Override @@ -161,7 +161,8 @@ public class PrioritizedEsThreadPoolExecutor extends EsThreadPoolExecutor { private Runnable runnable; private final long insertionOrder; - private ScheduledFuture<?> timeoutFuture; + private volatile ScheduledFuture<?> timeoutFuture; + private volatile boolean started = false; TieBreakingPrioritizedRunnable(PrioritizedRunnable runnable, long insertionOrder) { this(runnable, runnable.priority(), insertionOrder); @@ -175,6 +176,9 @@ public class PrioritizedEsThreadPoolExecutor extends EsThreadPoolExecutor { @Override public void run() { + // mark the task as started. This is needed for synchronization with the timeout handling + // see #scheduleTimeout() + started = true; FutureUtils.cancel(timeoutFuture); runAndClean(runnable); } @@ -197,6 +201,10 @@ public class PrioritizedEsThreadPoolExecutor extends EsThreadPoolExecutor { } } }, timeValue.nanos(), TimeUnit.NANOSECONDS); + if (started) { + // if the actual action has already started it might have missed the setting of the future. Cancel it ourselves. + FutureUtils.cancel(timeoutFuture); + } }
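Because super.execute(command) now runs before scheduleTimeout(), the task may start before timeoutFuture is assigned; the volatile started flag closes that window. A condensed sketch of the handshake (field and method names mirror the diff, everything else is illustrative):

    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.ScheduledFuture;
    import java.util.concurrent.TimeUnit;

    class TimeoutHandshake {
        private volatile ScheduledFuture<?> timeoutFuture;
        private volatile boolean started = false;

        // called on the pool thread, possibly before scheduleTimeout() returns
        public void run() {
            started = true;           // publish that the task has begun
            cancel(timeoutFuture);    // cancels the timeout if it was already scheduled
            // ... run the wrapped task ...
        }

        // called on the submitting thread after the task was queued
        public void scheduleTimeout(ScheduledExecutorService timer, long nanos) {
            timeoutFuture = timer.schedule(() -> { /* signal timeout */ }, nanos, TimeUnit.NANOSECONDS);
            if (started) {
                // run() may have executed before timeoutFuture was assigned
                // and missed it above, so cancel on its behalf
                cancel(timeoutFuture);
            }
        }

        private static void cancel(ScheduledFuture<?> future) {
            if (future != null) {
                future.cancel(false);
            }
        }
    }

Either thread observing both writes cancels the timeout, so no interleaving leaves a live timeout for a task that already ran.
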
/** diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/FromXContentBuilder.java b/core/src/main/java/org/elasticsearch/common/xcontent/FromXContentBuilder.java new file mode 100644 index 00000000000..51511e445c2 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/xcontent/FromXContentBuilder.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.xcontent; + +import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.io.stream.StreamableReader; + +import java.io.IOException; + +/** + * Indicates that the class supports XContent deserialization. + * + * This interface is similar to what {@link StreamableReader} does, only it works with XContent serialization + * instead of binary serialization. + */ +public interface FromXContentBuilder<T> { + /** + * Parses an object with the type T from parser + */ + T fromXContent(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException; +} diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java b/core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java index 7ce2db5dcad..25ecc07bba6 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java @@ -20,6 +20,7 @@ package org.elasticsearch.common.xcontent; import com.google.common.base.Charsets; + import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; @@ -41,6 +42,7 @@ import java.io.InputStream; import java.io.OutputStream; import java.math.BigDecimal; import java.math.RoundingMode; +import java.nio.file.Path; import java.util.Calendar; import java.util.Date; import java.util.Locale; @@ -650,21 +652,33 @@ public final class XContentBuilder implements BytesStream, Releasable { return this; } - public XContentBuilder field(String name, Iterable value) throws IOException { - startArray(name); - for (Object o : value) { - value(o); + public XContentBuilder field(String name, Iterable<?> value) throws IOException { + if (value instanceof Path) { + //treat Paths as single value + field(name); + value(value); + } else { + startArray(name); + for (Object o : value) { + value(o); + } + endArray(); } - endArray(); return this; } - public XContentBuilder field(XContentBuilderString name, Iterable value) throws IOException { - startArray(name); - for (Object o : value) { - value(o); + public XContentBuilder field(XContentBuilderString name, Iterable<?> value) throws IOException { + if (value instanceof Path) { + //treat Paths as single value + field(name); + value(value); + } else { + startArray(name); + for (Object o : value) { + value(o); + } + endArray(); } - endArray(); return this; } @@ -1140,26 +1154,31 @@ public final class XContentBuilder implements BytesStream, Releasable { return this; } - public XContentBuilder value(Iterable value) throws IOException { + public XContentBuilder value(Iterable<?> value) throws IOException { if (value == null) { return nullValue(); } - startArray(); - for (Object o : value) { - value(o); + if (value instanceof Path) { + //treat as single value + writeValue(value); + } else { + startArray(); + for (Object o : value) { + value(o); + } + endArray(); } - endArray(); return this; } public XContentBuilder latlon(String name, double lat, double lon) throws IOException { return startObject(name).field("lat", lat).field("lon", lon).endObject(); } - + public XContentBuilder latlon(double lat, double lon) throws IOException { return startObject().field("lat", lat).field("lon", lon).endObject(); } - + public XContentBuilder copyCurrentStructure(XContentParser parser) throws IOException { generator.copyCurrentStructure(parser); return this; @@ -1231,7 +1250,7 @@ public final class XContentBuilder implements BytesStream, Releasable { generator.writeNull(); return; } - Class type = value.getClass(); + Class<?> type = value.getClass(); if (type == String.class) { generator.writeString((String) value); } else if (type == Integer.class) { @@ -1255,9 +1274,12 @@ public final class XContentBuilder implements BytesStream, Releasable { generator.writeEndObject(); } else if (value instanceof Map) { writeMap((Map) value); + } else if (value instanceof
Path) { + //Path implements Iterable<Path> and causes endless recursion and a StackOverflowError if treated as an Iterable here + generator.writeString(value.toString()); } else if (value instanceof Iterable) { generator.writeStartArray(); - for (Object v : (Iterable) value) { + for (Object v : (Iterable<?>) value) { writeValue(v); } generator.writeEndArray();
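The Path special-casing above guards against a subtle trap: java.nio.file.Path implements Iterable<Path>, and a single-name path iterates to an element equal to itself, so the generic Iterable branch never bottoms out. A toy reproduction of the recursion (illustrative only, not ES code):

    import java.nio.file.Path;
    import java.nio.file.Paths;

    public class PathIterableTrap {
        // a naive writer that expands every Iterable into an array of its elements
        static void write(Object value) {
            if (value instanceof Iterable) {
                for (Object element : (Iterable<?>) value) {
                    write(element); // each element of a Path is itself a Path -> unbounded recursion
                }
            } else {
                System.out.println(value);
            }
        }

        public static void main(String[] args) {
            // the name element "data" iterates to a single element equal to "data",
            // so without a Path check this recurses until StackOverflowError
            write(Paths.get("/tmp/es/data"));
        }
    }
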
diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java b/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java new file mode 100644 index 00000000000..d3bfbafee20 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java @@ -0,0 +1,398 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.discovery.zen; + +import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ProcessedClusterStateUpdateTask; +import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.RoutingService; +import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.common.Priority; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.discovery.DiscoverySettings; +import org.elasticsearch.discovery.zen.membership.MembershipAction; + +import java.util.*; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; + +/** + * This class processes incoming join requests (passed via {@link ZenDiscovery}). Incoming nodes + * are directly added to the cluster state or are accumulated during master election. + */ +public class NodeJoinController extends AbstractComponent { + + final ClusterService clusterService; + final RoutingService routingService; + final DiscoverySettings discoverySettings; + final AtomicBoolean accumulateJoins = new AtomicBoolean(false); + + // this is set while trying to become a master + final AtomicReference<ElectionContext> electionContext = new AtomicReference<>(); + + + protected final Map<DiscoveryNode, List<MembershipAction.JoinCallback>> pendingJoinRequests = new HashMap<>(); + + public NodeJoinController(ClusterService clusterService, RoutingService routingService, DiscoverySettings discoverySettings, Settings settings) { + super(settings); + this.clusterService = clusterService; + this.routingService = routingService; + this.discoverySettings = discoverySettings; + } + + /** + * Waits for enough incoming joins from master eligible nodes to complete the master election + *

+ * You must start accumulating joins before calling this method. See {@link #startAccumulatingJoins()} + *

+ * The method will return once the local node has been elected as master or some failure/timeout has happened. + * The exact outcome is communicated via the callback parameter, which is guaranteed to be called. + * + * @param requiredMasterJoins the number of joins from master eligible needed to complete the election + * @param timeValue how long to wait before failing. A timeout is communicated via the callback's onFailure method. + * @param callback the result of the election (success or failure) will be communicated by calling methods on this + * object + **/ + public void waitToBeElectedAsMaster(int requiredMasterJoins, TimeValue timeValue, final Callback callback) { + assert accumulateJoins.get() : "waitToBeElectedAsMaster is called but we are not accumulating joins"; + + final CountDownLatch done = new CountDownLatch(1); + final ElectionContext newContext = new ElectionContext(callback, requiredMasterJoins) { + @Override + void onClose() { + if (electionContext.compareAndSet(this, null)) { + stopAccumulatingJoins(); + } else { + assert false : "failed to remove current election context"; + } + done.countDown(); + } + }; + + if (electionContext.compareAndSet(null, newContext) == false) { + // should never happen, but be conservative + callback.onFailure(new IllegalStateException("double waiting for election")); + return; + } + try { + // check what we have so far.. + checkPendingJoinsAndElectIfNeeded(); + + try { + if (done.await(timeValue.millis(), TimeUnit.MILLISECONDS)) { + // callback handles everything + return; + } + } catch (InterruptedException e) { + + } + if (logger.isTraceEnabled()) { + final int pendingNodes; + synchronized (pendingJoinRequests) { + pendingNodes = pendingJoinRequests.size(); + } + logger.trace("timed out waiting to be elected. waited [{}]. pending node joins [{}]", timeValue, pendingNodes); + } + // callback will clear the context, if it's active + newContext.onFailure(new ElasticsearchTimeoutException("timed out waiting to be elected")); + } catch (Throwable t) { + logger.error("unexpected failure while waiting for incoming joins", t); + newContext.onFailure(t); + } + } + + /** + * Accumulates any future incoming join request. Pending join requests will be processed in the final steps of becoming a + * master or when {@link #stopAccumulatingJoins()} is called. + */ + public void startAccumulatingJoins() { + logger.trace("starting to accumulate joins"); + boolean b = accumulateJoins.getAndSet(true); + assert b == false : "double startAccumulatingJoins() calls"; + assert electionContext.get() == null : "startAccumulatingJoins() called, but there is an ongoing election context"; + } + + /** Stops accumulating joins. All pending joins will be processed. Future joins will be processed immediately */ + public void stopAccumulatingJoins() { + logger.trace("stopping join accumulation"); + assert electionContext.get() == null : "stopAccumulatingJoins() called, but there is an ongoing election context"; + boolean b = accumulateJoins.getAndSet(false); + assert b : "stopAccumulatingJoins() called but not accumulating"; + synchronized (pendingJoinRequests) { + if (pendingJoinRequests.size() > 0) { + processJoins("stopping to accumulate joins"); + } + } + } + + /** + * Processes or queues an incoming join request. + *

+ * Note: doesn't do any validation. This should have been done before. + */ + public void handleJoinRequest(final DiscoveryNode node, final MembershipAction.JoinCallback callback) { + synchronized (pendingJoinRequests) { + List nodeCallbacks = pendingJoinRequests.get(node); + if (nodeCallbacks == null) { + nodeCallbacks = new ArrayList<>(); + pendingJoinRequests.put(node, nodeCallbacks); + } + nodeCallbacks.add(callback); + } + if (accumulateJoins.get() == false) { + processJoins("join from node[" + node + "]"); + } else { + checkPendingJoinsAndElectIfNeeded(); + } + } + + /** + * checks if there is an on going request to become master and if it has enough pending joins. If so, the node will + * become master via a ClusterState update task. + */ + private void checkPendingJoinsAndElectIfNeeded() { + assert accumulateJoins.get() : "election check requested but we are not accumulating joins"; + final ElectionContext context = electionContext.get(); + if (context == null) { + return; + } + + int pendingMasterJoins=0; + synchronized (pendingJoinRequests) { + for (DiscoveryNode node : pendingJoinRequests.keySet()) { + if (node.isMasterNode()) { + pendingMasterJoins++; + } + } + } + if (pendingMasterJoins < context.requiredMasterJoins) { + logger.trace("not enough joins for election. Got [{}], required [{}]", pendingMasterJoins, context.requiredMasterJoins); + return; + } + if (context.pendingSetAsMasterTask.getAndSet(true)) { + logger.trace("elected as master task already submitted, ignoring..."); + return; + } + + final String source = "zen-disco-join(elected_as_master, [" + pendingMasterJoins + "] joins received)"; + clusterService.submitStateUpdateTask(source, Priority.IMMEDIATE, new ProcessJoinsTask() { + @Override + public ClusterState execute(ClusterState currentState) { + // Take into account the previous known nodes, if they happen not to be available + // then fault detection will remove these nodes. + + if (currentState.nodes().masterNode() != null) { + // TODO can we tie break here? we don't have a remote master cluster state version to decide on + logger.trace("join thread elected local node as master, but there is already a master in place: {}", currentState.nodes().masterNode()); + throw new NotMasterException("Node [" + clusterService.localNode() + "] not master for join request"); + } + + DiscoveryNodes.Builder builder = new DiscoveryNodes.Builder(currentState.nodes()).masterNodeId(currentState.nodes().localNode().id()); + // update the fact that we are the master... + ClusterBlocks clusterBlocks = ClusterBlocks.builder().blocks(currentState.blocks()).removeGlobalBlock(discoverySettings.getNoMasterBlock()).build(); + currentState = ClusterState.builder(currentState).nodes(builder).blocks(clusterBlocks).build(); + + // reroute now to remove any dead nodes (master may have stepped down when they left and didn't update the routing table) + RoutingAllocation.Result result = routingService.getAllocationService().reroute(currentState); + if (result.changed()) { + currentState = ClusterState.builder(currentState).routingResult(result).build(); + } + + // Add the incoming join requests. + // Note: we only do this now (after the reroute) to avoid assigning shards to these nodes. 
+ return super.execute(currentState); + } + + @Override + public boolean runOnlyOnMaster() { + return false; + } + + @Override + public void onFailure(String source, Throwable t) { + super.onFailure(source, t); + context.onFailure(t); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + super.clusterStateProcessed(source, oldState, newState); + context.onElectedAsMaster(newState); + } + }); + } + + /** process all pending joins */ + private void processJoins(String reason) { + clusterService.submitStateUpdateTask("zen-disco-join(" + reason + ")", Priority.URGENT, new ProcessJoinsTask()); + } + + + public interface Callback { + void onElectedAsMaster(ClusterState state); + + void onFailure(Throwable t); + } + + static abstract class ElectionContext implements Callback { + private final Callback callback; + private final int requiredMasterJoins; + + /** set to true after enough joins have been seen and a cluster update task is submitted to become master */ + final AtomicBoolean pendingSetAsMasterTask = new AtomicBoolean(); + final AtomicBoolean closed = new AtomicBoolean(); + + ElectionContext(Callback callback, int requiredMasterJoins) { + this.callback = callback; + this.requiredMasterJoins = requiredMasterJoins; + } + + abstract void onClose(); + + @Override + public void onElectedAsMaster(ClusterState state) { + assert pendingSetAsMasterTask.get() : "onElectedAsMaster called but pendingSetAsMasterTask is not set"; + if (closed.compareAndSet(false, true)) { + try { + onClose(); + } finally { + callback.onElectedAsMaster(state); + } + } + } + + @Override + public void onFailure(Throwable t) { + if (closed.compareAndSet(false, true)) { + try { + onClose(); + } finally { + callback.onFailure(t); + } + } + } + } + + + /** + * Processes any pending joins via a ClusterState update task. + * Note: this task automatically fails (and fails all pending joins) if the current node is not marked as master + */ + class ProcessJoinsTask extends ProcessedClusterStateUpdateTask { + + private final List joinCallbacksToRespondTo = new ArrayList<>(); + private boolean nodeAdded = false; + + @Override + public ClusterState execute(ClusterState currentState) { + DiscoveryNodes.Builder nodesBuilder; + synchronized (pendingJoinRequests) { + if (pendingJoinRequests.isEmpty()) { + return currentState; + } + + nodesBuilder = DiscoveryNodes.builder(currentState.nodes()); + Iterator>> iterator = pendingJoinRequests.entrySet().iterator(); + while (iterator.hasNext()) { + Map.Entry> entry = iterator.next(); + final DiscoveryNode node = entry.getKey(); + joinCallbacksToRespondTo.addAll(entry.getValue()); + iterator.remove(); + if (currentState.nodes().nodeExists(node.id())) { + logger.debug("received a join request for an existing node [{}]", node); + } else { + nodeAdded = true; + nodesBuilder.put(node); + for (DiscoveryNode existingNode : currentState.nodes()) { + if (node.address().equals(existingNode.address())) { + nodesBuilder.remove(existingNode.id()); + logger.warn("received join request from node [{}], but found existing node {} with same address, removing existing node", node, existingNode); + } + } + } + } + } + + // we must return a new cluster state instance to force publishing. 
+ + /** + * Processes any pending joins via a ClusterState update task. + * Note: this task automatically fails (and fails all pending joins) if the current node is not marked as master + */ + class ProcessJoinsTask extends ProcessedClusterStateUpdateTask { + + private final List<MembershipAction.JoinCallback> joinCallbacksToRespondTo = new ArrayList<>(); + private boolean nodeAdded = false; + + @Override + public ClusterState execute(ClusterState currentState) { + DiscoveryNodes.Builder nodesBuilder; + synchronized (pendingJoinRequests) { + if (pendingJoinRequests.isEmpty()) { + return currentState; + } + + nodesBuilder = DiscoveryNodes.builder(currentState.nodes()); + Iterator<Map.Entry<DiscoveryNode, List<MembershipAction.JoinCallback>>> iterator = pendingJoinRequests.entrySet().iterator(); + while (iterator.hasNext()) { + Map.Entry<DiscoveryNode, List<MembershipAction.JoinCallback>> entry = iterator.next(); + final DiscoveryNode node = entry.getKey(); + joinCallbacksToRespondTo.addAll(entry.getValue()); + iterator.remove(); + if (currentState.nodes().nodeExists(node.id())) { + logger.debug("received a join request for an existing node [{}]", node); + } else { + nodeAdded = true; + nodesBuilder.put(node); + for (DiscoveryNode existingNode : currentState.nodes()) { + if (node.address().equals(existingNode.address())) { + nodesBuilder.remove(existingNode.id()); + logger.warn("received join request from node [{}], but found existing node {} with same address, removing existing node", node, existingNode); + } + } + } + } + } + + // we must return a new cluster state instance to force publishing. This is important + // for the joining node to finalize its join and set us as a master + final ClusterState.Builder newState = ClusterState.builder(currentState); + if (nodeAdded) { + newState.nodes(nodesBuilder); + } + + return newState.build(); + } + + @Override + public void onNoLongerMaster(String source) { + // we are rejected, so drain all pending tasks (execute was never run) + synchronized (pendingJoinRequests) { + Iterator<Map.Entry<DiscoveryNode, List<MembershipAction.JoinCallback>>> iterator = pendingJoinRequests.entrySet().iterator(); + while (iterator.hasNext()) { + Map.Entry<DiscoveryNode, List<MembershipAction.JoinCallback>> entry = iterator.next(); + joinCallbacksToRespondTo.addAll(entry.getValue()); + iterator.remove(); + } + } + Exception e = new NotMasterException("Node [" + clusterService.localNode() + "] not master for join request"); + innerOnFailure(e); + } + + void innerOnFailure(Throwable t) { + for (MembershipAction.JoinCallback callback : joinCallbacksToRespondTo) { + try { + callback.onFailure(t); + } catch (Exception e) { + logger.error("error during task failure", e); + } + } + } + + @Override + public void onFailure(String source, Throwable t) { + logger.error("unexpected failure during [{}]", t, source); + innerOnFailure(t); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + if (nodeAdded) { + // we do not reroute in the same cluster state update, since in certain places we rely on + // the node being present in the cluster state (as sampled from ClusterService#state); also, + // shard transitions need to be better handled in such cases + routingService.reroute("post_node_add"); + } + for (MembershipAction.JoinCallback callback : joinCallbacksToRespondTo) { + try { + callback.onSuccess(); + } catch (Exception e) { + logger.error("unexpected error during [{}]", e, source); + } + } + } + } +} \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index 57dbcb98a68..892b797575a 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -38,7 +38,6 @@ import org.elasticsearch.cluster.settings.ClusterDynamicSettings; import org.elasticsearch.cluster.settings.DynamicSettings; import org.elasticsearch.cluster.settings.Validator; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.inject.Inject; @@ -93,6 +92,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen public final static String SETTING_MAX_PINGS_FROM_ANOTHER_MASTER = "discovery.zen.max_pings_from_another_master"; public final static String SETTING_SEND_LEAVE_REQUEST = "discovery.zen.send_leave_request"; public final static String SETTING_MASTER_ELECTION_FILTER_CLIENT = "discovery.zen.master_election.filter_client"; + public final static String SETTING_MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT = "discovery.zen.master_election.wait_for_joins_timeout";
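+ // (the setting above is resolved in the constructor below and defaults to half of discovery.zen.join_timeout)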
 public final static String SETTING_MASTER_ELECTION_FILTER_DATA = "discovery.zen.master_election.filter_data"; public static final String DISCOVERY_REJOIN_ACTION_NAME = "internal:discovery/zen/rejoin"; @@ -126,6 +126,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen private final boolean masterElectionFilterClientNodes; private final boolean masterElectionFilterDataNodes; + private final TimeValue masterElectionWaitForJoinsTimeout; private final CopyOnWriteArrayList<InitialStateDiscoveryListener> initialStateListeners = new CopyOnWriteArrayList<>(); @@ -142,7 +143,9 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen @Nullable private NodeService nodeService; - private final BlockingQueue<Tuple<DiscoveryNode, MembershipAction.JoinCallback>> processJoinRequests = ConcurrentCollections.newBlockingQueue(); + + // must be initialized in doStart(), when we have the routingService set + private volatile NodeJoinController nodeJoinController; @Inject public ZenDiscovery(Settings settings, ClusterName clusterName, ThreadPool threadPool, @@ -169,6 +172,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen this.masterElectionFilterClientNodes = settings.getAsBoolean(SETTING_MASTER_ELECTION_FILTER_CLIENT, true); this.masterElectionFilterDataNodes = settings.getAsBoolean(SETTING_MASTER_ELECTION_FILTER_DATA, false); + this.masterElectionWaitForJoinsTimeout = settings.getAsTime(SETTING_MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT, TimeValue.timeValueMillis(joinTimeout.millis() / 2)); this.rejoinOnMasterGone = settings.getAsBoolean(SETTING_REJOIN_ON_MASTER_GONE, true); if (this.joinRetryAttempts < 1) { @@ -230,6 +234,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen nodesFD.setLocalNode(clusterService.localNode()); joinThreadControl.start(); pingService.start(); + this.nodeJoinController = new NodeJoinController(clusterService, routingService, discoverySettings, settings); // start the join thread from a cluster state update. See {@link JoinThreadControl} for details. clusterService.submitStateUpdateTask("initial_join", new ClusterStateNonMasterUpdateTask() { @@ -353,6 +358,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen private void innerJoinCluster() { DiscoveryNode masterNode = null; final Thread currentThread = Thread.currentThread(); + nodeJoinController.startAccumulatingJoins(); while (masterNode == null && joinThreadControl.joinThreadActive(currentThread)) { masterNode = findMaster(); } @@ -363,52 +369,32 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen } if (clusterService.localNode().equals(masterNode)) { - clusterService.submitStateUpdateTask("zen-disco-join (elected_as_master)", Priority.IMMEDIATE, new ProcessedClusterStateNonMasterUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) { - // Take into account the previous known nodes, if they happen not to be available - // then fault detection will remove these nodes. + final int requiredJoins = Math.max(0, electMaster.minimumMasterNodes() - 1); // we count as one + logger.debug("elected as master, waiting for incoming joins ([{}] needed)", requiredJoins); + nodeJoinController.waitToBeElectedAsMaster(requiredJoins, masterElectionWaitForJoinsTimeout, + new NodeJoinController.Callback() { + @Override + public void onElectedAsMaster(ClusterState state) { + joinThreadControl.markThreadAsDone(currentThread); + // we only start nodesFD if we are master (it may be that we received a cluster state while pinging) + nodesFD.updateNodesAndPing(state); // start the nodes FD + sendInitialStateEventIfNeeded(); + long count = clusterJoinsCounter.incrementAndGet(); + logger.trace("cluster joins counter set to [{}] (elected as master)", count); + } - if (currentState.nodes().masterNode() != null) { - // TODO can we tie break here?
we don't have a remote master cluster state version to decide on - logger.trace("join thread elected local node as master, but there is already a master in place: {}", currentState.nodes().masterNode()); - return currentState; + @Override + public void onFailure(Throwable t) { + logger.trace("failed while waiting for nodes to join, rejoining", t); + joinThreadControl.markThreadAsDoneAndStartNew(currentThread); + } } - DiscoveryNodes.Builder builder = new DiscoveryNodes.Builder(currentState.nodes()).masterNodeId(currentState.nodes().localNode().id()); - // update the fact that we are the master... - ClusterBlocks clusterBlocks = ClusterBlocks.builder().blocks(currentState.blocks()).removeGlobalBlock(discoverySettings.getNoMasterBlock()).build(); - currentState = ClusterState.builder(currentState).nodes(builder).blocks(clusterBlocks).build(); - - // eagerly run reroute to remove dead nodes from routing table - RoutingAllocation.Result result = routingService.getAllocationService().reroute(currentState); - return ClusterState.builder(currentState).routingResult(result).build(); - } - - @Override - public void onFailure(String source, Throwable t) { - logger.error("unexpected failure during [{}]", t, source); - joinThreadControl.markThreadAsDoneAndStartNew(currentThread); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - if (newState.nodes().localNodeMaster()) { - // we only starts nodesFD if we are master (it may be that we received a cluster state while pinging) - joinThreadControl.markThreadAsDone(currentThread); - nodesFD.updateNodesAndPing(newState); // start the nodes FD - } else { - // if we're not a master it means another node published a cluster state while we were pinging - // make sure we go through another pinging round and actively join it - joinThreadControl.markThreadAsDoneAndStartNew(currentThread); - } - sendInitialStateEventIfNeeded(); - long count = clusterJoinsCounter.incrementAndGet(); - logger.trace("cluster joins counter set to [{}] (elected as master)", count); - - } - }); + ); } else { + // process any incoming joins (they will fail because we are not the master) + nodeJoinController.stopAccumulatingJoins(); + // send join request final boolean success = joinElectedMaster(masterNode); @@ -798,7 +784,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen MetaData.Builder metaDataBuilder = MetaData.builder(updatedState.metaData()).removeAllIndices(); for (IndexMetaData indexMetaData : updatedState.metaData()) { IndexMetaData currentIndexMetaData = currentState.metaData().index(indexMetaData.index()); - if (currentIndexMetaData != null && currentIndexMetaData.isSameUUID(indexMetaData.uuid()) && + if (currentIndexMetaData != null && currentIndexMetaData.isSameUUID(indexMetaData.indexUUID()) && currentIndexMetaData.version() == indexMetaData.version()) { // safe to reuse metaDataBuilder.put(currentIndexMetaData, false); @@ -878,7 +864,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen } if (!currentState.nodes().masterNodeId().equals(newClusterState.nodes().masterNodeId())) { logger.warn("received a cluster state from a different master then the current one, rejecting (received {}, current {})", newClusterState.nodes().masterNode(), currentState.nodes().masterNode()); - throw new IllegalStateException("cluster state from a different master then the current one, rejecting (received " + newClusterState.nodes().masterNode() + ", current " + 
currentState.nodes().masterNode() + ")"); + throw new IllegalStateException("cluster state from a different master than the current one, rejecting (received " + newClusterState.nodes().masterNode() + ", current " + currentState.nodes().masterNode() + ")"); } else if (newClusterState.version() < currentState.version()) { // if the new state has a smaller version, and it has the same master node, then no need to process it logger.debug("received a cluster state that has a lower version than the current one, ignoring (received {}, current {})", newClusterState.version(), currentState.version()); @@ -893,6 +879,8 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen if (!transportService.addressSupported(node.address().getClass())) { // TODO, what should we do now? Maybe inform that node that its crap? logger.warn("received a wrong address type from [{}], ignoring...", node); + } else if (nodeJoinController == null) { + throw new IllegalStateException("discovery module is not yet started"); + } else { // The minimum supported version for a node joining a master: Version minimumNodeJoinVersion = localNode().getVersion().minimumCompatibilityVersion(); @@ -910,88 +898,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen // validate the join request, will throw a failure if it fails, which will get back to the // node calling the join request membership.sendValidateJoinRequestBlocking(node, joinTimeout); - processJoinRequests.add(new Tuple<>(node, callback)); - clusterService.submitStateUpdateTask("zen-disco-receive(join from node[" + node + "])", Priority.URGENT, new ProcessedClusterStateUpdateTask() { - - private final List<Tuple<DiscoveryNode, MembershipAction.JoinCallback>> drainedJoinRequests = new ArrayList<>(); - private boolean nodeAdded = false; - - @Override - public ClusterState execute(ClusterState currentState) { - processJoinRequests.drainTo(drainedJoinRequests); - if (drainedJoinRequests.isEmpty()) { - return currentState; - } - - DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(currentState.nodes()); - for (Tuple<DiscoveryNode, MembershipAction.JoinCallback> task : drainedJoinRequests) { - DiscoveryNode node = task.v1(); - if (currentState.nodes().nodeExists(node.id())) { - logger.debug("received a join request for an existing node [{}]", node); - } else { - nodeAdded = true; - nodesBuilder.put(node); - for (DiscoveryNode existingNode : currentState.nodes()) { - if (node.address().equals(existingNode.address())) { - nodesBuilder.remove(existingNode.id()); - logger.warn("received join request from node [{}], but found existing node {} with same address, removing existing node", node, existingNode); - } - } - } - } - - - // we must return a new cluster state instance to force publishing.
This is important - // for the joining node to finalize it's join and set us as a master - final ClusterState.Builder newState = ClusterState.builder(currentState); - if (nodeAdded) { - newState.nodes(nodesBuilder); - } - - return newState.build(); - } - - @Override - public void onNoLongerMaster(String source) { - // we are rejected, so drain all pending task (execute never run) - processJoinRequests.drainTo(drainedJoinRequests); - Exception e = new NotMasterException("Node [" + clusterService.localNode() + "] not master for join request from [" + node + "]"); - innerOnFailure(e); - } - - void innerOnFailure(Throwable t) { - for (Tuple<DiscoveryNode, MembershipAction.JoinCallback> drainedTask : drainedJoinRequests) { - try { - drainedTask.v2().onFailure(t); - } catch (Exception e) { - logger.error("error during task failure", e); - } - } - } - - @Override - public void onFailure(String source, Throwable t) { - logger.error("unexpected failure during [{}]", t, source); - innerOnFailure(t); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - if (nodeAdded) { - // we reroute not in the same cluster state update since in certain areas we rely on - // the node to be in the cluster state (sampled from ClusterService#state) to be there, also - // shard transitions need to better be handled in such cases - routingService.reroute("post_node_add"); - } - for (Tuple<DiscoveryNode, MembershipAction.JoinCallback> drainedTask : drainedJoinRequests) { - try { - drainedTask.v2().onSuccess(); - } catch (Exception e) { - logger.error("unexpected error during [{}]", e, source); - } - } - } - }); + nodeJoinController.handleJoinRequest(node, callback); } } @@ -1404,4 +1311,4 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen } } -} +} \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java b/core/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java index 7fd585a6a41..78c13f8ce53 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java @@ -266,7 +266,7 @@ public class PublishClusterStateAction extends AbstractComponent { } else if (lastSeenClusterState != null) { Diff diff = lastSeenClusterState.readDiffFrom(in); lastSeenClusterState = diff.apply(lastSeenClusterState); - logger.debug("received diff cluster state version {} with uuid {}, diff size {}", lastSeenClusterState.version(), lastSeenClusterState.uuid(), request.bytes().length()); + logger.debug("received diff cluster state version {} with uuid {}, diff size {}", lastSeenClusterState.version(), lastSeenClusterState.stateUUID(), request.bytes().length()); } else { logger.debug("received diff for but don't have any local cluster state - requesting full state"); throw new IncompatibleClusterStateVersionException("have no local cluster state"); diff --git a/core/src/main/java/org/elasticsearch/env/Environment.java b/core/src/main/java/org/elasticsearch/env/Environment.java index de426df6d04..445ec7a61de 100644 --- a/core/src/main/java/org/elasticsearch/env/Environment.java +++ b/core/src/main/java/org/elasticsearch/env/Environment.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.settings.Settings; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URISyntaxException; import java.net.URL; import java.nio.file.FileStore; import java.nio.file.Files; @@ -178,6 +179,52 @@ public
class Environment { return PathUtils.get(repoFiles, location); } + /** + * Checks if the specified URL points to the local file system and, if it does, resolves it + * against the list of configured repository roots. + * + * If the specified URL doesn't match any of the roots, returns null. + */ + public URL resolveRepoURL(URL url) { + try { + if ("file".equalsIgnoreCase(url.getProtocol())) { + if (url.getHost() == null || "".equals(url.getHost())) { + // only local file URLs are supported + Path path = PathUtils.get(repoFiles, url.toURI()); + if (path == null) { + // Couldn't resolve against known repo locations + return null; + } + // Normalize URL + return path.toUri().toURL(); + } + return null; + } else if ("jar".equals(url.getProtocol())) { + String file = url.getFile(); + int pos = file.indexOf("!/"); + if (pos < 0) { + return null; + } + String jarTail = file.substring(pos); + String filePath = file.substring(0, pos); + URL internalUrl = new URL(filePath); + URL normalizedUrl = resolveRepoURL(internalUrl); + if (normalizedUrl == null) { + return null; + } + return new URL("jar", "", normalizedUrl.toExternalForm() + jarTail); + } else { + // It's not a file or jar URL and it didn't match the whitelist - reject + return null; + } + } catch (MalformedURLException ex) { + // cannot make sense of this file URL + return null; + } catch (URISyntaxException ex) { + return null; + } + }
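+ + // For example (an illustrative sketch, assuming path.repo includes /repos): a file:/repos/backup URL resolves to its normalized form, a jar:file:/repos/archive.zip!/some/file URL is rebuilt around the normalized inner file URL, and any http: URL returns null since only file: and jar: protocols are supported.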
 /** * The config location. */ diff --git a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java index 00c2c2437a2..8f0762b85b4 100644 --- a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -311,7 +311,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { * shard paths. The "write.lock" file is assumed to be under the shard * path's "index" directory as used by Elasticsearch. * - * @throws ElasticsearchException if any of the locks could not be acquired + * @throws LockObtainFailedException if any of the locks could not be acquired */ public static void acquireFSLockForPaths(@IndexSettings Settings indexSettings, Path... shardPaths) throws IOException { Lock[] locks = new Lock[shardPaths.length]; @@ -326,7 +326,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { try { locks[i] = Lucene.acquireWriteLock(dirs[i]); } catch (IOException ex) { - throw new ElasticsearchException("unable to acquire " + + throw new LockObtainFailedException("unable to acquire " + IndexWriter.WRITE_LOCK_NAME + " for " + p); } } diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java b/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java index 3afd4bb926d..e6cd435d9a3 100644 --- a/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java @@ -198,8 +198,14 @@ public class GatewayAllocator extends AbstractComponent { for (TransportNodesListGatewayStartedShards.NodeGatewayStartedShards nodeShardState : shardState.getData().values()) { long version = nodeShardState.version(); // -1 version means it does not exists, which is what the API returns, and what we expect to - logger.trace("[{}] on node [{}] has version [{}] of shard", shard, nodeShardState.getNode(), version); - nodesState.put(nodeShardState.getNode(), version); + if (nodeShardState.storeException() == null) { + logger.trace("[{}] on node [{}] has version [{}] of shard", shard, nodeShardState.getNode(), version); + nodesState.put(nodeShardState.getNode(), version); + } else { + // when there is a store exception, we disregard the reported version and treat it as -1 (same as if the shard does not exist) + logger.trace("[{}] on node [{}] has version [{}] but the store cannot be opened, treating as version -1", nodeShardState.storeException(), shard, nodeShardState.getNode(), version); + nodesState.put(nodeShardState.getNode(), -1); + } } int numberOfAllocationsFound = 0; @@ -339,7 +345,7 @@ public class GatewayAllocator extends AbstractComponent { // we found a match changed = true; // make sure we create one with the version from the recovered state - routingNodes.assign(new ShardRouting(shard, highestVersion), node.nodeId()); + routingNodes.initialize(new ShardRouting(shard, highestVersion), node.nodeId()); unassignedIterator.remove(); // found a node, so no throttling, no "no", and break out of the loop @@ -359,7 +365,7 @@ public class GatewayAllocator extends AbstractComponent { // we found a match changed = true; // make sure we create one with the version from the recovered state - routingNodes.assign(new ShardRouting(shard, highestVersion), node.nodeId()); + routingNodes.initialize(new ShardRouting(shard, highestVersion), node.nodeId()); unassignedIterator.remove(); } } else { @@ -514,7 +520,7 @@ public class GatewayAllocator extends AbstractComponent { } // we found a match changed = true; - routingNodes.assign(shard, lastNodeMatched.nodeId()); + routingNodes.initialize(shard, lastNodeMatched.nodeId()); unassignedIterator.remove(); } } else if (hasReplicaData == false) { diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayService.java b/core/src/main/java/org/elasticsearch/gateway/GatewayService.java index 71144a748e8..7f518936625 100644 --- a/core/src/main/java/org/elasticsearch/gateway/GatewayService.java +++ b/core/src/main/java/org/elasticsearch/gateway/GatewayService.java @@ -227,7 +227,7 @@ public class GatewayService extends AbstractLifecycleComponent i MetaData.Builder metaDataBuilder = MetaData.builder(recoveredState.metaData()); // automatically generate a UID for the metadata if we need to -
metaDataBuilder.generateUuidIfNeeded(); + metaDataBuilder.generateClusterUuidIfNeeded(); if (recoveredState.metaData().settings().getAsBoolean(MetaData.SETTING_READ_ONLY, false) || currentState.metaData().settings().getAsBoolean(MetaData.SETTING_READ_ONLY, false)) { blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK); diff --git a/core/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java b/core/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java index 9ea7cf5e60b..523e9bc5414 100644 --- a/core/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java +++ b/core/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java @@ -35,6 +35,7 @@ import org.apache.lucene.util.IOUtils; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.lucene.store.IndexOutputOutputStream; import org.elasticsearch.common.lucene.store.InputStreamIndexInput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -114,7 +115,7 @@ public abstract class MetaDataStateFormat { CodecUtil.writeHeader(out, STATE_FILE_CODEC, STATE_FILE_VERSION); out.writeInt(format.index()); out.writeLong(version); - try (XContentBuilder builder = newXContentBuilder(format, new org.elasticsearch.common.lucene.store.OutputStreamIndexOutput(out) { + try (XContentBuilder builder = newXContentBuilder(format, new IndexOutputOutputStream(out) { @Override public void close() throws IOException { // this is important since some of the XContentBuilders write bytes on close. diff --git a/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java b/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java index 6fa20433283..a9362a56fe9 100644 --- a/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java +++ b/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java @@ -27,6 +27,7 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.nodes.*; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Nullable; @@ -52,9 +53,11 @@ public class TransportNodesListGatewayMetaState extends TransportNodesAction listener) { - execute(new Request(shardId, indexMetaData.getUUID(), nodesIds), listener); + execute(new Request(shardId, indexMetaData.getIndexUUID(), nodesIds), listener); } @Override @@ -118,9 +122,18 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction ShardStateMetaData shardStateMetaData = ShardStateMetaData.FORMAT.loadLatestState(logger, nodeEnv.availableShardPaths(request.shardId)); if (shardStateMetaData != null) { final IndexMetaData metaData = clusterService.state().metaData().index(shardId.index().name()); // it's a mystery why this is sometimes null - if (metaData != null && canOpenIndex(request.getShardId(), metaData) == false) { - logger.trace("{} can't open index for shard [{}]", shardId, shardStateMetaData); - return new NodeGatewayStartedShards(clusterService.localNode(), -1); + if (metaData != null) { + ShardPath shardPath = null; + try { + shardPath = ShardPath.loadShardPath(logger, 
nodeEnv, shardId, metaData.settings()); + if (shardPath == null) { + throw new IllegalStateException(shardId + " no shard path found"); + } + Store.tryOpenIndex(shardPath.resolveIndex()); + } catch (Exception exception) { + logger.trace("{} can't open index for shard [{}] in path [{}]", exception, shardId, shardStateMetaData, (shardPath != null) ? shardPath.resolveIndex() : ""); + return new NodeGatewayStartedShards(clusterService.localNode(), shardStateMetaData.version, exception); + } } // old shard metadata doesn't have the actual index UUID so we need to check if the actual uuid in the metadata // is equal to IndexMetaData.INDEX_UUID_NA_VALUE otherwise this shard doesn't belong to the requested index. @@ -139,18 +152,6 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction } } - private boolean canOpenIndex(ShardId shardId, IndexMetaData metaData) throws IOException { - // try and see if we an list unallocated - if (metaData == null) { - return false; - } - final ShardPath shardPath = ShardPath.loadShardPath(logger, nodeEnv, shardId, metaData.settings()); - if (shardPath == null) { - return false; - } - return Store.canOpenIndex(logger, shardPath.resolveIndex()); - } - @Override protected boolean accumulateExceptions() { return true; @@ -269,29 +270,48 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction public static class NodeGatewayStartedShards extends BaseNodeResponse { private long version = -1; + private Throwable storeException = null; NodeGatewayStartedShards() { } - public NodeGatewayStartedShards(DiscoveryNode node, long version) { + this(node, version, null); + } + + public NodeGatewayStartedShards(DiscoveryNode node, long version, Throwable storeException) { super(node); this.version = version; + this.storeException = storeException; } public long version() { return this.version; } + public Throwable storeException() { + return this.storeException; + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); version = in.readLong(); + if (in.readBoolean()) { + storeException = in.readThrowable(); + } + } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeLong(version); + if (storeException != null) { + out.writeBoolean(true); + out.writeThrowable(storeException); + } else { + out.writeBoolean(false); + } } } } diff --git a/core/src/main/java/org/elasticsearch/http/netty/NettyHttpChannel.java b/core/src/main/java/org/elasticsearch/http/netty/NettyHttpChannel.java index c5bcab55679..6e963261449 100644 --- a/core/src/main/java/org/elasticsearch/http/netty/NettyHttpChannel.java +++ b/core/src/main/java/org/elasticsearch/http/netty/NettyHttpChannel.java @@ -20,12 +20,10 @@ package org.elasticsearch.http.netty; import com.google.common.base.Strings; -import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.netty.NettyUtils; import org.elasticsearch.common.netty.ReleaseChannelFutureListener; import org.elasticsearch.http.HttpChannel; import org.elasticsearch.http.netty.pipelining.OrderedDownstreamChannelEvent; @@ -34,7 +32,6 @@ import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.support.RestUtils; import 
org.jboss.netty.buffer.ChannelBuffer; -import org.jboss.netty.buffer.ChannelBuffers; import org.jboss.netty.channel.*; import org.jboss.netty.handler.codec.http.*; @@ -100,7 +97,10 @@ public class NettyHttpChannel extends HttpChannel { String originHeader = request.header(ORIGIN); if (!Strings.isNullOrEmpty(originHeader)) { if (corsPattern == null) { - resp.headers().add(ACCESS_CONTROL_ALLOW_ORIGIN, transport.settings().get(SETTING_CORS_ALLOW_ORIGIN, "*")); + String allowedOrigins = transport.settings().get(SETTING_CORS_ALLOW_ORIGIN, null); + if (!Strings.isNullOrEmpty(allowedOrigins)) { + resp.headers().add(ACCESS_CONTROL_ALLOW_ORIGIN, allowedOrigins); + } } else { resp.headers().add(ACCESS_CONTROL_ALLOW_ORIGIN, corsPattern.matcher(originHeader).matches() ? originHeader : "null"); } diff --git a/core/src/main/java/org/elasticsearch/index/AlreadyExpiredException.java b/core/src/main/java/org/elasticsearch/index/AlreadyExpiredException.java index 705f9a40a34..24e910b905e 100644 --- a/core/src/main/java/org/elasticsearch/index/AlreadyExpiredException.java +++ b/core/src/main/java/org/elasticsearch/index/AlreadyExpiredException.java @@ -36,7 +36,7 @@ public class AlreadyExpiredException extends ElasticsearchException implements I public AlreadyExpiredException(String index, String type, String id, long timestamp, long ttl, long now) { super("already expired [" + index + "]/[" + type + "]/[" + id + "] due to expire at [" + (timestamp + ttl) + "] and was processed at [" + now + "]"); - this.index = index; + this.setIndex(index); this.type = type; this.id = id; this.timestamp = timestamp; diff --git a/core/src/main/java/org/elasticsearch/index/IndexException.java b/core/src/main/java/org/elasticsearch/index/IndexException.java deleted file mode 100644 index c309ebcb7f1..00000000000 --- a/core/src/main/java/org/elasticsearch/index/IndexException.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentBuilder; - -import java.io.IOException; - -/** - * - */ -public class IndexException extends ElasticsearchException { - - private final Index index; - - public IndexException(Index index, String msg, Object... args) { - this(index, msg, null, args); - } - - public IndexException(Index index, String msg, Throwable cause, Object... 
args) { - super(msg, cause); - this.index = index; - } - - public Index index() { - return index; - } - - @Override - protected void innerToXContent(XContentBuilder builder, Params params) throws IOException { - if (index != null) { - builder.field("index", index.getName()); - } - super.innerToXContent(builder, params); - } - - @Override - public String toString() { - return "[" + (index == null ? "_na" : index.name()) + "] " + getMessage(); - } - - - public IndexException(StreamInput in) throws IOException{ - super(in); - index = in.readBoolean() ? Index.readIndexName(in) : null; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeOptionalStreamable(index); - } -} diff --git a/core/src/main/java/org/elasticsearch/index/store/StoreException.java b/core/src/main/java/org/elasticsearch/index/IndexNotFoundException.java similarity index 66% rename from core/src/main/java/org/elasticsearch/index/store/StoreException.java rename to core/src/main/java/org/elasticsearch/index/IndexNotFoundException.java index d221583e0d7..bc7e55d5a01 100644 --- a/core/src/main/java/org/elasticsearch/index/store/StoreException.java +++ b/core/src/main/java/org/elasticsearch/index/IndexNotFoundException.java @@ -16,25 +16,25 @@ * specific language governing permissions and limitations * under the License. */ +package org.elasticsearch.index; -package org.elasticsearch.index.store; - +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.index.shard.IndexShardException; -import org.elasticsearch.index.shard.ShardId; import java.io.IOException; -/** - * - */ -public class StoreException extends IndexShardException { +public final class IndexNotFoundException extends ResourceNotFoundException { - public StoreException(ShardId shardId, String msg, Throwable cause) { - super(shardId, msg, cause); + public IndexNotFoundException(String index) { + this(index, null); } - public StoreException(StreamInput in) throws IOException{ + public IndexNotFoundException(String index, Throwable cause) { + super("no such index", cause); + setIndex(index); + } + + public IndexNotFoundException(StreamInput in) throws IOException { super(in); } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/index/IndexService.java b/core/src/main/java/org/elasticsearch/index/IndexService.java index 560105455ab..3addeb59a40 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexService.java +++ b/core/src/main/java/org/elasticsearch/index/IndexService.java @@ -24,6 +24,7 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.Iterators; import org.apache.lucene.util.IOUtils; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; @@ -173,10 +174,10 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone /** * Return the shard with the provided id, or throw an exception if it doesn't exist. 
*/ - public IndexShard shardSafe(int shardId) throws IndexShardMissingException { + public IndexShard shardSafe(int shardId) { IndexShard indexShard = shard(shardId); if (indexShard == null) { - throw new IndexShardMissingException(new ShardId(index, shardId)); + throw new ShardNotFoundException(new ShardId(index, shardId)); } return indexShard; } @@ -242,16 +243,16 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone /** * Return the shard injector for the provided id, or throw an exception if there is no such shard. */ - public Injector shardInjectorSafe(int shardId) throws IndexShardMissingException { + public Injector shardInjectorSafe(int shardId) { Tuple<IndexShard, Injector> tuple = shards.get(shardId); if (tuple == null) { - throw new IndexShardMissingException(new ShardId(index, shardId)); + throw new ShardNotFoundException(new ShardId(index, shardId)); } return tuple.v2(); } public String indexUUID() { - return indexSettings.get(IndexMetaData.SETTING_UUID, IndexMetaData.INDEX_UUID_NA_VALUE); + return indexSettings.get(IndexMetaData.SETTING_INDEX_UUID, IndexMetaData.INDEX_UUID_NA_VALUE); } // NOTE: O(numShards) cost, but numShards should be smallish? @@ -283,8 +284,20 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone boolean success = false; Injector shardInjector = null; try { - - ShardPath path = ShardPath.loadShardPath(logger, nodeEnv, shardId, indexSettings); + lock = nodeEnv.shardLock(shardId, TimeUnit.SECONDS.toMillis(5)); + ShardPath path; + try { + path = ShardPath.loadShardPath(logger, nodeEnv, shardId, indexSettings); + } catch (IllegalStateException ex) { + logger.warn("{} failed to load shard path, trying to archive leftover", shardId); + try { + ShardPath.deleteLeftoverShardDirectory(logger, nodeEnv, lock, indexSettings); + path = ShardPath.loadShardPath(logger, nodeEnv, shardId, indexSettings); + } catch (Throwable t) { + t.addSuppressed(ex); + throw t; + } + } if (path == null) { path = ShardPath.selectNewPathForShard(nodeEnv, shardId, indexSettings, getAvgShardSizeInBytes(), this); logger.debug("{} creating using a new path [{}]", shardId, path); @@ -292,7 +305,6 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone logger.debug("{} creating using an existing path [{}]", shardId, path); } - lock = nodeEnv.shardLock(shardId, TimeUnit.SECONDS.toMillis(5)); if (shards.containsKey(shardId.id())) { throw new IndexShardAlreadyExistsException(shardId + " already exists"); } @@ -316,9 +328,13 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone try { shardInjector = modules.createChildInjector(injector); } catch (CreationException e) { - throw new IndexShardCreationException(shardId, Injectors.getFirstErrorFailure(e)); + ElasticsearchException ex = new ElasticsearchException("failed to create shard", Injectors.getFirstErrorFailure(e)); + ex.setShard(shardId); + throw ex; } catch (Throwable e) { - throw new IndexShardCreationException(shardId, e); + ElasticsearchException ex = new ElasticsearchException("failed to create shard", e); + ex.setShard(shardId); + throw ex; } IndexShard indexShard = shardInjector.getInstance(IndexShard.class); @@ -328,8 +344,10 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone shards = newMapBuilder(shards).put(shardId.id(), new Tuple<>(indexShard, shardInjector)).immutableMap(); success = true; return indexShard; - } catch
(IOException e) { + ElasticsearchException ex = new ElasticsearchException("failed to create shard", e); + ex.setShard(shardId); + throw ex; } finally { if (success == false) { IOUtils.closeWhileHandlingException(lock); diff --git a/core/src/main/java/org/elasticsearch/index/engine/EngineException.java b/core/src/main/java/org/elasticsearch/index/engine/EngineException.java index a856ed190a1..d7487ef66f1 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/EngineException.java +++ b/core/src/main/java/org/elasticsearch/index/engine/EngineException.java @@ -19,8 +19,8 @@ package org.elasticsearch.index.engine; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.index.shard.IndexShardException; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; @@ -28,14 +28,15 @@ import java.io.IOException; /** * */ -public class EngineException extends IndexShardException { +public class EngineException extends ElasticsearchException { public EngineException(ShardId shardId, String msg) { - super(shardId, msg); + this(shardId, msg, null); } public EngineException(ShardId shardId, String msg, Throwable cause) { - super(shardId, msg, cause); + super(msg, cause); + setShard(shardId); } public EngineException(StreamInput in) throws IOException{ diff --git a/core/src/main/java/org/elasticsearch/index/engine/FlushingRecoveryCounter.java b/core/src/main/java/org/elasticsearch/index/engine/FlushingRecoveryCounter.java deleted file mode 100644 index 223fa306697..00000000000 --- a/core/src/main/java/org/elasticsearch/index/engine/FlushingRecoveryCounter.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index.engine; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.index.shard.IllegalIndexShardStateException; -import org.elasticsearch.index.store.Store; - -/** - * A special {@link RecoveryCounter} that flushes the engine when all - * recoveries have completed - */ -public final class FlushingRecoveryCounter extends RecoveryCounter { - - private final Engine engine; - private final ESLogger logger; - - FlushingRecoveryCounter(Engine engine, Store store, ESLogger logger) { - super(store); - this.engine = engine; - this.logger = logger; - } - - @Override - int endRecovery() { - int left = super.endRecovery(); - if (left == 0) { - try { - engine.flush(); - } catch (IllegalIndexShardStateException|FlushNotAllowedEngineException e) { - // we are being closed, or in created state, ignore - // OR, we are not allowed to perform flush, ignore - } catch (Throwable e) { - logger.warn("failed to flush shard post recovery", e); - } - } - return left; - } -} \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java b/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java index 73921e216b5..f89b9ce471a 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java @@ -30,7 +30,6 @@ import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit; -import org.elasticsearch.index.shard.IndexShardException; import org.elasticsearch.index.translog.Translog; import java.io.IOException; @@ -82,7 +81,7 @@ public class ShadowEngine extends Engine { this.lastCommittedSegmentInfos = readLastCommittedSegmentInfos(searcherManager, store); success = true; } else { - throw new IndexShardException(shardId, "failed to open a shadow engine after" + + throw new IllegalStateException("failed to open a shadow engine after " + nonexistentRetryTime + "ms, " + "directory is not an index"); } diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/ParentChildIndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/ParentChildIndexFieldData.java index d00e42f00b6..2087872d454 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/ParentChildIndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/ParentChildIndexFieldData.java @@ -49,7 +49,6 @@ import org.elasticsearch.index.fielddata.ordinals.Ordinals; import org.elasticsearch.index.fielddata.ordinals.OrdinalsBuilder; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentTypeListener; -import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MappedFieldType.Names; import org.elasticsearch.index.mapper.MapperService; @@ -95,7 +94,7 @@ public class ParentChildIndexFieldData extends AbstractIndexFieldData parentTypes; synchronized (lock) { @@ -398,6 +397,10 @@ public class ParentChildIndexFieldData extends AbstractIndexFieldData fieldNodeMap, Version indexVersionCreated, String message) { if (!fieldNodeMap.isEmpty()) { - if (indexVersionCreated.onOrAfter(Version.V_2_0_0)) { + if
(indexVersionCreated.onOrAfter(Version.V_2_0_0_beta1)) { throw new MapperParsingException(message + getRemainingFields(fieldNodeMap)); } else { logger.debug(message + "{}", getRemainingFields(fieldNodeMap)); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java index 78302804b1d..5f6893ed2aa 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java @@ -224,7 +224,7 @@ public abstract class FieldMapper extends Mapper { } protected String buildIndexName(BuilderContext context) { - if (context.indexCreatedVersion().onOrAfter(Version.V_2_0_0)) { + if (context.indexCreatedVersion().onOrAfter(Version.V_2_0_0_beta1)) { return buildFullName(context); } String actualIndexName = indexName == null ? name : indexName; @@ -232,7 +232,7 @@ public abstract class FieldMapper extends Mapper { } protected String buildIndexNameClean(BuilderContext context) { - if (context.indexCreatedVersion().onOrAfter(Version.V_2_0_0)) { + if (context.indexCreatedVersion().onOrAfter(Version.V_2_0_0_beta1)) { return buildFullName(context); } return indexName == null ? name : indexName; @@ -253,7 +253,7 @@ public abstract class FieldMapper extends Mapper { fieldType.setFieldDataType(new FieldDataType(fieldType.fieldDataType().getType(), settings)); } boolean defaultDocValues = false; // pre 2.0 - if (context.indexCreatedVersion().onOrAfter(Version.V_2_0_0)) { + if (context.indexCreatedVersion().onOrAfter(Version.V_2_0_0_beta1)) { defaultDocValues = fieldType.tokenized() == false && fieldType.indexOptions() != IndexOptions.NONE; } // backcompat for "fielddata: format: docvalues" for now... @@ -279,7 +279,7 @@ public abstract class FieldMapper extends Mapper { protected FieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) { super(simpleName); assert indexSettings != null; - this.indexCreatedBefore2x = Version.indexCreated(indexSettings).before(Version.V_2_0_0); + this.indexCreatedBefore2x = Version.indexCreated(indexSettings).before(Version.V_2_0_0_beta1); this.fieldTypeRef = new MappedFieldTypeReference(fieldType); // the reference ctor freezes the field type defaultFieldType.freeze(); this.defaultFieldType = defaultFieldType; diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java b/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java index 152bdca7575..fff041090a8 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java @@ -460,7 +460,7 @@ public abstract class MappedFieldType extends FieldType { return new FuzzyQuery(createTerm(value), fuzziness.asDistance(BytesRefs.toString(value)), prefixLength, maxExpansions, transpositions); } - public Query prefixQuery(Object value, @Nullable MultiTermQuery.RewriteMethod method, @Nullable QueryParseContext context) { + public Query prefixQuery(String value, @Nullable MultiTermQuery.RewriteMethod method, @Nullable QueryParseContext context) { PrefixQuery query = new PrefixQuery(createTerm(value)); if (method != null) { query.setRewriteMethod(method); @@ -468,7 +468,7 @@ public abstract class MappedFieldType extends FieldType { return query; } - public Query regexpQuery(Object value, int flags, int maxDeterminizedStates, @Nullable 
MultiTermQuery.RewriteMethod method, @Nullable QueryParseContext context) { + public Query regexpQuery(String value, int flags, int maxDeterminizedStates, @Nullable MultiTermQuery.RewriteMethod method, @Nullable QueryParseContext context) { RegexpQuery query = new RegexpQuery(createTerm(value), flags, maxDeterminizedStates); if (method != null) { query.setRewriteMethod(method); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 468176e40b4..30efe7b25ab 100755 --- a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -272,7 +272,7 @@ public class MapperService extends AbstractIndexComponent { if (mapper.type().contains(",")) { throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] should not include ',' in it"); } - if (Version.indexCreated(indexSettings).onOrAfter(Version.V_2_0_0) && mapper.type().equals(mapper.parentFieldMapper().type())) { + if (Version.indexCreated(indexSettings).onOrAfter(Version.V_2_0_0_beta1) && mapper.type().equals(mapper.parentFieldMapper().type())) { throw new IllegalArgumentException("The [_parent.type] option can't point to the same type"); } if (mapper.type().contains(".") && !PercolatorService.TYPE_NAME.equals(mapper.type())) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/Mapping.java b/core/src/main/java/org/elasticsearch/index/mapper/Mapping.java index b6a015e521d..c3b22c6f512 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/Mapping.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/Mapping.java @@ -67,7 +67,7 @@ public final class Mapping implements ToXContent { this.metadataMappers = metadataMappers; ImmutableMap.Builder<Class<? extends MetadataFieldMapper>, MetadataFieldMapper> builder = ImmutableMap.builder(); for (MetadataFieldMapper metadataMapper : metadataMappers) { - if (indexCreated.before(Version.V_2_0_0) && LEGACY_INCLUDE_IN_OBJECT.contains(metadataMapper.name())) { + if (indexCreated.before(Version.V_2_0_0_beta1) && LEGACY_INCLUDE_IN_OBJECT.contains(metadataMapper.name())) { root.putMapper(metadataMapper); } builder.put(metadataMapper.getClass(), metadataMapper); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java index 8f2fe5cb639..d225b3f6ae6 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java @@ -79,7 +79,7 @@ public class BinaryFieldMapper extends FieldMapper { @Override public BinaryFieldMapper build(BuilderContext context) { setupFieldType(context); - ((BinaryFieldType)fieldType).setTryUncompressing(context.indexCreatedVersion().before(Version.V_2_0_0)); + ((BinaryFieldType)fieldType).setTryUncompressing(context.indexCreatedVersion().before(Version.V_2_0_0_beta1)); return new BinaryFieldMapper(name, fieldType, defaultFieldType, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); } @@ -93,7 +93,7 @@ public class BinaryFieldMapper extends FieldMapper { for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) { Map.Entry<String, Object> entry = iterator.next(); String fieldName = entry.getKey(); - if (parserContext.indexVersionCreated().before(Version.V_2_0_0) && + if (parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1) &&
(parserContext.parseFieldMatcher().match(fieldName, COMPRESS) || parserContext.parseFieldMatcher().match(fieldName, COMPRESS_THRESHOLD))) { iterator.remove(); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java index 5cf925c9e6b..6297c6a2a78 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java @@ -171,7 +171,7 @@ public class CompletionFieldMapper extends FieldMapper { continue; } if (Fields.ANALYZER.equals(fieldName) || // index_analyzer is for backcompat, remove for v3.0 - fieldName.equals("index_analyzer") && parserContext.indexVersionCreated().before(Version.V_2_0_0)) { + fieldName.equals("index_analyzer") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { indexAnalyzer = getNamedAnalyzer(parserContext, fieldNode.toString()); iterator.remove(); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java index eccf9035a56..5a96541d91b 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java @@ -125,7 +125,7 @@ public class DateFieldMapper extends NumberFieldMapper { } protected void setupFieldType(BuilderContext context) { - if (Version.indexCreated(context.indexSettings()).before(Version.V_2_0_0) && + if (Version.indexCreated(context.indexSettings()).before(Version.V_2_0_0_beta1) && !fieldType().dateTimeFormatter().format().contains("epoch_")) { String format = fieldType().timeUnit().equals(TimeUnit.SECONDS) ? 
"epoch_second" : "epoch_millis"; fieldType().setDateTimeFormatter(Joda.forPattern(format + "||" + fieldType().dateTimeFormatter().format())); @@ -183,7 +183,7 @@ public class DateFieldMapper extends NumberFieldMapper { } } if (!configuredFormat) { - if (parserContext.indexVersionCreated().onOrAfter(Version.V_2_0_0)) { + if (parserContext.indexVersionCreated().onOrAfter(Version.V_2_0_0_beta1)) { builder.dateTimeFormatter(Defaults.DATE_TIME_FORMATTER); } else { builder.dateTimeFormatter(Defaults.DATE_TIME_FORMATTER_BEFORE_2_0); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/Murmur3FieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/Murmur3FieldMapper.java index cac410ff7d3..5e7b664aceb 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/Murmur3FieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/Murmur3FieldMapper.java @@ -83,7 +83,7 @@ public class Murmur3FieldMapper extends LongFieldMapper { Builder builder = murmur3Field(name); // tweaking these settings is no longer allowed, the entire purpose of murmur3 fields is to store a hash - if (parserContext.indexVersionCreated().onOrAfter(Version.V_2_0_0)) { + if (parserContext.indexVersionCreated().onOrAfter(Version.V_2_0_0_beta1)) { if (node.get("doc_values") != null) { throw new MapperParsingException("Setting [doc_values] cannot be modified for field [" + name + "]"); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java b/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java index 91c877c0c98..0588bd1e044 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java @@ -69,7 +69,7 @@ public class TypeParsers { Map.Entry entry = iterator.next(); String fieldName = Strings.toUnderscoreCase(entry.getKey()); Object fieldNode = entry.getValue(); - if (fieldName.equals("path") && parserContext.indexVersionCreated().before(Version.V_2_0_0)) { + if (fieldName.equals("path") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { pathType = parsePathType(name, fieldNode.toString()); iterator.remove(); } else if (fieldName.equals("fields")) { @@ -188,7 +188,7 @@ public class TypeParsers { Map.Entry entry = iterator.next(); final String propName = Strings.toUnderscoreCase(entry.getKey()); final Object propNode = entry.getValue(); - if (propName.equals("index_name") && parserContext.indexVersionCreated().before(Version.V_2_0_0)) { + if (propName.equals("index_name") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { builder.indexName(propNode.toString()); iterator.remove(); } else if (propName.equals("store")) { @@ -252,7 +252,7 @@ public class TypeParsers { builder.indexOptions(nodeIndexOptionValue(propNode)); iterator.remove(); } else if (propName.equals("analyzer") || // for backcompat, reading old indexes, remove for v3.0 - propName.equals("index_analyzer") && parserContext.indexVersionCreated().before(Version.V_2_0_0)) { + propName.equals("index_analyzer") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { NamedAnalyzer analyzer = parserContext.analysisService().analyzer(propNode.toString()); if (analyzer == null) { @@ -270,10 +270,10 @@ public class TypeParsers { } else if (propName.equals("include_in_all")) { builder.includeInAll(nodeBooleanValue(propNode)); iterator.remove(); - } else if (propName.equals("postings_format") && 
parserContext.indexVersionCreated().before(Version.V_2_0_0)) { + } else if (propName.equals("postings_format") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { // ignore for old indexes iterator.remove(); - } else if (propName.equals("doc_values_format") && parserContext.indexVersionCreated().before(Version.V_2_0_0)) { + } else if (propName.equals("doc_values_format") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { // ignore for old indexes iterator.remove(); } else if (propName.equals("similarity")) { @@ -301,7 +301,7 @@ public class TypeParsers { } public static boolean parseMultiField(FieldMapper.Builder builder, String name, Mapper.TypeParser.ParserContext parserContext, String propName, Object propNode) { - if (propName.equals("path") && parserContext.indexVersionCreated().before(Version.V_2_0_0)) { + if (propName.equals("path") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { builder.multiFieldPathType(parsePathType(name, propNode.toString())); return true; } else if (propName.equals("fields")) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java index dabc109f2e1..0458c410b5b 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java @@ -220,7 +220,7 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper Map.Entry entry = iterator.next(); String fieldName = Strings.toUnderscoreCase(entry.getKey()); Object fieldNode = entry.getValue(); - if (fieldName.equals("path") && parserContext.indexVersionCreated().before(Version.V_2_0_0)) { + if (fieldName.equals("path") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { builder.multiFieldPathType(parsePathType(name, fieldNode.toString())); iterator.remove(); } else if (fieldName.equals("lat_lon")) { @@ -636,9 +636,7 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper double lon = context.parser().doubleValue(); token = context.parser().nextToken(); double lat = context.parser().doubleValue(); - while ((token = context.parser().nextToken()) != XContentParser.Token.END_ARRAY) { - - } + while ((token = context.parser().nextToken()) != XContentParser.Token.END_ARRAY); parse(context, sparse.reset(lat, lon), null); } else { while (token != XContentParser.Token.END_ARRAY) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java index a462140d338..3e7aa39e42e 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java @@ -29,6 +29,7 @@ import org.apache.lucene.spatial.prefix.tree.PackedQuadPrefixTree; import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree; import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree; import org.elasticsearch.Version; +import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Strings; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.geo.SpatialStrategy; @@ -41,6 +42,8 @@ import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import 
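Every hunk in TypeParsers above applies the same backwards-compatibility gate: a 1.x-era option is consumed (honored or deliberately ignored) only when the index was created before 2.0.0-beta1, while on newer indexes the entry is left in the map so the generic unknown-option check after the loop rejects it. The pattern in isolation, under a simplified context (stripLegacyOptions is a stand-in, not the real method):

import java.util.Iterator;
import java.util.Map;

// Sketch: consume-or-leave gating. Entries still present in `node` after parsing
// are treated as unsupported options by the calling mapper parser.
static void stripLegacyOptions(Map<String, Object> node, boolean createdBefore2x) {
    for (Iterator<Map.Entry<String, Object>> it = node.entrySet().iterator(); it.hasNext();) {
        Map.Entry<String, Object> entry = it.next();
        if ("postings_format".equals(entry.getKey()) && createdBefore2x) {
            it.remove(); // accepted (and ignored) for old indexes; left in place, it fails parsing for new ones
        }
    }
}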
org.elasticsearch.index.mapper.MapperParsingException; +import org.elasticsearch.index.mapper.MergeMappingException; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParseContext; import java.io.IOException; @@ -49,6 +52,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue; import static org.elasticsearch.index.mapper.MapperBuilders.geoShapeField; @@ -81,6 +85,7 @@ public class GeoShapeFieldMapper extends FieldMapper { public static final String DISTANCE_ERROR_PCT = "distance_error_pct"; public static final String ORIENTATION = "orientation"; public static final String STRATEGY = "strategy"; + public static final String COERCE = "coerce"; } public static class Defaults { @@ -90,6 +95,7 @@ public class GeoShapeFieldMapper extends FieldMapper { public static final int QUADTREE_LEVELS = GeoUtils.quadTreeLevelsForPrecision("50m"); public static final double LEGACY_DISTANCE_ERROR_PCT = 0.025d; public static final Orientation ORIENTATION = Orientation.RIGHT; + public static final Explicit<Boolean> COERCE = new Explicit<>(false, false); public static final MappedFieldType FIELD_TYPE = new GeoShapeFieldType(); @@ -108,6 +114,8 @@ public class GeoShapeFieldMapper extends FieldMapper { public static class Builder extends FieldMapper.Builder<Builder, GeoShapeFieldMapper> { + private Boolean coerce; + public Builder(String name) { super(name, Defaults.FIELD_TYPE); } @@ -116,21 +124,37 @@ return (GeoShapeFieldType)fieldType; } + public Builder coerce(boolean coerce) { + this.coerce = coerce; + return builder; + } + + protected Explicit<Boolean> coerce(BuilderContext context) { + if (coerce != null) { + return new Explicit<>(coerce, true); + } + if (context.indexSettings() != null) { + return new Explicit<>(context.indexSettings().getAsBoolean("index.mapping.coerce", Defaults.COERCE.value()), false); + } + return Defaults.COERCE; + } + @Override public GeoShapeFieldMapper build(BuilderContext context) { GeoShapeFieldType geoShapeFieldType = (GeoShapeFieldType)fieldType; - if (geoShapeFieldType.tree.equals("quadtree") && context.indexCreatedVersion().before(Version.V_2_0_0)) { + if (geoShapeFieldType.tree.equals("quadtree") && context.indexCreatedVersion().before(Version.V_2_0_0_beta1)) { geoShapeFieldType.setTree("legacyquadtree"); } - if (context.indexCreatedVersion().before(Version.V_2_0_0) || + if (context.indexCreatedVersion().before(Version.V_2_0_0_beta1) || (geoShapeFieldType.treeLevels() == 0 && geoShapeFieldType.precisionInMeters() < 0)) { geoShapeFieldType.setDefaultDistanceErrorPct(Defaults.LEGACY_DISTANCE_ERROR_PCT); } setupFieldType(context); - return new GeoShapeFieldMapper(name, fieldType, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); + return new GeoShapeFieldMapper(name, fieldType, coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, + context), copyTo); } } @@ -161,6 +185,9 @@ } else if (Names.STRATEGY.equals(fieldName)) { builder.fieldType().setStrategyName(fieldNode.toString()); iterator.remove(); + } else if (Names.COERCE.equals(fieldName)) { + builder.coerce(nodeBooleanValue(fieldNode)); + iterator.remove(); + } } return builder; @@ -246,7 +273,7 @@ public class GeoShapeFieldMapper extends FieldMapper { termStrategy.setDistErrPct(distanceErrorPct()); defaultStrategy = resolveStrategy(strategyName); } - + @Override public
void checkCompatibility(MappedFieldType fieldType, List<String> conflicts, boolean strict) { super.checkCompatibility(fieldType, conflicts, strict); @@ -357,8 +384,12 @@ } - public GeoShapeFieldMapper(String simpleName, MappedFieldType fieldType, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) { + protected Explicit<Boolean> coerce; + + public GeoShapeFieldMapper(String simpleName, MappedFieldType fieldType, Explicit<Boolean> coerce, Settings indexSettings, + MultiFields multiFields, CopyTo copyTo) { super(simpleName, fieldType, Defaults.FIELD_TYPE, indexSettings, multiFields, copyTo); + this.coerce = coerce; } @Override @@ -397,6 +428,21 @@ protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException { } + @Override + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { + super.merge(mergeWith, mergeResult); + if (!this.getClass().equals(mergeWith.getClass())) { + return; + } + + GeoShapeFieldMapper gsfm = (GeoShapeFieldMapper)mergeWith; + if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) { + if (gsfm.coerce.explicit()) { + this.coerce = gsfm.coerce; + } + } + } + @Override protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException { builder.field("type", contentType()); @@ -419,6 +465,13 @@ if (includeDefaults || fieldType().orientation() != Defaults.ORIENTATION) { builder.field(Names.ORIENTATION, fieldType().orientation()); } + if (includeDefaults || coerce.explicit()) { + builder.field("coerce", coerce.value()); + } + } + + public Explicit<Boolean> coerce() { + return coerce; } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java index 1ceee372a51..f872207c686 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java @@ -143,7 +143,7 @@ public class AllFieldMapper extends MetadataFieldMapper { if (fieldName.equals("enabled")) { builder.enabled(nodeBooleanValue(fieldNode) ?
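The new coerce option is carried as an Explicit<Boolean>, which pairs the value with a flag recording whether the user set it or it merely defaulted, and the merge override above copies the incoming value only when it was explicit. The semantics in miniature (a sketch mirroring org.elasticsearch.common.Explicit, not a drop-in):

// Sketch: during a mapping merge a defaulted value never clobbers the current one;
// only an explicitly configured value propagates, and only outside simulate mode.
static Explicit<Boolean> mergeCoerce(Explicit<Boolean> current, Explicit<Boolean> incoming,
                                     boolean simulate, boolean hasConflicts) {
    if (simulate == false && hasConflicts == false && incoming.explicit()) {
        return incoming;
    }
    return current;
}

The same explicit-wins rule drives doXContentBody: the option is serialized back out only when it was explicitly set (or when defaults are requested), so round-tripping a mapping does not silently turn defaults into explicit settings.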
EnabledAttributeMapper.ENABLED : EnabledAttributeMapper.DISABLED); iterator.remove(); - } else if (fieldName.equals("auto_boost") && parserContext.indexVersionCreated().before(Version.V_2_0_0)) { + } else if (fieldName.equals("auto_boost") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { // Old 1.x setting which is now ignored iterator.remove(); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java index 26414c4aaca..53c07c41309 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java @@ -113,7 +113,7 @@ public class FieldNamesFieldMapper extends MetadataFieldMapper { } Builder builder = new Builder(parserContext.mapperService().fullName(NAME)); - if (parserContext.indexVersionCreated().before(Version.V_2_0_0)) { + if (parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { parseField(builder, builder.name, node, parserContext); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java index 463d2bdd2b4..63fa41faea1 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java @@ -115,7 +115,7 @@ public class IdFieldMapper extends MetadataFieldMapper { public static class TypeParser implements Mapper.TypeParser { @Override public Mapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { - if (parserContext.indexVersionCreated().onOrAfter(Version.V_2_0_0)) { + if (parserContext.indexVersionCreated().onOrAfter(Version.V_2_0_0_beta1)) { throw new MapperParsingException(NAME + " is not configurable"); } Builder builder = new Builder(parserContext.mapperService().fullName(NAME)); @@ -184,7 +184,7 @@ public class IdFieldMapper extends MetadataFieldMapper { } @Override - public Query prefixQuery(Object value, @Nullable MultiTermQuery.RewriteMethod method, @Nullable QueryParseContext context) { + public Query prefixQuery(String value, @Nullable MultiTermQuery.RewriteMethod method, @Nullable QueryParseContext context) { if (indexOptions() != IndexOptions.NONE || context == null) { return super.prefixQuery(value, method, context); } @@ -201,7 +201,7 @@ public class IdFieldMapper extends MetadataFieldMapper { } @Override - public Query regexpQuery(Object value, int flags, int maxDeterminizedStates, @Nullable MultiTermQuery.RewriteMethod method, @Nullable QueryParseContext context) { + public Query regexpQuery(String value, int flags, int maxDeterminizedStates, @Nullable MultiTermQuery.RewriteMethod method, @Nullable QueryParseContext context) { if (indexOptions() != IndexOptions.NONE || context == null) { return super.regexpQuery(value, flags, maxDeterminizedStates, method, context); } @@ -242,7 +242,7 @@ public class IdFieldMapper extends MetadataFieldMapper { return existing.clone(); } MappedFieldType fieldType = Defaults.FIELD_TYPE.clone(); - boolean pre2x = Version.indexCreated(indexSettings).before(Version.V_2_0_0); + boolean pre2x = Version.indexCreated(indexSettings).before(Version.V_2_0_0_beta1); if (pre2x && indexSettings.getAsBoolean("index.mapping._id.indexed", true) == false) { fieldType.setTokenized(false); } diff 
--git a/core/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java index abf93b6ae3c..2c506f24a55 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java @@ -102,7 +102,7 @@ public class IndexFieldMapper extends MetadataFieldMapper { @Override public Mapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { Builder builder = new Builder(parserContext.mapperService().fullName(NAME)); - if (parserContext.indexVersionCreated().before(Version.V_2_0_0)) { + if (parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { parseField(builder, builder.name, node, parserContext); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java index ad6a45ab594..5fcd10c0842 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java @@ -103,7 +103,7 @@ public class ParentFieldMapper extends MetadataFieldMapper { throw new MapperParsingException("[_parent] field mapping must contain the [type] option"); } setupFieldType(context); - fieldType.setHasDocValues(context.indexCreatedVersion().onOrAfter(Version.V_2_0_0)); + fieldType.setHasDocValues(context.indexCreatedVersion().onOrAfter(Version.V_2_0_0_beta1)); return new ParentFieldMapper(fieldType, type, context.indexSettings()); } } @@ -119,7 +119,7 @@ public class ParentFieldMapper extends MetadataFieldMapper { if (fieldName.equals("type")) { builder.type(fieldNode.toString()); iterator.remove(); - } else if (fieldName.equals("postings_format") && parserContext.indexVersionCreated().before(Version.V_2_0_0)) { + } else if (fieldName.equals("postings_format") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { // ignore before 2.0, reject on and after 2.0 iterator.remove(); } else if (fieldName.equals("fielddata")) { @@ -235,7 +235,7 @@ public class ParentFieldMapper extends MetadataFieldMapper { static MappedFieldType setupDocValues(Settings indexSettings, MappedFieldType fieldType) { fieldType = fieldType.clone(); - fieldType.setHasDocValues(Version.indexCreated(indexSettings).onOrAfter(Version.V_2_0_0)); + fieldType.setHasDocValues(Version.indexCreated(indexSettings).onOrAfter(Version.V_2_0_0_beta1)); return fieldType; } @@ -249,7 +249,9 @@ public class ParentFieldMapper extends MetadataFieldMapper { @Override public void postParse(ParseContext context) throws IOException { - parse(context); + if (context.sourceToParse().flyweight() == false) { + parse(context); + } } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java index 261a368ac45..a95329251cd 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java @@ -102,7 +102,7 @@ public class RoutingFieldMapper extends MetadataFieldMapper { @Override public Mapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { Builder builder = new 
Builder(parserContext.mapperService().fullName(NAME)); - if (parserContext.indexVersionCreated().before(Version.V_2_0_0)) { + if (parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { parseField(builder, builder.name, node, parserContext); } for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) { @@ -112,7 +112,7 @@ public class RoutingFieldMapper extends MetadataFieldMapper { if (fieldName.equals("required")) { builder.required(nodeBooleanValue(fieldNode)); iterator.remove(); - } else if (fieldName.equals("path") && parserContext.indexVersionCreated().before(Version.V_2_0_0)) { + } else if (fieldName.equals("path") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { builder.path(fieldNode.toString()); iterator.remove(); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/SizeFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/SizeFieldMapper.java index 1bf0e2df209..a955ae8053f 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/SizeFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/SizeFieldMapper.java @@ -95,7 +95,7 @@ public class SizeFieldMapper extends MetadataFieldMapper { if (fieldName.equals("enabled")) { builder.enabled(nodeBooleanValue(fieldNode) ? EnabledAttributeMapper.ENABLED : EnabledAttributeMapper.DISABLED); iterator.remove(); - } else if (fieldName.equals("store") && parserContext.indexVersionCreated().before(Version.V_2_0_0)) { + } else if (fieldName.equals("store") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { builder.store(parseStore(fieldName, fieldNode.toString())); iterator.remove(); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java index 37e17008f13..cecce40e489 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java @@ -155,12 +155,12 @@ public class SourceFieldMapper extends MetadataFieldMapper { if (fieldName.equals("enabled")) { builder.enabled(nodeBooleanValue(fieldNode)); iterator.remove(); - } else if (fieldName.equals("compress") && parserContext.indexVersionCreated().before(Version.V_2_0_0)) { + } else if (fieldName.equals("compress") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { if (fieldNode != null) { builder.compress(nodeBooleanValue(fieldNode)); } iterator.remove(); - } else if (fieldName.equals("compress_threshold") && parserContext.indexVersionCreated().before(Version.V_2_0_0)) { + } else if (fieldName.equals("compress_threshold") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { if (fieldNode != null) { if (fieldNode instanceof Number) { builder.compressThreshold(((Number) fieldNode).longValue()); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java index ea25981f70e..253cc6dfcbf 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java @@ -142,7 +142,7 @@ public class TimestampFieldMapper extends MetadataFieldMapper { @Override public TimestampFieldMapper build(BuilderContext context) { - if (explicitStore ==
false && context.indexCreatedVersion().before(Version.V_2_0_0)) { + if (explicitStore == false && context.indexCreatedVersion().before(Version.V_2_0_0_beta1)) { fieldType.setStored(false); } @@ -158,7 +158,7 @@ public class TimestampFieldMapper extends MetadataFieldMapper { private static FormatDateTimeFormatter getDateTimeFormatter(Settings indexSettings) { Version indexCreated = Version.indexCreated(indexSettings); - if (indexCreated.onOrAfter(Version.V_2_0_0)) { + if (indexCreated.onOrAfter(Version.V_2_0_0_beta1)) { return Defaults.DATE_TIME_FORMATTER; } else { return Defaults.DATE_TIME_FORMATTER_BEFORE_2_0; @@ -169,7 +169,7 @@ public class TimestampFieldMapper extends MetadataFieldMapper { @Override public Mapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { Builder builder = new Builder(parserContext.mapperService().fullName(NAME)); - if (parserContext.indexVersionCreated().before(Version.V_2_0_0)) { + if (parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { parseField(builder, builder.name, node, parserContext); } boolean defaultSet = false; @@ -182,7 +182,7 @@ public class TimestampFieldMapper extends MetadataFieldMapper { EnabledAttributeMapper enabledState = nodeBooleanValue(fieldNode) ? EnabledAttributeMapper.ENABLED : EnabledAttributeMapper.DISABLED; builder.enabled(enabledState); iterator.remove(); - } else if (fieldName.equals("path") && parserContext.indexVersionCreated().before(Version.V_2_0_0)) { + } else if (fieldName.equals("path") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { builder.path(fieldNode.toString()); iterator.remove(); } else if (fieldName.equals("format")) { @@ -246,7 +246,7 @@ public class TimestampFieldMapper extends MetadataFieldMapper { if (existing != null) { return existing; } - return Version.indexCreated(settings).onOrAfter(Version.V_2_0_0) ? Defaults.FIELD_TYPE : Defaults.PRE_20_FIELD_TYPE; + return Version.indexCreated(settings).onOrAfter(Version.V_2_0_0_beta1) ? 
Defaults.FIELD_TYPE : Defaults.PRE_20_FIELD_TYPE; } private EnabledAttributeMapper enabledState; diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java index f5d4817ca1c..480d2a41818 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java @@ -94,7 +94,7 @@ public class TypeFieldMapper extends MetadataFieldMapper { public static class TypeParser implements Mapper.TypeParser { @Override public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException { - if (parserContext.indexVersionCreated().onOrAfter(Version.V_2_0_0)) { + if (parserContext.indexVersionCreated().onOrAfter(Version.V_2_0_0_beta1)) { throw new MapperParsingException(NAME + " is not configurable"); } Builder builder = new Builder(parserContext.mapperService().fullName(NAME)); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java index 75d01407a64..92688c213b9 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java @@ -87,7 +87,7 @@ public class UidFieldMapper extends MetadataFieldMapper { @Override public UidFieldMapper build(BuilderContext context) { setupFieldType(context); - fieldType.setHasDocValues(context.indexCreatedVersion().before(Version.V_2_0_0)); + fieldType.setHasDocValues(context.indexCreatedVersion().before(Version.V_2_0_0_beta1)); return new UidFieldMapper(fieldType, defaultFieldType, context.indexSettings()); } } @@ -95,7 +95,7 @@ public class UidFieldMapper extends MetadataFieldMapper { public static class TypeParser implements Mapper.TypeParser { @Override public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException { - if (parserContext.indexVersionCreated().onOrAfter(Version.V_2_0_0)) { + if (parserContext.indexVersionCreated().onOrAfter(Version.V_2_0_0_beta1)) { throw new MapperParsingException(NAME + " is not configurable"); } Builder builder = new Builder(parserContext.mapperService().fullName(NAME)); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java index 043890f8d88..0fbf2a3a83c 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java @@ -79,7 +79,7 @@ public class VersionFieldMapper extends MetadataFieldMapper { for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) { Map.Entry<String, Object> entry = iterator.next(); String fieldName = Strings.toUnderscoreCase(entry.getKey()); - if (fieldName.equals("doc_values_format") && parserContext.indexVersionCreated().before(Version.V_2_0_0)) { + if (fieldName.equals("doc_values_format") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { // ignore in 1.x, reject in 2.x iterator.remove(); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java index 30592f4e9fe..70c3276a56f 100644 ---
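Note the two complementary doc-values switches on the same version cut: ParentFieldMapper (earlier in this diff) enables doc values for _parent on indexes created on or after 2.0.0-beta1, while UidFieldMapper above keeps doc values on _uid only for indexes created before it, reflecting parent/child lookups moving off _uid and onto the _parent join field. Side by side, as a sketch (setupJoinDocValues is a hypothetical helper, not code from this change):

// Sketch: version-dependent doc_values defaults for the two internal fields.
static void setupJoinDocValues(Version indexCreated, MappedFieldType parentType, MappedFieldType uidType) {
    parentType.setHasDocValues(indexCreated.onOrAfter(Version.V_2_0_0_beta1)); // join data lives here from 2.0 on
    uidType.setHasDocValues(indexCreated.before(Version.V_2_0_0_beta1));       // only old indexes wrote _uid doc values
}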
a/core/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java @@ -236,7 +236,7 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, } protected static boolean parseObjectProperties(String name, String fieldName, Object fieldNode, ParserContext parserContext, ObjectMapper.Builder builder) { - if (fieldName.equals("path") && parserContext.indexVersionCreated().before(Version.V_2_0_0)) { + if (fieldName.equals("path") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { builder.pathType(parsePathType(name, fieldNode.toString())); return true; } diff --git a/core/src/main/java/org/elasticsearch/index/percolator/PercolatorException.java b/core/src/main/java/org/elasticsearch/index/percolator/PercolatorException.java index 65019be247b..3813679d81c 100644 --- a/core/src/main/java/org/elasticsearch/index/percolator/PercolatorException.java +++ b/core/src/main/java/org/elasticsearch/index/percolator/PercolatorException.java @@ -18,19 +18,20 @@ */ package org.elasticsearch.index.percolator; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexException; import java.io.IOException; /** * Exception during indexing a percolator query. */ -public class PercolatorException extends IndexException { +public class PercolatorException extends ElasticsearchException { public PercolatorException(Index index, String msg, Throwable cause) { - super(index, msg, cause); + super(msg, cause); + setIndex(index); } public PercolatorException(StreamInput in) throws IOException{ diff --git a/core/src/main/java/org/elasticsearch/index/query/GeoPolygonQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/GeoPolygonQueryParser.java index 959a5af0e71..b1671b7b0cf 100644 --- a/core/src/main/java/org/elasticsearch/index/query/GeoPolygonQueryParser.java +++ b/core/src/main/java/org/elasticsearch/index/query/GeoPolygonQueryParser.java @@ -91,6 +91,9 @@ public class GeoPolygonQueryParser extends BaseQueryParserTemp { while ((token = parser.nextToken()) != Token.END_ARRAY) { shell.add(GeoUtils.parseGeoPoint(parser)); } + if (!shell.get(shell.size()-1).equals(shell.get(0))) { + shell.add(shell.get(0)); + } } else { throw new QueryParsingException(parseContext, "[geo_polygon] query does not support [" + currentFieldName + "]"); diff --git a/core/src/main/java/org/elasticsearch/index/query/HasChildQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/HasChildQueryParser.java index 8c12c370a7d..9e379978e38 100644 --- a/core/src/main/java/org/elasticsearch/index/query/HasChildQueryParser.java +++ b/core/src/main/java/org/elasticsearch/index/query/HasChildQueryParser.java @@ -177,7 +177,7 @@ public class HasChildQueryParser extends BaseQueryParserTemp { final Query query; final ParentChildIndexFieldData parentChildIndexFieldData = parseContext.getForField(parentFieldMapper.fieldType()); - if (parseContext.indexVersionCreated().onOrAfter(Version.V_2_0_0)) { + if (parseContext.indexVersionCreated().onOrAfter(Version.V_2_0_0_beta1)) { query = joinUtilHelper(parentType, parentChildIndexFieldData, parentDocMapper.typeFilter(), scoreType, innerQuery, minChildren, maxChildren); } else { // TODO: use the query API diff --git a/core/src/main/java/org/elasticsearch/index/query/HasParentQueryParser.java 
b/core/src/main/java/org/elasticsearch/index/query/HasParentQueryParser.java index 7ff91d64c99..5b69eec9de3 100644 --- a/core/src/main/java/org/elasticsearch/index/query/HasParentQueryParser.java +++ b/core/src/main/java/org/elasticsearch/index/query/HasParentQueryParser.java @@ -201,7 +201,7 @@ public class HasParentQueryParser extends BaseQueryParserTemp { // wrap the query with type query innerQuery = Queries.filtered(innerQuery, parentDocMapper.typeFilter()); Filter childrenFilter = new QueryWrapperFilter(Queries.not(parentFilter)); - if (parseContext.indexVersionCreated().onOrAfter(Version.V_2_0_0)) { + if (parseContext.indexVersionCreated().onOrAfter(Version.V_2_0_0_beta1)) { ScoreType scoreMode = score ? ScoreType.MAX : ScoreType.NONE; return joinUtilHelper(parentType, parentChildIndexFieldData, childrenFilter, scoreMode, innerQuery, 0, Integer.MAX_VALUE); } else { diff --git a/core/src/main/java/org/elasticsearch/index/query/IndicesQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/IndicesQueryParser.java index 10013491298..54e09f49478 100644 --- a/core/src/main/java/org/elasticsearch/index/query/IndicesQueryParser.java +++ b/core/src/main/java/org/elasticsearch/index/query/IndicesQueryParser.java @@ -22,6 +22,7 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Query; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.inject.Inject; @@ -43,10 +44,12 @@ public class IndicesQueryParser extends BaseQueryParserTemp { @Nullable private final ClusterService clusterService; + private final IndexNameExpressionResolver indexNameExpressionResolver; @Inject - public IndicesQueryParser(@Nullable ClusterService clusterService) { + public IndicesQueryParser(@Nullable ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver) { this.clusterService = clusterService; + this.indexNameExpressionResolver = indexNameExpressionResolver; } @Override @@ -153,7 +156,7 @@ public class IndicesQueryParser extends BaseQueryParserTemp { } protected boolean matchesIndices(String currentIndex, String... 
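matchesIndices in IndicesQueryParser now resolves names through the injected IndexNameExpressionResolver instead of a method on the cluster MetaData, keeping alias and wildcard expansion in one component. The matching step itself is unchanged and short enough to restate (the closing return false sits beyond the hunk boundary):

// Sketch: does the shard's own index fall under any of the requested names,
// once aliases and wildcards have been resolved to concrete indices?
static boolean matchesIndices(String currentIndex, String[] concreteIndices) {
    for (String index : concreteIndices) {
        if (Regex.simpleMatch(index, currentIndex)) {
            return true;
        }
    }
    return false;
}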
indices) { - final String[] concreteIndices = clusterService.state().metaData().concreteIndices(IndicesOptions.lenientExpandOpen(), indices); + final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterService.state(), IndicesOptions.lenientExpandOpen(), indices); for (String index : concreteIndices) { if (Regex.simpleMatch(index, currentIndex)) { return true; diff --git a/core/src/main/java/org/elasticsearch/index/query/PrefixQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/PrefixQueryParser.java index ce2796513d9..47bbc3cfff4 100644 --- a/core/src/main/java/org/elasticsearch/index/query/PrefixQueryParser.java +++ b/core/src/main/java/org/elasticsearch/index/query/PrefixQueryParser.java @@ -53,7 +53,7 @@ public class PrefixQueryParser extends BaseQueryParserTemp { String rewriteMethod = null; String queryName = null; - Object value = null; + String value = null; float boost = AbstractQueryBuilder.DEFAULT_BOOST; String currentFieldName = null; XContentParser.Token token; @@ -71,7 +71,7 @@ public class PrefixQueryParser extends BaseQueryParserTemp { if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else if ("value".equals(currentFieldName) || "prefix".equals(currentFieldName)) { - value = parser.objectBytes(); + value = parser.textOrNull(); } else if ("boost".equals(currentFieldName)) { boost = parser.floatValue(); } else if ("rewrite".equals(currentFieldName)) { @@ -86,7 +86,7 @@ public class PrefixQueryParser extends BaseQueryParserTemp { queryName = parser.text(); } else { fieldName = currentFieldName; - value = parser.objectBytes(); + value = parser.textOrNull(); } } } diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryParsingException.java b/core/src/main/java/org/elasticsearch/index/query/QueryParsingException.java index f904b740948..c606953bca8 100644 --- a/core/src/main/java/org/elasticsearch/index/query/QueryParsingException.java +++ b/core/src/main/java/org/elasticsearch/index/query/QueryParsingException.java @@ -19,13 +19,13 @@ package org.elasticsearch.index.query; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexException; import org.elasticsearch.rest.RestStatus; import java.io.IOException; @@ -33,7 +33,7 @@ import java.io.IOException; /** * */ -public class QueryParsingException extends IndexException { +public class QueryParsingException extends ElasticsearchException { static final int UNKNOWN_POSITION = -1; private final int lineNumber; @@ -44,7 +44,8 @@ public class QueryParsingException extends IndexException { } public QueryParsingException(QueryParseContext parseContext, String msg, Throwable cause, Object... 
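QueryParsingException now extends ElasticsearchException directly, attaching its index via setIndex() rather than the removed IndexException constructor chain; the constructor then captures the parse position whenever a parser is attached. A sketch of how the body plausibly continues after the locals shown here, assuming XContentParser.getTokenLocation() returns an XContentLocation with public lineNumber/columnNumber fields:

// Sketch: best-effort position capture; both fields stay UNKNOWN_POSITION (-1)
// when the context has no parser or the parser reports no token location.
if (parser != null) {
    XContentLocation location = parser.getTokenLocation();
    if (location != null) {
        lineNumber = location.lineNumber;
        columnNumber = location.columnNumber;
    }
}
this.lineNumber = lineNumber;
this.columnNumber = columnNumber;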
args) { - super(parseContext.index(), msg, cause, args); + super(msg, cause, args); + setIndex(parseContext.index()); int lineNumber = UNKNOWN_POSITION; int columnNumber = UNKNOWN_POSITION; XContentParser parser = parseContext.parser(); @@ -64,7 +65,8 @@ public class QueryParsingException extends IndexException { * {@link QueryParseContext} may not be available */ public QueryParsingException(Index index, int line, int col, String msg, Throwable cause) { - super(index, msg, cause); + super(msg, cause); + setIndex(index); this.lineNumber = line; this.columnNumber = col; } diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryStringQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/QueryStringQueryParser.java index ee9f52615dc..4b2f93fb7c9 100644 --- a/core/src/main/java/org/elasticsearch/index/query/QueryStringQueryParser.java +++ b/core/src/main/java/org/elasticsearch/index/query/QueryStringQueryParser.java @@ -178,7 +178,7 @@ public class QueryStringQueryParser extends BaseQueryParserTemp { } else if ("phrase_slop".equals(currentFieldName) || "phraseSlop".equals(currentFieldName)) { qpSettings.phraseSlop(parser.intValue()); } else if (parseContext.parseFieldMatcher().match(currentFieldName, FUZZINESS)) { - qpSettings.fuzzyMinSim(Fuzziness.parse(parser).asSimilarity()); + qpSettings.setFuzziness(Fuzziness.parse(parser)); } else if ("boost".equals(currentFieldName)) { qpSettings.boost(parser.floatValue()); } else if ("tie_breaker".equals(currentFieldName) || "tieBreaker".equals(currentFieldName)) { diff --git a/core/src/main/java/org/elasticsearch/index/query/RegexpQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/RegexpQueryParser.java index 6e0b6e75e38..dcb822e9889 100644 --- a/core/src/main/java/org/elasticsearch/index/query/RegexpQueryParser.java +++ b/core/src/main/java/org/elasticsearch/index/query/RegexpQueryParser.java @@ -55,7 +55,7 @@ public class RegexpQueryParser extends BaseQueryParserTemp { String fieldName = parser.currentName(); String rewriteMethod = null; - Object value = null; + String value = null; float boost = AbstractQueryBuilder.DEFAULT_BOOST; int flagsValue = DEFAULT_FLAGS_VALUE; int maxDeterminizedStates = Operations.DEFAULT_MAX_DETERMINIZED_STATES; @@ -74,7 +74,7 @@ public class RegexpQueryParser extends BaseQueryParserTemp { currentFieldName = parser.currentName(); } else { if ("value".equals(currentFieldName)) { - value = parser.objectBytes(); + value = parser.textOrNull(); } else if ("boost".equals(currentFieldName)) { boost = parser.floatValue(); } else if ("rewrite".equals(currentFieldName)) { @@ -98,7 +98,7 @@ public class RegexpQueryParser extends BaseQueryParserTemp { queryName = parser.text(); } else { fieldName = currentFieldName; - value = parser.objectBytes(); + value = parser.textOrNull(); } } } diff --git a/core/src/main/java/org/elasticsearch/index/query/TermQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/TermQueryParser.java index 625c78c8037..9127c011ea3 100644 --- a/core/src/main/java/org/elasticsearch/index/query/TermQueryParser.java +++ b/core/src/main/java/org/elasticsearch/index/query/TermQueryParser.java @@ -55,6 +55,9 @@ public class TermQueryParser extends BaseQueryParser { // skip } else if (token == XContentParser.Token.START_OBJECT) { // also support a format of "term" : {"field_name" : { ... 
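The two guards added to TermQueryParser here close a silent-miss bug: a body such as {"term": {"user": "kimchy", "age": 30}} used to overwrite fieldName and query only the last field it saw. Factored out, the rule is simply (bindField is a stand-in helper, not code from this change):

// Sketch: a term query binds exactly one field; naming a second is a hard error.
static String bindField(String alreadyBound, String candidate, QueryParseContext parseContext) {
    if (alreadyBound != null) {
        throw new QueryParsingException(parseContext,
                "[term] query does not support different field names, use [bool] query instead");
    }
    return candidate;
}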
}} + if (fieldName != null) { + throw new QueryParsingException(parseContext, "[term] query does not support different field names, use [bool] query instead"); + } fieldName = currentFieldName; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { @@ -79,6 +82,9 @@ public class TermQueryParser extends BaseQueryParser { } else if ("boost".equals(currentFieldName)) { boost = parser.floatValue(); } else { + if (fieldName != null) { + throw new QueryParsingException(parseContext, "[term] query does not support different field names, use [bool] query instead"); + } fieldName = currentFieldName; value = parser.objectBytes(); } diff --git a/core/src/main/java/org/elasticsearch/index/query/WildcardQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/WildcardQueryParser.java index 38ce87e882b..2ac10b80be3 100644 --- a/core/src/main/java/org/elasticsearch/index/query/WildcardQueryParser.java +++ b/core/src/main/java/org/elasticsearch/index/query/WildcardQueryParser.java @@ -101,7 +101,6 @@ public class WildcardQueryParser extends BaseQueryParserTemp { WildcardQuery wildcardQuery = new WildcardQuery(new Term(fieldName, valueBytes)); QueryParsers.setRewriteMethod(wildcardQuery, parseContext.parseFieldMatcher(), rewriteMethod); - wildcardQuery.setRewriteMethod(QueryParsers.parseRewriteMethod(parseContext.parseFieldMatcher(), rewriteMethod)); wildcardQuery.setBoost(boost); if (queryName != null) { parseContext.addNamedQuery(queryName, wildcardQuery); diff --git a/core/src/main/java/org/elasticsearch/index/shard/IllegalIndexShardStateException.java b/core/src/main/java/org/elasticsearch/index/shard/IllegalIndexShardStateException.java index 0cde08f43c4..31c235e09ec 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IllegalIndexShardStateException.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IllegalIndexShardStateException.java @@ -19,6 +19,8 @@ package org.elasticsearch.index.shard; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.rest.RestStatus; @@ -28,17 +30,17 @@ import java.io.IOException; /** * */ -public class IllegalIndexShardStateException extends IndexShardException { +public class IllegalIndexShardStateException extends ElasticsearchException { private final IndexShardState currentState; public IllegalIndexShardStateException(ShardId shardId, IndexShardState currentState, String msg) { - super(shardId, "CurrentState[" + currentState + "] " + msg); - this.currentState = currentState; + this(shardId, currentState, msg, null); } public IllegalIndexShardStateException(ShardId shardId, IndexShardState currentState, String msg, Throwable ex) { - super(shardId, "CurrentState[" + currentState + "] ", ex); + super("CurrentState[" + currentState + "] " + msg, ex); + setShard(shardId); this.currentState = currentState; } @@ -46,11 +48,6 @@ public class IllegalIndexShardStateException extends IndexShardException { return currentState; } - @Override - public RestStatus status() { - return RestStatus.NOT_FOUND; - } - public IllegalIndexShardStateException(StreamInput in) throws IOException{ super(in); currentState = IndexShardState.fromId(in.readByte()); @@ -61,4 +58,9 @@ public class IllegalIndexShardStateException extends IndexShardException { super.writeTo(out); out.writeByte(currentState.id()); } + + @Override 
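IllegalIndexShardStateException illustrates the exception refactor running through this commit: the IndexShardException/IndexException intermediates go away, each exception extends ElasticsearchException directly, and shard or index context is attached with setShard()/setIndex() so serialization and rendering are handled uniformly by the base class. The post-refactor shape, with a stand-in class name:

// Sketch: a shard-scoped exception after the refactor. setShard replaces the
// old IndexShardException(shardId, ...) constructor plumbing.
public class MyShardScopedException extends ElasticsearchException {
    public MyShardScopedException(ShardId shardId, String msg, Throwable cause) {
        super(msg, cause);
        setShard(shardId);
    }
}

Only behavior-bearing overrides remain in the subclasses, such as the RestStatus.NOT_FOUND mapping kept here.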
+ public RestStatus status() { + return RestStatus.NOT_FOUND; + } } diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index be2563fa5c0..dd4b6bc7dc9 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -24,11 +24,13 @@ import com.google.common.base.Preconditions; import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.index.CheckIndex; +import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.UsageTrackingQueryCachingPolicy; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.ThreadInterruptedException; +import org.elasticsearch.ElasticsearchCorruptionException; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.flush.FlushRequest; @@ -104,6 +106,7 @@ import org.elasticsearch.indices.IndicesLifecycle; import org.elasticsearch.indices.IndicesWarmer; import org.elasticsearch.indices.InternalIndicesLifecycle; import org.elasticsearch.indices.cache.query.IndicesQueryCache; +import org.elasticsearch.indices.recovery.RecoveryFailedException; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.search.suggest.completion.Completion090PostingsFormat; import org.elasticsearch.search.suggest.completion.CompletionStats; @@ -839,7 +842,11 @@ public class IndexShard extends AbstractIndexShardComponent { recoveryState.setStage(RecoveryState.Stage.VERIFY_INDEX); // also check here, before we apply the translog if (Booleans.parseBoolean(checkIndexOnStartup, false)) { - checkIndex(); + try { + checkIndex(); + } catch (IOException ex) { + throw new RecoveryFailedException(recoveryState, "check index failed", ex); + } } recoveryState.setStage(RecoveryState.Stage.TRANSLOG); // we disable deletes since we allow for operations to be executed against the shard while recovering @@ -1182,19 +1189,17 @@ public class IndexShard extends AbstractIndexShardComponent { } } - private void checkIndex() throws IndexShardException { + private void checkIndex() throws IOException { if (store.tryIncRef()) { try { doCheckIndex(); - } catch (IOException e) { - throw new IndexShardException(shardId, "exception during checkindex", e); } finally { store.decRef(); } } } - private void doCheckIndex() throws IndexShardException, IOException { + private void doCheckIndex() throws IOException { long timeNS = System.nanoTime(); if (!Lucene.indexExists(store.directory())) { return; @@ -1204,7 +1209,7 @@ public class IndexShard extends AbstractIndexShardComponent { if ("checksum".equalsIgnoreCase(checkIndexOnStartup)) { // physical verification only: verify all checksums for the latest commit - boolean corrupt = false; + IOException corrupt = null; MetadataSnapshot metadata = store.getMetadata(); for (Map.Entry entry : metadata.asMap().entrySet()) { try { @@ -1213,13 +1218,13 @@ public class IndexShard extends AbstractIndexShardComponent { } catch (IOException exc) { out.println("checksum failed: " + entry.getKey()); exc.printStackTrace(out); - corrupt = true; + corrupt = exc; } } out.flush(); - if (corrupt) { + if (corrupt != null) { logger.warn("check index [failure]\n{}", new String(os.bytes().toBytes(), Charsets.UTF_8)); - throw new IndexShardException(shardId, "index check 
failure"); + throw corrupt; } } else { // full checkindex @@ -1244,7 +1249,7 @@ public class IndexShard extends AbstractIndexShardComponent { } } else { // only throw a failure if we are not going to fix the index - throw new IndexShardException(shardId, "index check failure"); + throw new IllegalStateException("index check failure but can't fix it"); } } } @@ -1337,10 +1342,10 @@ public class IndexShard extends AbstractIndexShardComponent { } private String getIndexUUID() { - assert indexSettings.get(IndexMetaData.SETTING_UUID) != null + assert indexSettings.get(IndexMetaData.SETTING_INDEX_UUID) != null || indexSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).before(Version.V_0_90_6) : - "version: " + indexSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null) + " uuid: " + indexSettings.get(IndexMetaData.SETTING_UUID); - return indexSettings.get(IndexMetaData.SETTING_UUID, IndexMetaData.INDEX_UUID_NA_VALUE); + "version: " + indexSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null) + " uuid: " + indexSettings.get(IndexMetaData.SETTING_INDEX_UUID); + return indexSettings.get(IndexMetaData.SETTING_INDEX_UUID, IndexMetaData.INDEX_UUID_NA_VALUE); } private Tuple docMapper(String type) { diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShardException.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShardException.java deleted file mode 100644 index 23fde2abc10..00000000000 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShardException.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.shard; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.IndexException; - -import java.io.IOException; - -/** - * - */ -public class IndexShardException extends IndexException { - - private final ShardId shardId; - - public IndexShardException(ShardId shardId, String msg) { - this(shardId, msg, null); - } - - public IndexShardException(ShardId shardId, String msg, Throwable cause) { - super(shardId == null ? null : shardId.index(), msg, cause); - this.shardId = shardId; - } - - public ShardId shardId() { - return shardId; - } - - @Override - public String toString() { - return (shardId == null ? 
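The checksum branch of checkIndex used to flatten every failure into a boolean and throw a generic IndexShardException; it now holds on to the IOException itself and rethrows it once the report is flushed, so the genuine corruption cause propagates (and, per the recovery hunk above, surfaces as a RecoveryFailedException). The control flow condensed, with verifyChecksum standing in for the store-level check the hunk boundary elides:

// Sketch: log every checksum failure, then rethrow a real IOException (the last one seen).
IOException corrupt = null;
for (Map.Entry<String, StoreFileMetaData> entry : metadata.asMap().entrySet()) {
    try {
        verifyChecksum(entry.getValue()); // hypothetical stand-in for the integrity check
    } catch (IOException exc) {
        out.println("checksum failed: " + entry.getKey());
        exc.printStackTrace(out);
        corrupt = exc;
    }
}
out.flush();
if (corrupt != null) {
    logger.warn("check index [failure]\n{}", new String(os.bytes().toBytes(), Charsets.UTF_8));
    throw corrupt;
}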
"_na" : shardId) + " " + getMessage(); - } - - @Override - protected void innerToXContent(XContentBuilder builder, Params params) throws IOException { - if (shardId != null) { - builder.field("shard", shardId.getId()); - } - super.innerToXContent(builder, params); - } - - public IndexShardException(StreamInput in) throws IOException{ - super(in); - if (in.readBoolean()) { - shardId = ShardId.readShardId(in); - } else { - shardId = null; - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeOptionalStreamable(shardId); - } -} diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShardRecoveryException.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShardRecoveryException.java index d53d1eaa6a5..8ed3c95f92a 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShardRecoveryException.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShardRecoveryException.java @@ -19,18 +19,18 @@ package org.elasticsearch.index.shard; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.index.shard.IndexShardException; -import org.elasticsearch.index.shard.ShardId; import java.io.IOException; /** * */ -public class IndexShardRecoveryException extends IndexShardException { +public class IndexShardRecoveryException extends ElasticsearchException { public IndexShardRecoveryException(ShardId shardId, String msg, Throwable cause) { - super(shardId, msg, cause); + super(msg, cause); + setShard(shardId); } public IndexShardRecoveryException(StreamInput in) throws IOException{ diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShardCreationException.java b/core/src/main/java/org/elasticsearch/index/shard/ShardNotFoundException.java similarity index 69% rename from core/src/main/java/org/elasticsearch/index/shard/IndexShardCreationException.java rename to core/src/main/java/org/elasticsearch/index/shard/ShardNotFoundException.java index a6d879e5b01..fa2c8ce7103 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShardCreationException.java +++ b/core/src/main/java/org/elasticsearch/index/shard/ShardNotFoundException.java @@ -19,19 +19,25 @@ package org.elasticsearch.index.shard; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.common.io.stream.StreamInput; import java.io.IOException; /** + * */ -public class IndexShardCreationException extends IndexShardException { - - public IndexShardCreationException(ShardId shardId, Throwable cause) { - super(shardId, "failed to create shard", cause); +public class ShardNotFoundException extends ResourceNotFoundException { + public ShardNotFoundException(ShardId shardId) { + this(shardId, null); } - public IndexShardCreationException(StreamInput in) throws IOException{ + public ShardNotFoundException(ShardId shardId, Throwable ex) { + super("no such shard", ex); + setShard(shardId); + + } + public ShardNotFoundException(StreamInput in) throws IOException{ super(in); } } diff --git a/core/src/main/java/org/elasticsearch/index/shard/ShardPath.java b/core/src/main/java/org/elasticsearch/index/shard/ShardPath.java index 5e27ce972b0..e22659cbd53 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/ShardPath.java +++ b/core/src/main/java/org/elasticsearch/index/shard/ShardPath.java @@ -18,20 +18,19 @@ */ package org.elasticsearch.index.shard; +import org.apache.lucene.util.IOUtils; import 
org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.env.ShardLock; import org.elasticsearch.index.settings.IndexSettings; import java.io.IOException; import java.nio.file.FileStore; import java.nio.file.Files; import java.nio.file.Path; -import java.util.ArrayList; import java.util.HashMap; -import java.util.List; import java.util.Map; public final class ShardPath { @@ -85,13 +84,13 @@ public final class ShardPath { * Note: this method resolves custom data locations for the shard. */ public static ShardPath loadShardPath(ESLogger logger, NodeEnvironment env, ShardId shardId, @IndexSettings Settings indexSettings) throws IOException { - final String indexUUID = indexSettings.get(IndexMetaData.SETTING_UUID, IndexMetaData.INDEX_UUID_NA_VALUE); + final String indexUUID = indexSettings.get(IndexMetaData.SETTING_INDEX_UUID, IndexMetaData.INDEX_UUID_NA_VALUE); final Path[] paths = env.availableShardPaths(shardId); Path loadedPath = null; for (Path path : paths) { ShardStateMetaData load = ShardStateMetaData.FORMAT.loadLatestState(logger, path); if (load != null) { - if ((load.indexUUID.equals(indexUUID) || IndexMetaData.INDEX_UUID_NA_VALUE.equals(load.indexUUID)) == false) { + if (load.indexUUID.equals(indexUUID) == false && IndexMetaData.INDEX_UUID_NA_VALUE.equals(load.indexUUID) == false) { logger.warn("{} found shard on path: [{}] with a different index UUID - this shard seems to be leftover from a different index with the same name. Remove the leftover shard in order to reuse the path with the current index", shardId, path); throw new IllegalStateException(shardId + " index UUID in shard state was: " + load.indexUUID + " expected: " + indexUUID + " on shard path: " + path); } @@ -118,6 +117,26 @@ public final class ShardPath { } } + /** + * This method tries to delete left-over shards where the index name has been reused but the UUID is different + * to allow the new shard to be allocated. + */ + public static void deleteLeftoverShardDirectory(ESLogger logger, NodeEnvironment env, ShardLock lock, @IndexSettings Settings indexSettings) throws IOException { + final String indexUUID = indexSettings.get(IndexMetaData.SETTING_INDEX_UUID, IndexMetaData.INDEX_UUID_NA_VALUE); + final Path[] paths = env.availableShardPaths(lock.getShardId()); + for (Path path : paths) { + ShardStateMetaData load = ShardStateMetaData.FORMAT.loadLatestState(logger, path); + if (load != null) { + if (load.indexUUID.equals(indexUUID) == false && IndexMetaData.INDEX_UUID_NA_VALUE.equals(load.indexUUID) == false) { + logger.warn("{} deleting leftover shard on path: [{}] with a different index UUID", lock.getShardId(), path); + assert Files.isDirectory(path) : path + " is not a directory"; + NodeEnvironment.acquireFSLockForPaths(indexSettings, paths); + IOUtils.rm(path); + } + } + } + } + /** Maps each path.data path to a "guess" of how many bytes the shards allocated to that path might additionally use over their * lifetime; we do this so a bunch of newly allocated shards won't just all go the path with the most free space at this moment. 
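loadShardPath and the new deleteLeftoverShardDirectory above share one predicate: the on-disk shard state names a different index UUID and is not a pre-UUID legacy shard. The reader fails allocation with an IllegalStateException; the deleter, called with the shard lock held, removes the stale directory so an index recreated under the same name can reuse the path. The shared test isolated (isLeftoverShardDirectory is a hypothetical extraction):

// Sketch: "leftover" means a genuine UUID mismatch; _na_ marks shards written
// before index UUIDs existed and is tolerated by both call sites.
static boolean isLeftoverShardDirectory(String expectedIndexUUID, ShardStateMetaData onDisk) {
    return onDisk != null
            && onDisk.indexUUID.equals(expectedIndexUUID) == false
            && IndexMetaData.INDEX_UUID_NA_VALUE.equals(onDisk.indexUUID) == false;
}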
*/ private static Map getEstimatedReservedBytes(NodeEnvironment env, long avgShardSizeInBytes, Iterable shards) throws IOException { @@ -151,8 +170,8 @@ public final class ShardPath { final Path dataPath; final Path statePath; - - final String indexUUID = indexSettings.get(IndexMetaData.SETTING_UUID, IndexMetaData.INDEX_UUID_NA_VALUE); + + final String indexUUID = indexSettings.get(IndexMetaData.SETTING_INDEX_UUID, IndexMetaData.INDEX_UUID_NA_VALUE); if (NodeEnvironment.hasCustomDataPath(indexSettings)) { dataPath = env.resolveCustomLocation(indexSettings, shardId); diff --git a/core/src/main/java/org/elasticsearch/index/shard/StoreRecoveryService.java b/core/src/main/java/org/elasticsearch/index/shard/StoreRecoveryService.java index 32275c4c1fd..14b27efc8e9 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/StoreRecoveryService.java +++ b/core/src/main/java/org/elasticsearch/index/shard/StoreRecoveryService.java @@ -317,7 +317,7 @@ public class StoreRecoveryService extends AbstractIndexShardComponent implements if (!shardId.getIndex().equals(restoreSource.index())) { snapshotShardId = new ShardId(restoreSource.index(), shardId.id()); } - indexShardRepository.restore(restoreSource.snapshotId(), shardId, snapshotShardId, recoveryState); + indexShardRepository.restore(restoreSource.snapshotId(), restoreSource.version(), shardId, snapshotShardId, recoveryState); indexShard.skipTranslogRecovery(true); indexShard.finalizeRecovery(); indexShard.postRecovery("restore done"); diff --git a/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java b/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java index 91da224ec31..1a54c748c33 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java +++ b/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java @@ -90,12 +90,13 @@ public class TranslogRecoveryPerformer { return numOps; } - public static class BatchOperationException extends IndexShardException { + public static class BatchOperationException extends ElasticsearchException { private final int completedOperations; public BatchOperationException(ShardId shardId, String msg, int completedOperations, Throwable cause) { - super(shardId, msg, cause); + super(msg, cause); + setShard(shardId); this.completedOperations = completedOperations; } diff --git a/core/src/main/java/org/elasticsearch/index/snapshots/IndexShardRepository.java b/core/src/main/java/org/elasticsearch/index/snapshots/IndexShardRepository.java index 224019a26bb..7c778846926 100644 --- a/core/src/main/java/org/elasticsearch/index/snapshots/IndexShardRepository.java +++ b/core/src/main/java/org/elasticsearch/index/snapshots/IndexShardRepository.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.snapshots; +import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.SnapshotId; import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit; import org.elasticsearch.index.shard.ShardId; @@ -35,7 +36,7 @@ public interface IndexShardRepository { /** * Creates a snapshot of the shard based on the index commit point. *
<p/>
- * The index commit point can be obtained by using {@link org.elasticsearch.index.engine.Engine#snapshotIndex()} method. + * The index commit point can be obtained by using {@link org.elasticsearch.index.engine.Engine#snapshotIndex} method. * IndexShardRepository implementations shouldn't release the snapshot index commit point. It is done by the method caller. *
<p/>
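The ownership rule in this javadoc — the implementation writes from a commit point it must not release — is easy to model. A hedged sketch; CommitPoint and ShardRepository are stand-ins for SnapshotIndexCommit and IndexShardRepository, not the real API:

import java.io.Closeable;

// Toy model of caller-owned commit points: the repository consumes the commit
// but never closes it; release happens in the caller's finally block.
final class CommitOwnershipSketch {

    interface CommitPoint extends Closeable {
        void close(); // releases the files pinned by the commit point
    }

    interface ShardRepository {
        void snapshot(String snapshotName, CommitPoint commit); // must NOT close the commit
    }

    static void runSnapshot(ShardRepository repository, CommitPoint commit) {
        try {
            repository.snapshot("snap-1", commit);
        } finally {
            commit.close(); // the method caller releases the commit point
        }
    }

    public static void main(String[] args) {
        CommitPoint commit = () -> System.out.println("commit point released");
        runSnapshot((name, c) -> System.out.println("snapshotting " + name), commit);
    }
}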
* As snapshot process progresses, implementation of this method should update {@link IndexShardSnapshotStatus} object and check @@ -55,19 +56,21 @@ public interface IndexShardRepository { * * @param snapshotId snapshot id * @param shardId shard id (in the current index) + * @param version version of elasticsearch that created this snapshot * @param snapshotShardId shard id (in the snapshot) * @param recoveryState recovery state */ - void restore(SnapshotId snapshotId, ShardId shardId, ShardId snapshotShardId, RecoveryState recoveryState); + void restore(SnapshotId snapshotId, Version version, ShardId shardId, ShardId snapshotShardId, RecoveryState recoveryState); /** * Retrieve shard snapshot status for the stored snapshot * * @param snapshotId snapshot id + * @param version version of elasticsearch that created this snapshot * @param shardId shard id * @return snapshot status */ - IndexShardSnapshotStatus snapshotStatus(SnapshotId snapshotId, ShardId shardId); + IndexShardSnapshotStatus snapshotStatus(SnapshotId snapshotId, Version version, ShardId shardId); /** * Verifies repository settings on data node diff --git a/core/src/main/java/org/elasticsearch/index/snapshots/IndexShardRestoreException.java b/core/src/main/java/org/elasticsearch/index/snapshots/IndexShardRestoreException.java index 92a2ed4ab98..55410b8cb9d 100644 --- a/core/src/main/java/org/elasticsearch/index/snapshots/IndexShardRestoreException.java +++ b/core/src/main/java/org/elasticsearch/index/snapshots/IndexShardRestoreException.java @@ -19,8 +19,8 @@ package org.elasticsearch.index.snapshots; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.index.shard.IndexShardException; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; @@ -28,13 +28,14 @@ import java.io.IOException; /** * Generic shard restore exception */ -public class IndexShardRestoreException extends IndexShardException { +public class IndexShardRestoreException extends ElasticsearchException { public IndexShardRestoreException(ShardId shardId, String msg) { - super(shardId, msg); + this(shardId, msg, null); } public IndexShardRestoreException(ShardId shardId, String msg, Throwable cause) { - super(shardId, msg, cause); + super(msg, cause); + setShard(shardId); } public IndexShardRestoreException(StreamInput in) throws IOException{ diff --git a/core/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotException.java b/core/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotException.java index e915d227538..741350966a5 100644 --- a/core/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotException.java +++ b/core/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotException.java @@ -19,8 +19,8 @@ package org.elasticsearch.index.snapshots; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.index.shard.IndexShardException; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; @@ -28,13 +28,14 @@ import java.io.IOException; /** * Generic shard snapshot exception */ -public class IndexShardSnapshotException extends IndexShardException { +public class IndexShardSnapshotException extends ElasticsearchException { public IndexShardSnapshotException(ShardId shardId, String msg) { - super(shardId, msg); + this(shardId, msg, null); } public IndexShardSnapshotException(ShardId shardId, String msg, Throwable cause) { - 
super(shardId, msg, cause); + super(msg, cause); + setShard(shardId); } public IndexShardSnapshotException(StreamInput in) throws IOException{ diff --git a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java index b9245afa22f..c870b214500 100644 --- a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java +++ b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java @@ -33,6 +33,7 @@ import org.apache.lucene.store.RateLimiter; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.SnapshotId; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -46,7 +47,6 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.store.InputStreamIndexInput; import org.elasticsearch.common.settings.Settings; @@ -63,6 +63,7 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.repositories.RepositoryName; import org.elasticsearch.repositories.RepositoryVerificationException; +import org.elasticsearch.repositories.blobstore.*; import java.io.FilterInputStream; import java.io.IOException; @@ -72,7 +73,6 @@ import java.util.*; import static com.google.common.collect.Lists.newArrayList; import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.testBlobPrefix; -import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.toStreamOutput; /** * Blob store based implementation of IndexShardRepository @@ -104,14 +104,30 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements private final ParseFieldMatcher parseFieldMatcher; - protected static final String SNAPSHOT_PREFIX = "snapshot-"; + protected static final String LEGACY_SNAPSHOT_PREFIX = "snapshot-"; + + protected static final String LEGACY_SNAPSHOT_NAME_FORMAT = LEGACY_SNAPSHOT_PREFIX + "%s"; + + protected static final String SNAPSHOT_PREFIX = "snap-"; + + protected static final String SNAPSHOT_NAME_FORMAT = SNAPSHOT_PREFIX + "%s.dat"; + + protected static final String SNAPSHOT_CODEC = "snapshot"; protected static final String SNAPSHOT_INDEX_PREFIX = "index-"; - protected static final String SNAPSHOT_TEMP_PREFIX = "pending-"; + protected static final String SNAPSHOT_INDEX_NAME_FORMAT = SNAPSHOT_INDEX_PREFIX + "%s"; + + protected static final String SNAPSHOT_INDEX_CODEC = "snapshots"; protected static final String DATA_BLOB_PREFIX = "__"; + private ChecksumBlobStoreFormat indexShardSnapshotFormat; + + private LegacyBlobStoreFormat indexShardSnapshotLegacyFormat; + + private ChecksumBlobStoreFormat indexShardSnapshotsFormat; + @Inject public BlobStoreIndexShardRepository(Settings settings, RepositoryName repositoryName, IndicesService indicesService, ClusterService clusterService) { super(settings); @@ -144,6 +160,9 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements } }; 
this.compress = compress; + indexShardSnapshotFormat = new ChecksumBlobStoreFormat<>(SNAPSHOT_CODEC, SNAPSHOT_NAME_FORMAT, BlobStoreIndexShardSnapshot.PROTO, parseFieldMatcher, isCompress()); + indexShardSnapshotLegacyFormat = new LegacyBlobStoreFormat<>(LEGACY_SNAPSHOT_NAME_FORMAT, BlobStoreIndexShardSnapshot.PROTO, parseFieldMatcher); + indexShardSnapshotsFormat = new ChecksumBlobStoreFormat<>(SNAPSHOT_INDEX_CODEC, SNAPSHOT_INDEX_NAME_FORMAT, BlobStoreIndexShardSnapshots.PROTO, parseFieldMatcher, isCompress()); } /** @@ -174,8 +193,8 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements * {@inheritDoc} */ @Override - public void restore(SnapshotId snapshotId, ShardId shardId, ShardId snapshotShardId, RecoveryState recoveryState) { - final RestoreContext snapshotContext = new RestoreContext(snapshotId, shardId, snapshotShardId, recoveryState); + public void restore(SnapshotId snapshotId, Version version, ShardId shardId, ShardId snapshotShardId, RecoveryState recoveryState) { + final RestoreContext snapshotContext = new RestoreContext(snapshotId, version, shardId, snapshotShardId, recoveryState); try { snapshotContext.restore(); } catch (Throwable e) { @@ -187,8 +206,8 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements * {@inheritDoc} */ @Override - public IndexShardSnapshotStatus snapshotStatus(SnapshotId snapshotId, ShardId shardId) { - Context context = new Context(snapshotId, shardId); + public IndexShardSnapshotStatus snapshotStatus(SnapshotId snapshotId, Version version, ShardId shardId) { + Context context = new Context(snapshotId, version, shardId); BlobStoreIndexShardSnapshot snapshot = context.loadSnapshot(); IndexShardSnapshotStatus status = new IndexShardSnapshotStatus(); status.updateStage(IndexShardSnapshotStatus.Stage.DONE); @@ -223,8 +242,8 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements * @param snapshotId snapshot id * @param shardId shard id */ - public void delete(SnapshotId snapshotId, ShardId shardId) { - Context context = new Context(snapshotId, shardId, shardId); + public void delete(SnapshotId snapshotId, Version version, ShardId shardId) { + Context context = new Context(snapshotId, version, shardId, shardId); context.delete(); } @@ -236,58 +255,6 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements ']'; } - /** - * Returns shard snapshot metadata file name - * - * @param snapshotId snapshot id - * @return shard snapshot metadata file name - */ - private String snapshotBlobName(SnapshotId snapshotId) { - return SNAPSHOT_PREFIX + snapshotId.getSnapshot(); - } - - /** - * Serializes snapshot to JSON - * - * @param snapshot snapshot - * @param output the stream to output the snapshot JSON representation to - * @throws IOException if an IOException occurs - */ - public void writeSnapshot(BlobStoreIndexShardSnapshot snapshot, StreamOutput output) throws IOException { - XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, output).prettyPrint(); - BlobStoreIndexShardSnapshot.toXContent(snapshot, builder, ToXContent.EMPTY_PARAMS); - builder.flush(); - } - - /** - * Parses JSON representation of a snapshot - * - * @param stream JSON - * @return snapshot - * @throws IOException if an IOException occurs - */ - public static BlobStoreIndexShardSnapshot readSnapshot(InputStream stream, ParseFieldMatcher parseFieldMatcher) throws IOException { - byte[] data = ByteStreams.toByteArray(stream); - try (XContentParser parser = 
XContentHelper.createParser(new BytesArray(data))) { - parser.nextToken(); - return BlobStoreIndexShardSnapshot.fromXContent(parser, parseFieldMatcher); - } - } - - /** - * Parses JSON representation of a snapshot - * - * @param stream JSON - * @return snapshot - * @throws IOException if an IOException occurs - * */ - public static BlobStoreIndexShardSnapshots readSnapshots(InputStream stream, ParseFieldMatcher parseFieldMatcher) throws IOException { - byte[] data = ByteStreams.toByteArray(stream); - try (XContentParser parser = XContentHelper.createParser(new BytesArray(data))) { - parser.nextToken(); - return BlobStoreIndexShardSnapshots.fromXContent(parser, parseFieldMatcher); - } - } /** * Returns true if metadata files should be compressed * @@ -297,6 +264,14 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements return compress; } + BlobStoreFormat indexShardSnapshotFormat(Version version) { + if (BlobStoreRepository.legacyMetaData(version)) { + return indexShardSnapshotLegacyFormat; + } else { + return indexShardSnapshotFormat; + } + } + /** * Context for snapshot/restore operations */ @@ -308,12 +283,15 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements protected final BlobContainer blobContainer; - public Context(SnapshotId snapshotId, ShardId shardId) { - this(snapshotId, shardId, shardId); + protected final Version version; + + public Context(SnapshotId snapshotId, Version version, ShardId shardId) { + this(snapshotId, version, shardId, shardId); } - public Context(SnapshotId snapshotId, ShardId shardId, ShardId snapshotShardId) { + public Context(SnapshotId snapshotId, Version version, ShardId shardId, ShardId snapshotShardId) { this.snapshotId = snapshotId; + this.version = version; this.shardId = shardId; blobContainer = blobStore.blobContainer(basePath.add("indices").add(snapshotShardId.getIndex()).add(Integer.toString(snapshotShardId.getId()))); } @@ -333,10 +311,8 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements BlobStoreIndexShardSnapshots snapshots = tuple.v1(); int fileListGeneration = tuple.v2(); - String commitPointName = snapshotBlobName(snapshotId); - try { - blobContainer.deleteBlob(commitPointName); + indexShardSnapshotFormat(version).delete(blobContainer, snapshotId.getSnapshot()); } catch (IOException e) { logger.debug("[{}] [{}] failed to delete shard snapshot file", shardId, snapshotId); } @@ -356,13 +332,11 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements * Loads information about shard snapshot */ public BlobStoreIndexShardSnapshot loadSnapshot() { - BlobStoreIndexShardSnapshot snapshot; - try (InputStream stream = blobContainer.openInput(snapshotBlobName(snapshotId))) { - snapshot = readSnapshot(stream, parseFieldMatcher); + try { + return indexShardSnapshotFormat(version).read(blobContainer, snapshotId.getSnapshot()); } catch (IOException ex) { throw new IndexShardRestoreFailedException(shardId, "failed to read shard snapshot file", ex); } - return snapshot; } /** @@ -381,7 +355,7 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements // delete old index files first for (String blobName : blobs.keySet()) { // delete old file lists - if (blobName.startsWith(SNAPSHOT_TEMP_PREFIX) || blobName.startsWith(SNAPSHOT_INDEX_PREFIX)) { + if (indexShardSnapshotsFormat.isTempBlobName(blobName) || blobName.startsWith(SNAPSHOT_INDEX_PREFIX)) { try { blobContainer.deleteBlob(blobName); } catch (IOException e) { @@ 
-408,20 +382,11 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements // If we deleted all snapshots - we don't need to create the index file if (snapshots.size() > 0) { - String newSnapshotIndexName = SNAPSHOT_INDEX_PREFIX + fileListGeneration; - try (OutputStream output = blobContainer.createOutput(SNAPSHOT_TEMP_PREFIX + fileListGeneration)) { - StreamOutput stream = compressIfNeeded(output); - XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, stream); - newSnapshots.toXContent(builder, ToXContent.EMPTY_PARAMS); - builder.flush(); + try { + indexShardSnapshotsFormat.writeAtomic(newSnapshots, blobContainer, Integer.toString(fileListGeneration)); } catch (IOException e) { throw new IndexShardSnapshotFailedException(shardId, "Failed to write file list", e); } - try { - blobContainer.move(SNAPSHOT_TEMP_PREFIX + fileListGeneration, newSnapshotIndexName); - } catch (IOException e) { - throw new IndexShardSnapshotFailedException(shardId, "Failed to rename file list", e); - } } } @@ -481,8 +446,8 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements } } if (latest >= 0) { - try (InputStream stream = blobContainer.openInput(SNAPSHOT_INDEX_PREFIX + latest)) { - return new Tuple<>(readSnapshots(stream, parseFieldMatcher), latest); + try { + return new Tuple<>(indexShardSnapshotsFormat.read(blobContainer, Integer.toString(latest)), latest); } catch (IOException e) { logger.warn("failed to read index file [{}]", e, SNAPSHOT_INDEX_PREFIX + latest); } @@ -491,22 +456,22 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements // We couldn't load the index file - falling back to loading individual snapshots List snapshots = Lists.newArrayList(); for (String name : blobs.keySet()) { - if (name.startsWith(SNAPSHOT_PREFIX)) { - try (InputStream stream = blobContainer.openInput(name)) { - BlobStoreIndexShardSnapshot snapshot = readSnapshot(stream, parseFieldMatcher); - snapshots.add(new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles())); - } catch (IOException e) { - logger.warn("failed to read commit point [{}]", e, name); + try { + BlobStoreIndexShardSnapshot snapshot = null; + if (name.startsWith(SNAPSHOT_PREFIX)) { + snapshot = indexShardSnapshotFormat.readBlob(blobContainer, name); + } else if (name.startsWith(LEGACY_SNAPSHOT_PREFIX)) { + snapshot = indexShardSnapshotLegacyFormat.readBlob(blobContainer, name); } + if (snapshot != null) { + snapshots.add(new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles())); + } + } catch (IOException e) { + logger.warn("failed to read commit point [{}]", e, name); } } return new Tuple<>(new BlobStoreIndexShardSnapshots(snapshots), -1); } - - protected StreamOutput compressIfNeeded(OutputStream output) throws IOException { - return toStreamOutput(output, isCompress()); - } - } /** @@ -526,7 +491,7 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements * @param snapshotStatus snapshot status to report progress */ public SnapshotContext(SnapshotId snapshotId, ShardId shardId, IndexShardSnapshotStatus snapshotStatus) { - super(snapshotId, shardId); + super(snapshotId, Version.CURRENT, shardId); IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); store = indexService.shardInjectorSafe(shardId.id()).getInstance(Store.class); this.snapshotStatus = snapshotStatus; @@ -627,15 +592,14 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements // now create and write the 
commit point snapshotStatus.updateStage(IndexShardSnapshotStatus.Stage.FINALIZE); - String snapshotBlobName = snapshotBlobName(snapshotId); BlobStoreIndexShardSnapshot snapshot = new BlobStoreIndexShardSnapshot(snapshotId.getSnapshot(), snapshotIndexCommit.getGeneration(), indexCommitPointFiles, snapshotStatus.startTime(), // snapshotStatus.startTime() is assigned on the same machine, so it's safe to use with VLong System.currentTimeMillis() - snapshotStatus.startTime(), indexNumberOfFiles, indexTotalFilesSize); //TODO: The time stored in snapshot doesn't include cleanup time. logger.trace("[{}] [{}] writing shard snapshot file", shardId, snapshotId); - try (StreamOutput output = compressIfNeeded(blobContainer.createOutput(snapshotBlobName))) { - writeSnapshot(snapshot, output); + try { + indexShardSnapshotFormat.write(snapshot, blobContainer, snapshotId.getSnapshot()); } catch (IOException e) { throw new IndexShardSnapshotFailedException(shardId, "Failed to write commit point", e); } @@ -815,8 +779,8 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements * @param snapshotShardId shard in the snapshot that data should be restored from * @param recoveryState recovery state to report progress */ - public RestoreContext(SnapshotId snapshotId, ShardId shardId, ShardId snapshotShardId, RecoveryState recoveryState) { - super(snapshotId, shardId, snapshotShardId); + public RestoreContext(SnapshotId snapshotId, Version version, ShardId shardId, ShardId snapshotShardId, RecoveryState recoveryState) { + super(snapshotId, version, shardId, snapshotShardId); store = indicesService.indexServiceSafe(shardId.getIndex()).shardInjectorSafe(shardId.id()).getInstance(Store.class); this.recoveryState = recoveryState; } diff --git a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java index 6bab901ed5c..0e997c14ec0 100644 --- a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java +++ b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java @@ -29,10 +29,7 @@ import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.Strings; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentBuilderString; -import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.*; import org.elasticsearch.index.store.StoreFileMetaData; import java.io.IOException; @@ -43,7 +40,9 @@ import static com.google.common.collect.Lists.newArrayList; /** * Shard snapshot metadata */ -public class BlobStoreIndexShardSnapshot { +public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuilder { + + public static final BlobStoreIndexShardSnapshot PROTO = new BlobStoreIndexShardSnapshot(); /** * Information about snapshotted file @@ -350,6 +349,19 @@ public class BlobStoreIndexShardSnapshot { this.totalSize = totalSize; } + /** + * Special constructor for the prototype + */ + private BlobStoreIndexShardSnapshot() { + this.snapshot = ""; + this.indexVersion = 0; + this.indexFiles = ImmutableList.of(); + this.startTime = 0; + this.time = 0; + this.numberOfFiles = 0; + this.totalSize = 0; + } + /** * Returns index 
version * @@ -429,25 +441,24 @@ public class BlobStoreIndexShardSnapshot { /** * Serializes shard snapshot metadata info into JSON * - * @param snapshot shard snapshot metadata * @param builder XContent builder * @param params parameters * @throws IOException */ - public static void toXContent(BlobStoreIndexShardSnapshot snapshot, XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.startObject(); - builder.field(Fields.NAME, snapshot.snapshot); - builder.field(Fields.INDEX_VERSION, snapshot.indexVersion); - builder.field(Fields.START_TIME, snapshot.startTime); - builder.field(Fields.TIME, snapshot.time); - builder.field(Fields.NUMBER_OF_FILES, snapshot.numberOfFiles); - builder.field(Fields.TOTAL_SIZE, snapshot.totalSize); + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(Fields.NAME, snapshot); + builder.field(Fields.INDEX_VERSION, indexVersion); + builder.field(Fields.START_TIME, startTime); + builder.field(Fields.TIME, time); + builder.field(Fields.NUMBER_OF_FILES, numberOfFiles); + builder.field(Fields.TOTAL_SIZE, totalSize); builder.startArray(Fields.FILES); - for (FileInfo fileInfo : snapshot.indexFiles) { + for (FileInfo fileInfo : indexFiles) { FileInfo.toXContent(fileInfo, builder, params); } builder.endArray(); - builder.endObject(); + return builder; } /** @@ -457,7 +468,7 @@ public class BlobStoreIndexShardSnapshot { * @return shard snapshot metadata * @throws IOException */ - public static BlobStoreIndexShardSnapshot fromXContent(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException { + public BlobStoreIndexShardSnapshot fromXContent(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException { String snapshot = null; long indexVersion = -1; @@ -467,7 +478,9 @@ public class BlobStoreIndexShardSnapshot { long totalSize = 0; List indexFiles = newArrayList(); - + if (parser.currentToken() == null) { // fresh parser? 
move to the first token + parser.nextToken(); + } XContentParser.Token token = parser.currentToken(); if (token == XContentParser.Token.START_OBJECT) { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { @@ -510,5 +523,4 @@ public class BlobStoreIndexShardSnapshot { return new BlobStoreIndexShardSnapshot(snapshot, indexVersion, ImmutableList.copyOf(indexFiles), startTime, time, numberOfFiles, totalSize); } - } diff --git a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java index 03e50e05d78..19bf4ee3932 100644 --- a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java +++ b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java @@ -24,10 +24,7 @@ import com.google.common.collect.ImmutableMap; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParseFieldMatcher; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentBuilderString; -import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.*; import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo; import java.io.IOException; @@ -44,7 +41,10 @@ import static com.google.common.collect.Maps.newHashMap; * This class is used to find files that were already snapshoted and clear out files that no longer referenced by any * snapshots */ -public class BlobStoreIndexShardSnapshots implements Iterable, ToXContent { +public class BlobStoreIndexShardSnapshots implements Iterable, ToXContent, FromXContentBuilder { + + public static final BlobStoreIndexShardSnapshots PROTO = new BlobStoreIndexShardSnapshots(); + private final ImmutableList shardSnapshots; private final ImmutableMap files; private final ImmutableMap> physicalFiles; @@ -103,6 +103,12 @@ public class BlobStoreIndexShardSnapshots implements Iterable, To this.physicalFiles = mapBuilder.build(); } + private BlobStoreIndexShardSnapshots() { + shardSnapshots = ImmutableList.of(); + files = ImmutableMap.of(); + physicalFiles = ImmutableMap.of(); + } + /** * Returns list of snapshots @@ -201,7 +207,6 @@ public class BlobStoreIndexShardSnapshots implements Iterable, To */ @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); // First we list all blobs with their file infos: builder.startArray(Fields.FILES); for (Map.Entry entry : files.entrySet()) { @@ -219,14 +224,15 @@ public class BlobStoreIndexShardSnapshots implements Iterable, To builder.endArray(); builder.endObject(); } - builder.endObject(); - builder.endObject(); return builder; } - public static BlobStoreIndexShardSnapshots fromXContent(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException { + public BlobStoreIndexShardSnapshots fromXContent(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException { XContentParser.Token token = parser.currentToken(); + if (token == null) { // New parser + token = parser.nextToken(); + } Map> snapshotsMap = newHashMap(); ImmutableMap.Builder filesBuilder = ImmutableMap.builder(); if (token == XContentParser.Token.START_OBJECT) { diff --git 
a/core/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java b/core/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java index afc2dac3359..d40297b8d9c 100644 --- a/core/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java +++ b/core/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java @@ -22,6 +22,7 @@ package org.elasticsearch.index.store; import com.google.common.collect.Sets; import org.apache.lucene.store.*; import org.apache.lucene.util.Constants; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.settings.Settings; @@ -74,11 +75,7 @@ public class FsDirectoryService extends DirectoryService implements StoreRateLim } protected final LockFactory buildLockFactory() throws IOException { - try { - return buildLockFactory(indexSettings); - } catch (IllegalArgumentException e) { - throw new StoreException(shardId, "unable to build lock factory", e); - } + return buildLockFactory(indexSettings); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/store/Store.java b/core/src/main/java/org/elasticsearch/index/store/Store.java index 2617e95b418..db642f776e3 100644 --- a/core/src/main/java/org/elasticsearch/index/store/Store.java +++ b/core/src/main/java/org/elasticsearch/index/store/Store.java @@ -386,14 +386,25 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref * corruption markers. */ public static boolean canOpenIndex(ESLogger logger, Path indexLocation) throws IOException { - try (Directory dir = new SimpleFSDirectory(indexLocation)) { - failIfCorrupted(dir, new ShardId("", 1)); - Lucene.readSegmentInfos(dir); - return true; + try { + tryOpenIndex(indexLocation); } catch (Exception ex) { logger.trace("Can't open index for path [{}]", ex, indexLocation); return false; } + return true; + } + + /** + * Tries to open an index for the given location. This includes reading the + * segment infos and possible corruption markers. 
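The split introduced here keeps one throwing method as the source of truth and derives the boolean probe from it. A sketch of the same shape, with the real segment-info and corruption-marker checks replaced by a placeholder test:

import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

// Toy version of the canOpenIndex/tryOpenIndex split.
final class IndexProbeSketch {

    // Stand-in for tryOpenIndex; the real method reads segment infos and corruption markers.
    static void tryOpenIndex(Path indexLocation) throws Exception {
        if (Files.isDirectory(indexLocation) == false) {
            throw new IllegalStateException("not an index directory: " + indexLocation);
        }
    }

    // Mirrors canOpenIndex delegating to tryOpenIndex.
    static boolean canOpenIndex(Path indexLocation) {
        try {
            tryOpenIndex(indexLocation);
        } catch (Exception ex) {
            return false;
        }
        return true;
    }

    public static void main(String[] args) {
        System.out.println(canOpenIndex(Paths.get(".")));           // true for any directory
        System.out.println(canOpenIndex(Paths.get("no-such-dir"))); // false
    }
}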
If the index can not + * be opened, an exception is thrown + */ + public static void tryOpenIndex(Path indexLocation) throws IOException { + try (Directory dir = new SimpleFSDirectory(indexLocation)) { + failIfCorrupted(dir, new ShardId("", 1)); + Lucene.readSegmentInfos(dir); + } } /** diff --git a/core/src/main/java/org/elasticsearch/index/translog/TranslogException.java b/core/src/main/java/org/elasticsearch/index/translog/TranslogException.java index 2e80cb6f782..2ebf279588e 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/TranslogException.java +++ b/core/src/main/java/org/elasticsearch/index/translog/TranslogException.java @@ -19,8 +19,8 @@ package org.elasticsearch.index.translog; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.index.shard.IndexShardException; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; @@ -28,14 +28,15 @@ import java.io.IOException; /** * */ -public class TranslogException extends IndexShardException { +public class TranslogException extends ElasticsearchException { public TranslogException(ShardId shardId, String msg) { - super(shardId, msg); + this(shardId, msg, null); } public TranslogException(ShardId shardId, String msg, Throwable cause) { - super(shardId, msg, cause); + super(msg, cause); + setShard(shardId); } public TranslogException(StreamInput in) throws IOException{ diff --git a/core/src/main/java/org/elasticsearch/indices/AliasFilterParsingException.java b/core/src/main/java/org/elasticsearch/indices/AliasFilterParsingException.java index 54baafe0a03..52e801a4cb2 100644 --- a/core/src/main/java/org/elasticsearch/indices/AliasFilterParsingException.java +++ b/core/src/main/java/org/elasticsearch/indices/AliasFilterParsingException.java @@ -19,22 +19,22 @@ package org.elasticsearch.indices; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexException; import java.io.IOException; /** * */ -public class AliasFilterParsingException extends IndexException { +public class AliasFilterParsingException extends ElasticsearchException { public AliasFilterParsingException(Index index, String name, String desc, Throwable ex) { - super(index, "[" + name + "], " + desc, ex); + super("[" + name + "], " + desc, ex); + setIndex(index); } - public AliasFilterParsingException(StreamInput in) throws IOException{ super(in); } diff --git a/core/src/main/java/org/elasticsearch/indices/IndexAlreadyExistsException.java b/core/src/main/java/org/elasticsearch/indices/IndexAlreadyExistsException.java index 5bd12bc3aa4..9d6616e4e2b 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndexAlreadyExistsException.java +++ b/core/src/main/java/org/elasticsearch/indices/IndexAlreadyExistsException.java @@ -19,9 +19,9 @@ package org.elasticsearch.indices; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexException; import org.elasticsearch.rest.RestStatus; import java.io.IOException; @@ -29,14 +29,15 @@ import java.io.IOException; /** * */ -public class IndexAlreadyExistsException extends IndexException { +public class IndexAlreadyExistsException extends ElasticsearchException { public IndexAlreadyExistsException(Index index) { this(index, "already exists"); } public IndexAlreadyExistsException(Index 
index, String message) { - super(index, message); + super(message); + setIndex(index); } public IndexAlreadyExistsException(StreamInput in) throws IOException{ diff --git a/core/src/main/java/org/elasticsearch/indices/IndexClosedException.java b/core/src/main/java/org/elasticsearch/indices/IndexClosedException.java index d522bf2c57c..22425ddc460 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndexClosedException.java +++ b/core/src/main/java/org/elasticsearch/indices/IndexClosedException.java @@ -19,9 +19,9 @@ package org.elasticsearch.indices; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexException; import org.elasticsearch.rest.RestStatus; import java.io.IOException; @@ -29,10 +29,11 @@ import java.io.IOException; /** * Exception indicating that one or more requested indices are closed. */ -public class IndexClosedException extends IndexException { +public class IndexClosedException extends ElasticsearchException { public IndexClosedException(Index index) { - super(index, "closed"); + super("closed"); + setIndex(index); } public IndexClosedException(StreamInput in) throws IOException{ diff --git a/core/src/main/java/org/elasticsearch/indices/IndexCreationException.java b/core/src/main/java/org/elasticsearch/indices/IndexCreationException.java index e681376af09..09b6696e112 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndexCreationException.java +++ b/core/src/main/java/org/elasticsearch/indices/IndexCreationException.java @@ -19,19 +19,20 @@ package org.elasticsearch.indices; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchWrapperException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexException; import java.io.IOException; /** */ -public class IndexCreationException extends IndexException implements ElasticsearchWrapperException { +public class IndexCreationException extends ElasticsearchException implements ElasticsearchWrapperException { public IndexCreationException(Index index, Throwable cause) { - super(index, "failed to create index", cause); + super("failed to create index", cause); + setIndex(index); } public IndexCreationException(StreamInput in) throws IOException{ diff --git a/core/src/main/java/org/elasticsearch/indices/IndexPrimaryShardNotAllocatedException.java b/core/src/main/java/org/elasticsearch/indices/IndexPrimaryShardNotAllocatedException.java index 0dda08fe7ff..b7dd3e68cec 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndexPrimaryShardNotAllocatedException.java +++ b/core/src/main/java/org/elasticsearch/indices/IndexPrimaryShardNotAllocatedException.java @@ -19,9 +19,9 @@ package org.elasticsearch.indices; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexException; import org.elasticsearch.rest.RestStatus; import java.io.IOException; @@ -30,13 +30,14 @@ import java.io.IOException; * Thrown when some action cannot be performed because the primary shard of * some shard group in an index has not been allocated post api action. 
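IndexAlreadyExistsException and IndexClosedException above follow the pattern this patch applies across the codebase: the dedicated IndexException and IndexShardException subclasses disappear, and the index or shard is attached to the shared base exception as metadata. A toy model of that migration (all names hypothetical):

// Toy model: the index travels as metadata on a shared base exception
// instead of being a constructor field of a dedicated IndexException subclass.
class SketchBaseException extends RuntimeException {
    private String index;

    SketchBaseException(String msg, Throwable cause) {
        super(msg, cause);
    }

    void setIndex(String index) {
        this.index = index;
    }

    String getIndex() {
        return index;
    }
}

final class SketchIndexClosedException extends SketchBaseException {
    SketchIndexClosedException(String index) {
        super("closed", null);
        setIndex(index); // metadata instead of a deeper class hierarchy
    }

    public static void main(String[] args) {
        SketchIndexClosedException e = new SketchIndexClosedException("logs-2015");
        System.out.println(e.getMessage() + " index=" + e.getIndex()); // closed index=logs-2015
    }
}

Serialization of the index and shard fields then lives once in the base class rather than in every subclass.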
*/ -public class IndexPrimaryShardNotAllocatedException extends IndexException { +public class IndexPrimaryShardNotAllocatedException extends ElasticsearchException { public IndexPrimaryShardNotAllocatedException(StreamInput in) throws IOException{ super(in); } public IndexPrimaryShardNotAllocatedException(Index index) { - super(index, "primary not allocated post api"); + super("primary not allocated post api"); + setIndex(index); } @Override diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesService.java b/core/src/main/java/org/elasticsearch/indices/IndicesService.java index ba0241a043d..912a1ded50e 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -270,12 +270,12 @@ public class IndicesService extends AbstractLifecycleComponent i } /** - * Returns an IndexService for the specified index if exists otherwise a {@link IndexMissingException} is thrown. + * Returns an IndexService for the specified index if exists otherwise a {@link IndexNotFoundException} is thrown. */ - public IndexService indexServiceSafe(String index) throws IndexMissingException { + public IndexService indexServiceSafe(String index) { IndexService indexService = indexService(index); if (indexService == null) { - throw new IndexMissingException(new Index(index)); + throw new IndexNotFoundException(index); } return indexService; } @@ -448,7 +448,7 @@ public class IndicesService extends AbstractLifecycleComponent i try { if (clusterState.metaData().hasIndex(indexName)) { final IndexMetaData index = clusterState.metaData().index(indexName); - throw new IllegalStateException("Can't delete closed index store for [" + indexName + "] - it's still part of the cluster state [" + index.getUUID() + "] [" + metaData.getUUID() + "]"); + throw new IllegalStateException("Can't delete closed index store for [" + indexName + "] - it's still part of the cluster state [" + index.getIndexUUID() + "] [" + metaData.getIndexUUID() + "]"); } deleteIndexStore(reason, metaData, clusterState); } catch (IOException e) { @@ -467,13 +467,13 @@ public class IndicesService extends AbstractLifecycleComponent i String indexName = metaData.index(); if (indices.containsKey(indexName)) { String localUUid = indices.get(indexName).v1().indexUUID(); - throw new IllegalStateException("Can't delete index store for [" + indexName + "] - it's still part of the indices service [" + localUUid+ "] [" + metaData.getUUID() + "]"); + throw new IllegalStateException("Can't delete index store for [" + indexName + "] - it's still part of the indices service [" + localUUid + "] [" + metaData.getIndexUUID() + "]"); } if (clusterState.metaData().hasIndex(indexName) && (clusterState.nodes().localNode().masterNode() == true)) { // we do not delete the store if it is a master eligible node and the index is still in the cluster state // because we want to keep the meta data for indices around even if no shards are left here final IndexMetaData index = clusterState.metaData().index(indexName); - throw new IllegalStateException("Can't delete closed index store for [" + indexName + "] - it's still part of the cluster state [" + index.getUUID() + "] [" + metaData.getUUID() + "]"); + throw new IllegalStateException("Can't delete closed index store for [" + indexName + "] - it's still part of the cluster state [" + index.getIndexUUID() + "] [" + metaData.getIndexUUID() + "]"); } } Index index = new Index(metaData.index()); diff --git 
a/core/src/main/java/org/elasticsearch/indices/InvalidAliasNameException.java b/core/src/main/java/org/elasticsearch/indices/InvalidAliasNameException.java index 179f6ca8517..4e2c443ff4a 100644 --- a/core/src/main/java/org/elasticsearch/indices/InvalidAliasNameException.java +++ b/core/src/main/java/org/elasticsearch/indices/InvalidAliasNameException.java @@ -19,9 +19,9 @@ package org.elasticsearch.indices; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexException; import org.elasticsearch.rest.RestStatus; import java.io.IOException; @@ -29,10 +29,11 @@ import java.io.IOException; /** * */ -public class InvalidAliasNameException extends IndexException { +public class InvalidAliasNameException extends ElasticsearchException { public InvalidAliasNameException(Index index, String name, String desc) { - super(index, "Invalid alias name [" + name + "], " + desc); + super("Invalid alias name [{}], {}", name, desc); + setIndex(index); } public InvalidAliasNameException(StreamInput in) throws IOException{ diff --git a/core/src/main/java/org/elasticsearch/indices/InvalidIndexNameException.java b/core/src/main/java/org/elasticsearch/indices/InvalidIndexNameException.java index 468d158f389..163f4df26a2 100644 --- a/core/src/main/java/org/elasticsearch/indices/InvalidIndexNameException.java +++ b/core/src/main/java/org/elasticsearch/indices/InvalidIndexNameException.java @@ -19,9 +19,9 @@ package org.elasticsearch.indices; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexException; import org.elasticsearch.rest.RestStatus; import java.io.IOException; @@ -29,10 +29,11 @@ import java.io.IOException; /** * */ -public class InvalidIndexNameException extends IndexException { +public class InvalidIndexNameException extends ElasticsearchException { public InvalidIndexNameException(Index index, String name, String desc) { - super(index, "Invalid index name [" + name + "], " + desc); + super("Invalid index name [" + name + "], " + desc); + setIndex(index); } public InvalidIndexNameException(StreamInput in) throws IOException{ diff --git a/core/src/main/java/org/elasticsearch/indices/TypeMissingException.java b/core/src/main/java/org/elasticsearch/indices/TypeMissingException.java index ef4a1d6d8d5..0a332dbaf18 100644 --- a/core/src/main/java/org/elasticsearch/indices/TypeMissingException.java +++ b/core/src/main/java/org/elasticsearch/indices/TypeMissingException.java @@ -19,9 +19,9 @@ package org.elasticsearch.indices; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexException; import org.elasticsearch.rest.RestStatus; import java.io.IOException; @@ -30,10 +30,11 @@ import java.util.Arrays; /** * */ -public class TypeMissingException extends IndexException { +public class TypeMissingException extends ElasticsearchException { public TypeMissingException(Index index, String... 
types) { - super(index, "type[" + Arrays.toString(types) + "] missing"); + super("type[" + Arrays.toString(types) + "] missing"); + setIndex(index); } public TypeMissingException(StreamInput in) throws IOException{ diff --git a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index de93451b4a2..8be847fa97b 100644 --- a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -45,7 +45,6 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexShardAlreadyExistsException; -import org.elasticsearch.index.IndexShardMissingException; import org.elasticsearch.index.aliases.IndexAliasesService; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.mapper.DocumentMapper; @@ -64,8 +63,6 @@ import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentMap; -import static org.elasticsearch.ExceptionsHelper.detailedMessage; - /** * */ @@ -300,7 +297,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent listener) { final ClusterState state = clusterService.state(); - final String[] concreteIndices = state.metaData().concreteIndices(indicesOptions, aliasesOrIndices); + final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, indicesOptions, aliasesOrIndices); final Map> results = ConcurrentCollections.newConcurrentMap(); int totalNumberOfShards = 0; int numberOfShards = 0; @@ -236,11 +238,11 @@ public class SyncedFlushService extends AbstractComponent { if (index != null && index.state() == IndexMetaData.State.CLOSE) { throw new IndexClosedException(shardId.index()); } - throw new IndexMissingException(shardId.index()); + throw new IndexNotFoundException(shardId.index().getName()); } final IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(shardId.id()); if (shardRoutingTable == null) { - throw new IndexShardMissingException(shardId); + throw new ShardNotFoundException(shardId); } return shardRoutingTable; } @@ -426,7 +428,7 @@ public class SyncedFlushService extends AbstractComponent { IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); IndexShard indexShard = indexService.shardSafe(request.shardId().id()); if (indexShard.routingEntry().primary() == false) { - throw new IndexShardException(request.shardId(), "expected a primary shard"); + throw new IllegalStateException("[" + request.shardId() +"] expected a primary shard"); } int opCount = indexShard.getOperationsCount(); logger.trace("{} in flight operations sampled at [{}]", request.shardId(), opCount); diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverFilesRecoveryException.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverFilesRecoveryException.java index 508e52f3590..69a55e03c9a 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverFilesRecoveryException.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverFilesRecoveryException.java @@ -19,11 +19,11 @@ package org.elasticsearch.indices.recovery; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchWrapperException; import 
org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.index.shard.IndexShardException; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; @@ -32,15 +32,16 @@ import java.util.Objects; /** * */ -public class RecoverFilesRecoveryException extends IndexShardException implements ElasticsearchWrapperException { +public class RecoverFilesRecoveryException extends ElasticsearchException implements ElasticsearchWrapperException { private final int numberOfFiles; private final ByteSizeValue totalFilesSize; public RecoverFilesRecoveryException(ShardId shardId, int numberOfFiles, ByteSizeValue totalFilesSize, Throwable cause) { - super(shardId, "Failed to transfer [" + numberOfFiles + "] files with total size of [" + totalFilesSize + "]", cause); + super("Failed to transfer [{}] files with total size of [{}]", cause, numberOfFiles, totalFilesSize); Objects.requireNonNull(totalFilesSize, "totalFilesSize must not be null"); + setShard(shardId); this.numberOfFiles = numberOfFiles; this.totalFilesSize = totalFilesSize; } diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index 22bd4336974..0388265e64c 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -43,13 +43,12 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.CancellableThreads; import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.index.IndexShardMissingException; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.engine.RecoveryEngineException; import org.elasticsearch.index.mapper.MapperException; import org.elasticsearch.index.settings.IndexSettings; import org.elasticsearch.index.shard.*; import org.elasticsearch.index.store.Store; -import org.elasticsearch.indices.IndexMissingException; import org.elasticsearch.indices.IndicesLifecycle; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.*; @@ -231,7 +230,7 @@ public class RecoveryTarget extends AbstractComponent { // here, we would add checks against exception that need to be retried (and not removeAndClean in this case) - if (cause instanceof IllegalIndexShardStateException || cause instanceof IndexMissingException || cause instanceof IndexShardMissingException) { + if (cause instanceof IllegalIndexShardStateException || cause instanceof IndexNotFoundException || cause instanceof ShardNotFoundException) { // if the target is not ready yet, retry retryRecovery(recoveryStatus, "remote shard not ready", recoverySettings.retryDelayStateSync(), request); return; diff --git a/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java b/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java index b5ba372c22c..493d13a854d 100644 --- a/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java +++ b/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java @@ -208,7 +208,7 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe // TODO will have to ammend this for shadow replicas so we don't delete the shared copy... 
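The RecoveryTarget hunk just above narrows retries to "target not ready yet" failures, now spelled with the renamed exception types. A small sketch of that classification step, using stand-in exception classes rather than the real ones:

// Stand-ins for IllegalIndexShardStateException, IndexNotFoundException and
// ShardNotFoundException; only these cause a delayed retry of the recovery.
final class RecoveryRetrySketch {

    static class IndexNotFoundSketch extends RuntimeException {}

    static class ShardNotFoundSketch extends RuntimeException {}

    static boolean shouldRetry(Throwable cause) {
        return cause instanceof IllegalStateException
                || cause instanceof IndexNotFoundSketch
                || cause instanceof ShardNotFoundSketch;
    }

    public static void main(String[] args) {
        System.out.println(shouldRetry(new ShardNotFoundSketch()));  // true: shard not ready, retry
        System.out.println(shouldRetry(new RuntimeException("io"))); // false: fail the recovery
    }
}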
private void deleteShardIfExistElseWhere(ClusterState state, IndexShardRoutingTable indexShardRoutingTable) { List> requests = new ArrayList<>(indexShardRoutingTable.size()); - String indexUUID = state.getMetaData().index(indexShardRoutingTable.shardId().getIndex()).getUUID(); + String indexUUID = state.getMetaData().index(indexShardRoutingTable.shardId().getIndex()).getIndexUUID(); ClusterName clusterName = state.getClusterName(); for (ShardRouting shardRouting : indexShardRoutingTable) { // Node can't be null, because otherwise shardCanBeDeleted() would have returned false diff --git a/core/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java b/core/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java index f73576fef79..1d4eefdcb05 100644 --- a/core/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java +++ b/core/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java @@ -29,6 +29,7 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; @@ -70,8 +71,9 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesAction percolateQueries; private final int numberOfShards; private final Query aliasFilter; + private final long startTime; private String[] types; private Engine.Searcher docSearcher; @@ -133,6 +134,7 @@ public class PercolateContext extends SearchContext { this.scriptService = scriptService; this.numberOfShards = request.getNumberOfShards(); this.aliasFilter = aliasFilter; + this.startTime = request.getStartTime(); } public IndexSearcher docSearcher() { @@ -337,7 +339,7 @@ public class PercolateContext extends SearchContext { @Override protected long nowInMillisImpl() { - throw new UnsupportedOperationException(); + return startTime; } @Override diff --git a/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java b/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java index d74bd8c721f..b035f3fb8b0 100644 --- a/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java +++ b/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java @@ -34,6 +34,7 @@ import org.elasticsearch.action.percolate.PercolateShardResponse; import org.elasticsearch.cache.recycler.PageCacheRecycler; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -92,6 +93,7 @@ public class PercolatorService extends AbstractComponent { public final static float NO_SCORE = Float.NEGATIVE_INFINITY; public final static String TYPE_NAME = ".percolator"; + private final IndexNameExpressionResolver indexNameExpressionResolver; private final IndicesService indicesService; private final IntObjectHashMap percolatorTypes; private final PageCacheRecycler pageCacheRecycler; @@ -112,12 +114,13 @@ public class PercolatorService extends AbstractComponent { private final 
ParseFieldMatcher parseFieldMatcher; @Inject - public PercolatorService(Settings settings, IndicesService indicesService, + public PercolatorService(Settings settings, IndexNameExpressionResolver indexNameExpressionResolver, IndicesService indicesService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, HighlightPhase highlightPhase, ClusterService clusterService, AggregationPhase aggregationPhase, ScriptService scriptService, MappingUpdatedAction mappingUpdatedAction) { super(settings); + this.indexNameExpressionResolver = indexNameExpressionResolver; this.parseFieldMatcher = new ParseFieldMatcher(settings); this.indicesService = indicesService; this.pageCacheRecycler = pageCacheRecycler; @@ -164,7 +167,10 @@ public class PercolatorService extends AbstractComponent { shardPercolateService.prePercolate(); long startTime = System.nanoTime(); - String[] filteringAliases = clusterService.state().getMetaData().filteringAliases( + // TODO: The filteringAliases should be looked up at the coordinating node and serialized with all shard request, + // just like is done in other apis. + String[] filteringAliases = indexNameExpressionResolver.filteringAliases( + clusterService.state(), indexShard.shardId().index().name(), request.indices() ); @@ -174,6 +180,7 @@ public class PercolatorService extends AbstractComponent { final PercolateContext context = new PercolateContext( request, searchShardTarget, indexShard, percolateIndexService, pageCacheRecycler, bigArrays, scriptService, aliasFilter, parseFieldMatcher ); + SearchContext.setCurrent(context); try { ParsedDocument parsedDocument = parseRequest(percolateIndexService, request, context); if (context.percolateQueries().isEmpty()) { @@ -229,6 +236,7 @@ public class PercolatorService extends AbstractComponent { percolatorIndex.prepare(context, parsedDocument); return action.doPercolate(request, context, isNested); } finally { + SearchContext.removeCurrent(); context.close(); shardPercolateService.postPercolate(System.nanoTime() - startTime); } @@ -252,7 +260,6 @@ public class PercolatorService extends AbstractComponent { // not the in memory percolate doc String[] previousTypes = context.types(); context.types(new String[]{TYPE_NAME}); - SearchContext.setCurrent(context); try { parser = XContentFactory.xContent(source).createParser(source); String currentFieldName = null; @@ -353,7 +360,6 @@ public class PercolatorService extends AbstractComponent { throw new ElasticsearchParseException("failed to parse request", e); } finally { context.types(previousTypes); - SearchContext.removeCurrent(); if (parser != null) { parser.close(); } diff --git a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java new file mode 100644 index 00000000000..0a3e97170aa --- /dev/null +++ b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java @@ -0,0 +1,114 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.repositories.blobstore; + +import com.google.common.collect.Maps; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.blobstore.BlobContainer; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.*; + +import java.io.IOException; +import java.util.Locale; +import java.util.Map; + +/** + * Base class that handles serialization of various data structures during snapshot/restore operations. + */ +public abstract class BlobStoreFormat<T extends ToXContent> { + + protected final String blobNameFormat; + + protected final FromXContentBuilder<T> reader; + + protected final ParseFieldMatcher parseFieldMatcher; + + // Serialization parameters to specify correct context for metadata serialization + protected static final ToXContent.Params SNAPSHOT_ONLY_FORMAT_PARAMS; + + static { + Map<String, String> snapshotOnlyParams = Maps.newHashMap(); + // when metadata is serialized certain elements of the metadata shouldn't be included into snapshot + // exclusion of these elements is done by setting MetaData.CONTEXT_MODE_PARAM to MetaData.CONTEXT_MODE_SNAPSHOT + snapshotOnlyParams.put(MetaData.CONTEXT_MODE_PARAM, MetaData.CONTEXT_MODE_SNAPSHOT); + SNAPSHOT_ONLY_FORMAT_PARAMS = new ToXContent.MapParams(snapshotOnlyParams); + } + + /** + * @param blobNameFormat format of the blob name in {@link String#format(Locale, String, Object...)} format + * @param reader the prototype object that can deserialize objects with type T + * @param parseFieldMatcher parse field matcher + */ + protected BlobStoreFormat(String blobNameFormat, FromXContentBuilder<T> reader, ParseFieldMatcher parseFieldMatcher) { + this.reader = reader; + this.blobNameFormat = blobNameFormat; + this.parseFieldMatcher = parseFieldMatcher; + } + + /** + * Reads and parses the blob with the given blob name.
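The name translation that read, delete and exists all rely on is a single String.format call over the blobNameFormat supplied by each subclass. A runnable sketch using the two formats this patch defines, the checksummed "snap-%s.dat" and the legacy "snapshot-%s":

import java.util.Locale;

// Logical name in, physical blob name out, via the subclass-supplied format.
final class BlobNameSketch {

    private final String blobNameFormat;

    BlobNameSketch(String blobNameFormat) {
        this.blobNameFormat = blobNameFormat;
    }

    String blobName(String name) {
        return String.format(Locale.ROOT, blobNameFormat, name);
    }

    public static void main(String[] args) {
        System.out.println(new BlobNameSketch("snap-%s.dat").blobName("20131010")); // snap-20131010.dat
        System.out.println(new BlobNameSketch("snapshot-%s").blobName("20131010")); // snapshot-20131010 (legacy)
    }
}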
+ /** + * Reads and parses the blob with the given blob name. + * + * @param blobContainer blob container + * @param blobName blob name + * @return parsed blob object + * @throws IOException + */ + public abstract T readBlob(BlobContainer blobContainer, String blobName) throws IOException; + + /** + * Reads and parses the blob with the given name, applying name translation using the {@link #blobName} method + * + * @param blobContainer blob container + * @param name name to be translated into the blob name + * @return parsed blob object + * @throws IOException + */ + public T read(BlobContainer blobContainer, String name) throws IOException { + String blobName = blobName(name); + return readBlob(blobContainer, blobName); + } + + /** + * Deletes the blob for the given name from the blob container + */ + public void delete(BlobContainer blobContainer, String name) throws IOException { + blobContainer.deleteBlob(blobName(name)); + } + + /** + * Checks whether the blob for the given name exists in the blob container + */ + public boolean exists(BlobContainer blobContainer, String name) throws IOException { + return blobContainer.blobExists(blobName(name)); + } + + protected String blobName(String name) { + return String.format(Locale.ROOT, blobNameFormat, name); + } + + protected T read(BytesReference bytes) throws IOException { + try (XContentParser parser = XContentHelper.createParser(bytes)) { + T obj = reader.fromXContent(parser, parseFieldMatcher); + return obj; + } + } +} diff --git a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 58fb03789b0..4af5b7c00f4 100644 --- a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -19,18 +19,15 @@ package org.elasticsearch.repositories.blobstore; -import com.fasterxml.jackson.core.JsonParseException; import com.google.common.collect.ImmutableList; -import com.google.common.collect.Maps; import com.google.common.io.ByteStreams; - import org.apache.lucene.store.RateLimiter; -import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.SnapshotId; +import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; @@ -39,7 +36,6 @@ import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.compress.CompressorFactory; import org.elasticsearch.common.compress.NotXContentException; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; @@ -47,13 +43,11 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import
org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.shard.IndexShardException; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardRepository; import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardRepository; @@ -62,12 +56,7 @@ import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.RepositorySettings; import org.elasticsearch.repositories.RepositoryVerificationException; -import org.elasticsearch.snapshots.InvalidSnapshotNameException; -import org.elasticsearch.snapshots.Snapshot; -import org.elasticsearch.snapshots.SnapshotCreationException; -import org.elasticsearch.snapshots.SnapshotException; -import org.elasticsearch.snapshots.SnapshotMissingException; -import org.elasticsearch.snapshots.SnapshotShardFailure; +import org.elasticsearch.snapshots.*; import java.io.FileNotFoundException; import java.io.IOException; @@ -94,13 +83,13 @@ import static com.google.common.collect.Lists.newArrayList; * STORE_ROOT * |- index - list of all snapshot name as JSON array * |- snapshot-20131010 - JSON serialized Snapshot for snapshot "20131010" - * |- metadata-20131010 - JSON serialized MetaData for snapshot "20131010" (includes only global metadata) + * |- meta-20131010.dat - JSON serialized MetaData for snapshot "20131010" (includes only global metadata) * |- snapshot-20131011 - JSON serialized Snapshot for snapshot "20131011" - * |- metadata-20131011 - JSON serialized MetaData for snapshot "20131011" + * |- meta-20131011.dat - JSON serialized MetaData for snapshot "20131011" * ..... * |- indices/ - data for all indices * |- foo/ - data for index "foo" - * | |- snapshot-20131010 - JSON Serialized IndexMetaData for index "foo" + * | |- meta-20131010.dat - JSON Serialized IndexMetaData for index "foo" * | |- 0/ - data for shard "0" of index "foo" * | | |- __1 \ * | | |- __2 | @@ -108,8 +97,9 @@ import static com.google.common.collect.Lists.newArrayList; * | | |- __4 | * | | |- __5 / * | | ..... 
- * | | |- snapshot-20131010 - JSON serialized BlobStoreIndexShardSnapshot for snapshot "20131010" - * | | |- snapshot-20131011 - JSON serialized BlobStoreIndexShardSnapshot for snapshot "20131011" + * | | |- snap-20131010.dat - JSON serialized BlobStoreIndexShardSnapshot for snapshot "20131010" + * | | |- snap-20131011.dat - JSON serialized BlobStoreIndexShardSnapshot for snapshot "20131011" + * | | |- list-123 - JSON serialized BlobStoreIndexShardSnapshots listing the snapshot files in the shard + * | | * | |- 1/ - data for shard "1" of index "foo" * | | |- __1 @@ -129,24 +119,35 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Repository> implements Repository, RateLimiterListener { + private ChecksumBlobStoreFormat<MetaData> globalMetaDataFormat; + + private LegacyBlobStoreFormat<MetaData> globalMetaDataLegacyFormat; + + private ChecksumBlobStoreFormat<IndexMetaData> indexMetaDataFormat; + + private LegacyBlobStoreFormat<IndexMetaData> indexMetaDataLegacyFormat; + + private ChecksumBlobStoreFormat<Snapshot> snapshotFormat; + + private LegacyBlobStoreFormat<Snapshot> snapshotLegacyFormat; /** * Constructs new BlobStoreRepository @@ -167,9 +179,6 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Repository> implements Repository, RateLimiterListener { - Map<String, String> snpashotOnlyParams = Maps.newHashMap(); - snpashotOnlyParams.put(MetaData.CONTEXT_MODE_PARAM, MetaData.CONTEXT_MODE_SNAPSHOT); - snapshotOnlyFormatParams = new ToXContent.MapParams(snpashotOnlyParams); snapshotRateLimiter = getRateLimiter(repositorySettings, "max_snapshot_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB)); restoreRateLimiter = getRateLimiter(repositorySettings, "max_restore_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB)); } @@ -181,6 +190,16 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Repository> implements Repository, RateLimiterListener { + globalMetaDataFormat = new ChecksumBlobStoreFormat<>(METADATA_CODEC, METADATA_NAME_FORMAT, MetaData.PROTO, parseFieldMatcher, isCompress()); + globalMetaDataLegacyFormat = new LegacyBlobStoreFormat<>(LEGACY_METADATA_NAME_FORMAT, MetaData.PROTO, parseFieldMatcher); + + indexMetaDataFormat = new ChecksumBlobStoreFormat<>(INDEX_METADATA_CODEC, METADATA_NAME_FORMAT, IndexMetaData.PROTO, parseFieldMatcher, isCompress()); + indexMetaDataLegacyFormat = new LegacyBlobStoreFormat<>(LEGACY_SNAPSHOT_NAME_FORMAT, IndexMetaData.PROTO, parseFieldMatcher); + + snapshotFormat = new ChecksumBlobStoreFormat<>(SNAPSHOT_CODEC, SNAPSHOT_NAME_FORMAT, Snapshot.PROTO, parseFieldMatcher, isCompress()); + snapshotLegacyFormat = new LegacyBlobStoreFormat<>(LEGACY_SNAPSHOT_NAME_FORMAT, Snapshot.PROTO, parseFieldMatcher); } /**
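// Each format stamps blob names out of its name-format string, so one logical snapshot name
// maps to a different blob per format. The values below are inferred from the directory layout
// documented above (the constant definitions themselves are outside this excerpt):
//   METADATA_NAME_FORMAT        "meta-%s.dat"  -> "meta-20131010.dat"
//   SNAPSHOT_NAME_FORMAT        "snap-%s.dat"  -> "snap-20131010.dat"
//   LEGACY_METADATA_NAME_FORMAT "metadata-%s"  -> "metadata-20131010"   (pre-2.0 blobs)
String blobName = String.format(Locale.ROOT, "meta-%s.dat", "20131010"); // "meta-20131010.dat"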
@@ -242,26 +261,17 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Repository> implements Repository, RateLimiterListener { public void initializeSnapshot(SnapshotId snapshotId, List<String> indices, MetaData metaData) { try { - String snapshotBlobName = snapshotBlobName(snapshotId); - if (snapshotsBlobContainer.blobExists(snapshotBlobName)) { + if (snapshotFormat.exists(snapshotsBlobContainer, snapshotId.getSnapshot()) || + snapshotLegacyFormat.exists(snapshotsBlobContainer, snapshotId.getSnapshot())) { throw new InvalidSnapshotNameException(snapshotId, "snapshot with such name already exists"); } // Write Global MetaData - // TODO: Check if metadata needs to be written - try (StreamOutput output = compressIfNeeded(snapshotsBlobContainer.createOutput(metaDataBlobName(snapshotId, false)))) { - writeGlobalMetaData(metaData, output); - } + globalMetaDataFormat.write(metaData, snapshotsBlobContainer, snapshotId.getSnapshot()); for (String index : indices) { final IndexMetaData indexMetaData = metaData.index(index); final BlobPath indexPath = basePath().add("indices").add(index); final BlobContainer indexMetaDataBlobContainer = blobStore().blobContainer(indexPath); - try (StreamOutput output = compressIfNeeded(indexMetaDataBlobContainer.createOutput(snapshotBlobName(snapshotId)))) { - XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, output); - builder.startObject(); - IndexMetaData.Builder.toXContent(indexMetaData, builder, ToXContent.EMPTY_PARAMS); - builder.endObject(); - builder.close(); - } + indexMetaDataFormat.write(indexMetaData, indexMetaDataBlobContainer, snapshotId.getSnapshot()); } } catch (IOException ex) { throw new SnapshotCreationException(snapshotId, ex); @@ -280,7 +290,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Repository> implements Repository, RateLimiterListener { List<SnapshotId> snapshotIds = snapshots(); @@ -325,7 +332,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Repository> implements Repository, RateLimiterListener { public Snapshot finalizeSnapshot(SnapshotId snapshotId, List<String> indices, long startTime, String failure, int totalShards, List<SnapshotShardFailure> shardFailures) { try { - String tempBlobName = tempSnapshotBlobName(snapshotId); - String blobName = snapshotBlobName(snapshotId); Snapshot blobStoreSnapshot = new Snapshot(snapshotId.getSnapshot(), indices, startTime, failure, System.currentTimeMillis(), totalShards, shardFailures); - try (StreamOutput output = compressIfNeeded(snapshotsBlobContainer.createOutput(tempBlobName))) { - writeSnapshot(blobStoreSnapshot, output); - } - snapshotsBlobContainer.move(tempBlobName, blobName); + snapshotFormat.write(blobStoreSnapshot, snapshotsBlobContainer, snapshotId.getSnapshot()); List<SnapshotId> snapshotIds = snapshots(); if (!snapshotIds.contains(snapshotId)) { snapshotIds = ImmutableList.<SnapshotId>builder().addAll(snapshotIds).add(snapshotId).build(); @@ -402,14 +383,25 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Repository> implements Repository, RateLimiterListener { List<SnapshotId> snapshots = newArrayList(); Map<String, BlobMetaData> blobs; try { - blobs = snapshotsBlobContainer.listBlobsByPrefix(SNAPSHOT_PREFIX); + blobs = snapshotsBlobContainer.listBlobsByPrefix(COMMON_SNAPSHOT_PREFIX); } catch (UnsupportedOperationException ex) { // Fall back in case listBlobsByPrefix isn't supported by the blob store return readSnapshotList(); } int prefixLength = SNAPSHOT_PREFIX.length(); + int suffixLength = SNAPSHOT_SUFFIX.length(); + int legacyPrefixLength = LEGACY_SNAPSHOT_PREFIX.length(); for (BlobMetaData md : blobs.values()) { - String name = md.name().substring(prefixLength); + String blobName = md.name(); + final String name; + if (blobName.startsWith(SNAPSHOT_PREFIX) && blobName.length() > legacyPrefixLength) { + name = blobName.substring(prefixLength, blobName.length() - suffixLength); + } else if (blobName.startsWith(LEGACY_SNAPSHOT_PREFIX) && blobName.length() > suffixLength + prefixLength) { + name = blobName.substring(legacyPrefixLength); + } else { + // not sure what it was - ignore + continue; + } snapshots.add(new SnapshotId(repositoryName, name)); } return ImmutableList.copyOf(snapshots);
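// The listing above recovers snapshot names from both blob generations. The same extraction,
// as a self-contained sketch (prefix/suffix literals inferred from the layout comment:
// "snap-"/".dat" for the checksummed format, "snapshot-" for the legacy one):
static String snapshotName(String blobName) {
    if (blobName.startsWith("snap-") && blobName.endsWith(".dat")) {
        return blobName.substring("snap-".length(), blobName.length() - ".dat".length());
    }
    if (blobName.startsWith("snapshot-")) {
        return blobName.substring("snapshot-".length());
    }
    return null; // unknown blob; the loop above simply skips such entries
}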
@@ -432,24 +424,37 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Repository> implements Repository, RateLimiterListener { private MetaData readSnapshotMetaData(SnapshotId snapshotId, Version snapshotVersion, List<String> indices, boolean ignoreIndexErrors) throws IOException { - return readSnapshotMetaData(snapshotId, legacyMetaData(snapshotVersion), indices, ignoreIndexErrors); - } - - private MetaData readSnapshotMetaData(SnapshotId snapshotId, boolean legacy, List<String> indices, boolean ignoreIndexErrors) throws IOException { MetaData metaData; - try (InputStream blob = snapshotsBlobContainer.openInput(metaDataBlobName(snapshotId, legacy))) { - metaData = readMetaData(ByteStreams.toByteArray(blob)); + if (snapshotVersion == null) { + // When we delete corrupted snapshots we might not know which version we are dealing with + // We can try detecting the version based on the metadata file format + assert ignoreIndexErrors; + if (globalMetaDataFormat.exists(snapshotsBlobContainer, snapshotId.getSnapshot())) { + snapshotVersion = Version.CURRENT; + } else if (globalMetaDataLegacyFormat.exists(snapshotsBlobContainer, snapshotId.getSnapshot())) { + snapshotVersion = Version.V_1_0_0; + } else { + throw new SnapshotMissingException(snapshotId); + } + } + try { + metaData = globalMetaDataFormat(snapshotVersion).read(snapshotsBlobContainer, snapshotId.getSnapshot()); } catch (FileNotFoundException | NoSuchFileException ex) { throw new SnapshotMissingException(snapshotId, ex); } catch (IOException ex) { @@ -459,28 +464,13 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Repository> implements Repository, RateLimiterListener { + private BlobStoreFormat<MetaData> globalMetaDataFormat(Version version) { + if (legacyMetaData(version)) { + return globalMetaDataLegacyFormat; } else { - return METADATA_PREFIX + snapshotId.getSnapshot() + METADATA_SUFFIX; + return globalMetaDataFormat; + } + } + + /** + * Returns appropriate snapshot format based on the provided version of the snapshot + */ + private BlobStoreFormat<Snapshot> snapshotFormat(Version version) { + if (legacyMetaData(version)) { + return snapshotLegacyFormat; + } else { + return snapshotFormat; } } @@ -593,38 +522,19 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Repository> implements Repository, RateLimiterListener { + private BlobStoreFormat<IndexMetaData> indexMetaDataFormat(Version version) { + if (legacyMetaData(version)) { + return indexMetaDataLegacyFormat; + } else { + return indexMetaDataFormat; + } } /** @@ -636,18 +546,22 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Repository> implements Repository, RateLimiterListener { protected void writeSnapshotList(List<SnapshotId> snapshots) throws IOException { - BytesStreamOutput bStream = new BytesStreamOutput(); - StreamOutput stream = compressIfNeeded(bStream); - XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, stream); - builder.startObject(); - builder.startArray("snapshots"); - for (SnapshotId snapshot : snapshots) { - builder.value(snapshot.getSnapshot()); + final BytesReference bRef; + try (BytesStreamOutput bStream = new BytesStreamOutput()) { + try (StreamOutput stream = new OutputStreamStreamOutput(bStream)) { + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, stream); + builder.startObject(); + builder.startArray("snapshots"); + for (SnapshotId snapshot : snapshots) { + builder.value(snapshot.getSnapshot()); + } + builder.endArray(); + builder.endObject(); + builder.close(); + } + bRef = bStream.bytes(); } - builder.endArray(); - builder.endObject(); - builder.close(); - BytesReference bRef = bStream.bytes(); + snapshotsBlobContainer.deleteBlob(SNAPSHOTS_FILE); try (OutputStream output = snapshotsBlobContainer.createOutput(SNAPSHOTS_FILE)) { bRef.writeTo(output); }
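// Usage sketch for the format class introduced below (wiring elided; "blobContainer",
// "parseFieldMatcher" and "snapshot" are assumed to be in scope, and the codec and
// name-format strings are assumed values mirroring the constants wired up in doStart() above):
ChecksumBlobStoreFormat<Snapshot> format =
        new ChecksumBlobStoreFormat<>("snapshot", "snap-%s.dat", Snapshot.PROTO, parseFieldMatcher, true);
format.write(snapshot, blobContainer, "20131010");              // writes blob "snap-20131010.dat"
Snapshot roundTripped = format.read(blobContainer, "20131010"); // verifies checksum, then parses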
diff --git a/core/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java b/core/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java new file mode 100644 index 00000000000..cc1323e9207 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java @@ -0,0 +1,218 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.repositories.blobstore; + +import com.google.common.io.ByteStreams; +import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.index.CorruptIndexException; +import org.apache.lucene.index.IndexFormatTooNewException; +import org.apache.lucene.index.IndexFormatTooOldException; +import org.apache.lucene.store.OutputStreamIndexOutput; +import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.blobstore.BlobContainer; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.compress.CompressorFactory; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.lucene.store.ByteArrayIndexInput; +import org.elasticsearch.common.lucene.store.IndexOutputOutputStream; +import org.elasticsearch.common.xcontent.*; +import org.elasticsearch.gateway.CorruptStateException; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.Locale; + +/** + * Snapshot metadata file format used in v2.0 and above + */ +public class ChecksumBlobStoreFormat<T extends ToXContent> extends BlobStoreFormat<T> { + + private static final String TEMP_FILE_PREFIX = "pending-"; + + private static final XContentType DEFAULT_X_CONTENT_TYPE = XContentType.SMILE; + + // The format version + public static final int VERSION = 1; + + private static final int BUFFER_SIZE = 4096; + + protected final XContentType xContentType; + + protected final boolean compress; + + private final String codec; + + /** + * @param codec codec name + * @param blobNameFormat format of the blob name in {@link String#format} format + * @param reader prototype object that can deserialize T from XContent + * @param parseFieldMatcher parse field matcher + * @param compress true if the content should be compressed + * @param xContentType content type that should be used for write operations + */ + public ChecksumBlobStoreFormat(String codec, String blobNameFormat, FromXContentBuilder<T> reader, ParseFieldMatcher parseFieldMatcher, boolean compress, XContentType xContentType) { + super(blobNameFormat, reader, parseFieldMatcher); + this.xContentType = xContentType; + this.compress = compress; + this.codec = codec; + } + + /** + * @param codec codec name + * @param blobNameFormat format of the blob name in {@link String#format} format + * @param reader prototype object that can deserialize T from XContent + * @param parseFieldMatcher parse field matcher + * @param compress true if the content should be compressed + */ + public ChecksumBlobStoreFormat(String codec, String blobNameFormat, FromXContentBuilder<T> reader, ParseFieldMatcher parseFieldMatcher, boolean compress) { + this(codec, blobNameFormat, reader, parseFieldMatcher, compress, DEFAULT_X_CONTENT_TYPE); + }
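// On disk, every blob this class writes carries a Lucene-style envelope around the
// X-Content payload; readBlob (below) verifies the envelope before parsing:
//
//   +----------------------------------------------------+
//   | CodecUtil.writeHeader: magic, codec name, VERSION   |
//   | content: SMILE by default, optionally compressed    |
//   | CodecUtil.writeFooter: footer magic + CRC32 checksum|
//   +----------------------------------------------------+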
+ + /** + * Reads the blob with the specified name without resolving the blob name using the {@link #blobName} method. + * + * @param blobContainer blob container + * @param blobName blob name + * @return parsed blob object + * @throws IOException + */ + public T readBlob(BlobContainer blobContainer, String blobName) throws IOException { + try (InputStream inputStream = blobContainer.openInput(blobName)) { + byte[] bytes = ByteStreams.toByteArray(inputStream); + final String resourceDesc = "ChecksumBlobStoreFormat.readBlob(blob=\"" + blobName + "\")"; + try (ByteArrayIndexInput indexInput = new ByteArrayIndexInput(resourceDesc, bytes)) { + CodecUtil.checksumEntireFile(indexInput); + CodecUtil.checkHeader(indexInput, codec, VERSION, VERSION); + long filePointer = indexInput.getFilePointer(); + long contentSize = indexInput.length() - CodecUtil.footerLength() - filePointer; + BytesReference bytesReference = new BytesArray(bytes, (int) filePointer, (int) contentSize); + return read(bytesReference); + } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) { + // we trick this into a dedicated exception with the original stacktrace + throw new CorruptStateException(ex); + } + } + }
+ + /** + * Writes the blob atomically, resolving the blob name using the {@link #blobName} and {@link #tempBlobName} methods. + * + * The blob will be compressed and a checksum will be written if required. + * + * Atomic move might be very inefficient on some repositories. It also cannot overwrite existing files. + * + * @param obj object to be serialized + * @param blobContainer blob container + * @param name blob name + * @throws IOException + */ + public void writeAtomic(T obj, BlobContainer blobContainer, String name) throws IOException { + String blobName = blobName(name); + String tempBlobName = tempBlobName(name); + writeBlob(obj, blobContainer, tempBlobName); + try { + blobContainer.move(tempBlobName, blobName); + } catch (IOException ex) { + // Move failed - try cleaning up + blobContainer.deleteBlob(tempBlobName); + throw ex; + } + }
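// writeAtomic stages content under a "pending-" name before the rename, so a crash between
// the write and the move can strand a temporary blob. isTempBlobName (further below) lets
// cleanup code spot such leftovers; a sketch of how a repository might purge them:
for (String staleBlob : blobContainer.listBlobs().keySet()) {
    if (snapshotFormat.isTempBlobName(staleBlob)) {
        blobContainer.deleteBlob(staleBlob); // never referenced by the snapshot index, safe to drop
    }
}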
+ + /** + * Writes the blob, resolving the blob name using the {@link #blobName} method. + * + * The blob will be compressed and a checksum will be written if required. + * + * @param obj object to be serialized + * @param blobContainer blob container + * @param name blob name + * @throws IOException + */ + public void write(T obj, BlobContainer blobContainer, String name) throws IOException { + String blobName = blobName(name); + writeBlob(obj, blobContainer, blobName); + }
+ + /** + * Writes the blob under the given blob name directly, without resolving the name using the {@link #blobName} method and without the atomic temp-blob move. + * + * The blob will be compressed and a checksum will be written if required. + * + * @param obj object to be serialized + * @param blobContainer blob container + * @param blobName blob name + * @throws IOException + */ + protected void writeBlob(T obj, BlobContainer blobContainer, String blobName) throws IOException { + BytesReference bytes = write(obj); + try (OutputStream outputStream = blobContainer.createOutput(blobName)) { + final String resourceDesc = "ChecksumBlobStoreFormat.writeBlob(blob=\"" + blobName + "\")"; + try (OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput(resourceDesc, outputStream, BUFFER_SIZE)) { + CodecUtil.writeHeader(indexOutput, codec, VERSION); + try (OutputStream indexOutputOutputStream = new IndexOutputOutputStream(indexOutput) { + @Override + public void close() throws IOException { + // this is important since some of the XContentBuilders write bytes on close. + // in order to write the footer we need to prevent closing the actual index output. + } }) { + bytes.writeTo(indexOutputOutputStream); + } + CodecUtil.writeFooter(indexOutput); + } + } + } + + /** + * Returns true if the blob is a leftover temporary blob. + * + * Temporary blobs might be left behind after a failed atomic write operation. + */ + public boolean isTempBlobName(String blobName) { + return blobName.startsWith(ChecksumBlobStoreFormat.TEMP_FILE_PREFIX); + } + + protected BytesReference write(T obj) throws IOException { + try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) { + if (compress) { + try (StreamOutput compressedStreamOutput = CompressorFactory.defaultCompressor().streamOutput(bytesStreamOutput)) { + write(obj, compressedStreamOutput); + } + } else { + write(obj, bytesStreamOutput); + } + return bytesStreamOutput.bytes(); + } + } + + protected void write(T obj, StreamOutput streamOutput) throws IOException { + try (XContentBuilder builder = XContentFactory.contentBuilder(xContentType, streamOutput)) { + builder.startObject(); + obj.toXContent(builder, SNAPSHOT_ONLY_FORMAT_PARAMS); + builder.endObject(); + } + } + + protected String tempBlobName(String name) { + return TEMP_FILE_PREFIX + String.format(Locale.ROOT, blobNameFormat, name); + } + +}
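// The two format families coexist so pre-2.0 snapshots stay readable: selection is by
// snapshot version, as in the globalMetaDataFormat(Version) helper above. Condensed sketch
// (legacyMetaData(version) is the version check BlobStoreRepository already uses):
BlobStoreFormat<MetaData> fmt = legacyMetaData(snapshotVersion)
        ? globalMetaDataLegacyFormat   // plain X-Content blob, e.g. "metadata-20131010"
        : globalMetaDataFormat;        // checksummed blob, e.g. "meta-20131010.dat"
MetaData metaData = fmt.read(snapshotsBlobContainer, snapshotId.getSnapshot());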
diff --git a/core/src/main/java/org/elasticsearch/repositories/blobstore/LegacyBlobStoreFormat.java b/core/src/main/java/org/elasticsearch/repositories/blobstore/LegacyBlobStoreFormat.java new file mode 100644 index 00000000000..5fcc8ec47a6 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/repositories/blobstore/LegacyBlobStoreFormat.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.repositories.blobstore; + +import com.google.common.io.ByteStreams; +import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.blobstore.BlobContainer; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.xcontent.FromXContentBuilder; +import org.elasticsearch.common.xcontent.ToXContent; + +import java.io.IOException; +import java.io.InputStream; + +/** + * Snapshot metadata file format used before v2.0 + */ +public class LegacyBlobStoreFormat<T extends ToXContent> extends BlobStoreFormat<T> { + + /** + * @param blobNameFormat format of the blob name in {@link String#format} format + * @param reader the prototype object that can deserialize objects with type T + * @param parseFieldMatcher parse field matcher + */ + public LegacyBlobStoreFormat(String blobNameFormat, FromXContentBuilder<T> reader, ParseFieldMatcher parseFieldMatcher) { + super(blobNameFormat, reader, parseFieldMatcher); + } + + /** + * Reads and parses the blob with the given name. Legacy blobs carry no header or checksum, so the bytes are parsed directly. + * + * @param blobContainer blob container + * @param blobName blob name + * @return parsed blob object + * @throws IOException + */ + public T readBlob(BlobContainer blobContainer, String blobName) throws IOException { + try (InputStream inputStream = blobContainer.openInput(blobName)) { + return read(new BytesArray(ByteStreams.toByteArray(inputStream))); + } + } +}
diff --git a/core/src/main/java/org/elasticsearch/repositories/uri/URLRepository.java b/core/src/main/java/org/elasticsearch/repositories/uri/URLRepository.java index c2db2391703..f3c439b59c5 100644 --- a/core/src/main/java/org/elasticsearch/repositories/uri/URLRepository.java +++ b/core/src/main/java/org/elasticsearch/repositories/uri/URLRepository.java @@ -19,12 +19,14 @@ package org.elasticsearch.repositories.uri; -import com.google.common.collect.ImmutableList; import org.elasticsearch.cluster.metadata.SnapshotId; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.blobstore.url.URLBlobStore; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.util.URIPattern; +import org.elasticsearch.env.Environment; import org.elasticsearch.index.snapshots.IndexShardRepository; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.RepositoryName; @@ -32,6 +34,7 @@ import org.elasticsearch.repositories.RepositorySettings; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import java.io.IOException; +import java.net.URISyntaxException; import java.net.URL; import java.util.List; @@ -48,6 +51,18 @@ public class URLRepository extends BlobStoreRepository { public final static String TYPE = "url"; + public final static String[] DEFAULT_SUPPORTED_PROTOCOLS = {"http", "https", "ftp", "file", "jar"}; + + public final static String SUPPORTED_PROTOCOLS_SETTING = "repositories.url.supported_protocols"; + + public final static String ALLOWED_URLS_SETTING = "repositories.url.allowed_urls"; + + private final String[] supportedProtocols; + + private final URIPattern[] urlWhiteList; + + private final Environment environment; + private final URLBlobStore blobStore; private final BlobPath basePath; @@ -63,17 +78,25 @@ public class URLRepository extends BlobStoreRepository { * @throws IOException */ @Inject - public URLRepository(RepositoryName name, RepositorySettings repositorySettings, IndexShardRepository indexShardRepository) throws IOException { + public URLRepository(RepositoryName name, RepositorySettings repositorySettings, IndexShardRepository indexShardRepository, Environment environment) throws IOException { super(name.getName(), repositorySettings, indexShardRepository); URL url; - String path = repositorySettings.settings().get("url", settings.get("repositories.uri.url")); + String path = repositorySettings.settings().get("url", settings.get("repositories.url.url", settings.get("repositories.uri.url"))); if (path == null) { throw new RepositoryException(name.name(), "missing url"); } else { url = new URL(path); } + supportedProtocols = settings.getAsArray(SUPPORTED_PROTOCOLS_SETTING, DEFAULT_SUPPORTED_PROTOCOLS); + String[] urlWhiteList = settings.getAsArray(ALLOWED_URLS_SETTING, Strings.EMPTY_ARRAY); + this.urlWhiteList = new URIPattern[urlWhiteList.length]; + for (int i = 0; i < urlWhiteList.length; i++) { + this.urlWhiteList[i] = new URIPattern(urlWhiteList[i]); + } + this.environment = environment; listDirectories = repositorySettings.settings().getAsBoolean("list_directories", settings.getAsBoolean("repositories.uri.list_directories", true)); - blobStore = new URLBlobStore(settings, url); + URL normalizedURL = checkURL(url); + blobStore = new URLBlobStore(settings, normalizedURL); basePath = BlobPath.cleanPath(); } @@ -114,4 +137,35 @@ public class URLRepository extends BlobStoreRepository { throw new UnsupportedOperationException("shouldn't be called"); } + /** + * Makes sure that the url is whitelisted or, if it points to the local file system, that it matches one of the root paths in path.repo + */ + private URL checkURL(URL url) { + String protocol = url.getProtocol(); + if (protocol == null) { + throw new RepositoryException(repositoryName, "unknown url protocol from URL [" + url + "]"); + } + for (String supportedProtocol : supportedProtocols) { + if (supportedProtocol.equals(protocol)) { + try { + if (URIPattern.match(urlWhiteList, url.toURI())) { + // URL matches white list - no additional processing is needed + return url; + } + } catch (URISyntaxException ex) { + logger.warn("cannot parse the specified url [{}]", url); + throw new RepositoryException(repositoryName, "cannot parse the specified url [" + url + "]"); + } + // We didn't match the white list - try to resolve against path.repo + URL normalizedUrl = environment.resolveRepoURL(url); + if (normalizedUrl == null) { + logger.warn("The specified url [{}] doesn't start with any repository paths specified by the path.repo setting: [{}], and it doesn't match the repositories.url.allowed_urls setting", url, environment.repoFiles()); + throw new RepositoryException(repositoryName, "file url [" + url + "] doesn't match any of the locations specified by path.repo or repositories.url.allowed_urls"); + } + return normalizedUrl; + } + } + throw new RepositoryException(repositoryName, "unsupported url protocol [" + protocol + "] from URL [" + url + "]"); + } + }
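// checkURL accepts a URL only if it matches repositories.url.allowed_urls or resolves under
// a path.repo root. A configuration-plus-check sketch (the pattern and URL strings are
// hypothetical examples, not values from this change):
URIPattern[] allowed = { new URIPattern("http://snapshots.example.com/*") };
if (URIPattern.match(allowed, new URI("http://snapshots.example.com/repo1"))) {
    // whitelisted - the URL is used as-is, with no path.repo resolution
}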
diff --git a/core/src/main/java/org/elasticsearch/rest/action/RestActionModule.java b/core/src/main/java/org/elasticsearch/rest/action/RestActionModule.java index 579c42a8594..a13a038d6e5 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/RestActionModule.java +++ b/core/src/main/java/org/elasticsearch/rest/action/RestActionModule.java @@ -67,6 +67,7 @@ import org.elasticsearch.rest.action.admin.indices.open.RestOpenIndexAction; import org.elasticsearch.rest.action.admin.indices.optimize.RestOptimizeAction; import org.elasticsearch.rest.action.admin.indices.recovery.RestRecoveryAction; import org.elasticsearch.rest.action.admin.indices.refresh.RestRefreshAction; +import org.elasticsearch.rest.action.admin.indices.shards.RestIndicesShardStoresAction; import org.elasticsearch.rest.action.admin.indices.segments.RestIndicesSegmentsAction; import org.elasticsearch.rest.action.admin.indices.settings.RestGetSettingsAction; import org.elasticsearch.rest.action.admin.indices.settings.RestUpdateSettingsAction; @@ -155,6 +156,7 @@ public class RestActionModule extends AbstractModule { bind(RestGetIndicesAction.class).asEagerSingleton(); bind(RestIndicesStatsAction.class).asEagerSingleton(); bind(RestIndicesSegmentsAction.class).asEagerSingleton(); + bind(RestIndicesShardStoresAction.class).asEagerSingleton(); bind(RestGetAliasesAction.class).asEagerSingleton(); bind(RestAliasesExistAction.class).asEagerSingleton(); bind(RestIndexDeleteAliasesAction.class).asEagerSingleton(); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/delete/AliasesMissingException.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/delete/AliasesMissingException.java deleted file mode 100644 index d6ac3042d21..00000000000 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/delete/AliasesMissingException.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.rest.action.admin.indices.alias.delete; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.rest.RestStatus; - -import java.io.IOException; -import java.util.Arrays; - -/** - * - */ -public class AliasesMissingException extends ElasticsearchException { - - private final String[] names; - - public AliasesMissingException(String...
names) { - super("aliases " + Arrays.toString(names) + " missing"); - this.names = names; - } - - public String[] names() { - return this.names; - } - - @Override - public RestStatus status() { - return RestStatus.NOT_FOUND; - } - - public AliasesMissingException(StreamInput in) throws IOException{ - super(in); - names = in.readStringArray(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeStringArray(names); - } -} diff --git a/core/src/main/java/org/elasticsearch/indices/IndexMissingException.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/delete/AliasesNotFoundException.java similarity index 66% rename from core/src/main/java/org/elasticsearch/indices/IndexMissingException.java rename to core/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/delete/AliasesNotFoundException.java index 8b6d7735c93..6e183786df1 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndexMissingException.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/delete/AliasesNotFoundException.java @@ -16,31 +16,25 @@ * specific language governing permissions and limitations * under the License. */ +package org.elasticsearch.rest.action.admin.indices.alias.delete; -package org.elasticsearch.indices; - +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexException; -import org.elasticsearch.rest.RestStatus; import java.io.IOException; +import java.util.Arrays; /** * */ -public class IndexMissingException extends IndexException { +public class AliasesNotFoundException extends ResourceNotFoundException { - public IndexMissingException(Index index) { - super(index, "no such index"); + public AliasesNotFoundException(String... 
names) { + super("aliases " + Arrays.toString(names) + " missing"); + this.setResources("aliases", names); } - public IndexMissingException(StreamInput in) throws IOException{ + public AliasesNotFoundException(StreamInput in) throws IOException{ super(in); } - - @Override - public RestStatus status() { - return RestStatus.NOT_FOUND; - } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/get/RestGetMappingAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/get/RestGetMappingAction.java index 3e9b40027f4..3cfb6f6da64 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/get/RestGetMappingAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/get/RestGetMappingAction.java @@ -32,7 +32,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilderString; import org.elasticsearch.index.Index; -import org.elasticsearch.indices.IndexMissingException; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.indices.TypeMissingException; import org.elasticsearch.rest.*; import org.elasticsearch.rest.action.support.RestBuilderListener; @@ -71,7 +71,7 @@ public class RestGetMappingAction extends BaseRestHandler { if (indices.length != 0 && types.length != 0) { return new BytesRestResponse(OK, builder.endObject()); } else if (indices.length != 0) { - return new BytesRestResponse(channel, new IndexMissingException(new Index(indices[0]))); + return new BytesRestResponse(channel, new IndexNotFoundException(indices[0])); } else if (types.length != 0) { return new BytesRestResponse(channel, new TypeMissingException(new Index("_all"), types[0])); } else { diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/shards/RestIndicesShardStoresAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/shards/RestIndicesShardStoresAction.java new file mode 100644 index 00000000000..a776efb63a7 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/shards/RestIndicesShardStoresAction.java @@ -0,0 +1,66 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.rest.action.admin.indices.shards; + +import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction; +import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; +import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.rest.*; +import org.elasticsearch.rest.action.support.RestBuilderListener; + +import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestStatus.OK; + +/** + * Rest action for {@link IndicesShardStoresAction} + */ +public class RestIndicesShardStoresAction extends BaseRestHandler { + + @Inject + public RestIndicesShardStoresAction(Settings settings, RestController controller, Client client) { + super(settings, controller, client); + controller.registerHandler(GET, "/_shard_stores", this); + controller.registerHandler(GET, "/{index}/_shard_stores", this); + } + + @Override + public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + IndicesShardStoresRequest indicesShardStoresRequest = new IndicesShardStoresRequest(Strings.splitStringByCommaToArray(request.param("index"))); + if (request.hasParam("status")) { + indicesShardStoresRequest.shardStatuses(Strings.splitStringByCommaToArray(request.param("status"))); + } + indicesShardStoresRequest.indicesOptions(IndicesOptions.fromRequest(request, indicesShardStoresRequest.indicesOptions())); + client.admin().indices().shardStores(indicesShardStoresRequest, new RestBuilderListener<IndicesShardStoresResponse>(channel) { + @Override + public RestResponse buildResponse(IndicesShardStoresResponse response, XContentBuilder builder) throws Exception { + builder.startObject(); + response.toXContent(builder, request); + builder.endObject(); + return new BytesRestResponse(OK, builder); + } + }); + } +}
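// The handler above can be exercised over REST or through the same Java client call it
// delegates to (index name and status filter below are illustrative):
//   GET /my-index/_shard_stores?status=yellow,red
IndicesShardStoresRequest request = new IndicesShardStoresRequest("my-index");
request.shardStatuses("yellow", "red"); // optional filter, mirrors the "status" request parameter
client.admin().indices().shardStores(request, listener);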
diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java index 7e21aacb26d..352588cca71 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java @@ -30,7 +30,9 @@ import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.Client; import org.elasticsearch.client.Requests; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.Table; @@ -47,9 +49,12 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; public class RestIndicesAction extends AbstractCatAction { + private final IndexNameExpressionResolver indexNameExpressionResolver; + @Inject - public RestIndicesAction(Settings settings, RestController controller, Client client) { + public RestIndicesAction(Settings settings, RestController controller, Client client, IndexNameExpressionResolver indexNameExpressionResolver) { super(settings, controller, client); + this.indexNameExpressionResolver = indexNameExpressionResolver; controller.registerHandler(GET, "/_cat/indices", this); controller.registerHandler(GET, "/_cat/indices/{index}", this); } @@ -71,8 +76,9 @@ public class RestIndicesAction extends AbstractCatAction { client.admin().cluster().state(clusterStateRequest, new RestActionListener<ClusterStateResponse>(channel) { @Override public void processResponse(final ClusterStateResponse clusterStateResponse) { - final String[] concreteIndices = clusterStateResponse.getState().metaData().concreteIndices(IndicesOptions.fromOptions(false, true, true, true), indices); - final String[] openIndices = clusterStateResponse.getState().metaData().concreteIndices(IndicesOptions.lenientExpandOpen(), indices); + ClusterState state = clusterStateResponse.getState(); + final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, IndicesOptions.fromOptions(false, true, true, true), indices); + final String[] openIndices = indexNameExpressionResolver.concreteIndices(state, IndicesOptions.lenientExpandOpen(), indices); ClusterHealthRequest clusterHealthRequest = Requests.clusterHealthRequest(openIndices); clusterHealthRequest.local(request.paramAsBoolean("local", clusterHealthRequest.local())); client.admin().cluster().health(clusterHealthRequest, new RestActionListener<ClusterHealthResponse>(channel) { @@ -244,6 +250,15 @@ public class RestIndicesAction extends AbstractCatAction { table.addCell("search.query_total", "sibling:pri;alias:sqto,searchQueryTotal;default:false;text-align:right;desc:total query phase ops"); table.addCell("pri.search.query_total", "default:false;text-align:right;desc:total query phase ops"); + table.addCell("search.scroll_current", "sibling:pri;alias:scc,searchScrollCurrent;default:false;text-align:right;desc:open scroll contexts"); + table.addCell("pri.search.scroll_current", "default:false;text-align:right;desc:open scroll contexts"); + + table.addCell("search.scroll_time", "sibling:pri;alias:scti,searchScrollTime;default:false;text-align:right;desc:time scroll contexts held open"); + table.addCell("pri.search.scroll_time", "default:false;text-align:right;desc:time scroll contexts held open"); + + table.addCell("search.scroll_total", "sibling:pri;alias:scto,searchScrollTotal;default:false;text-align:right;desc:completed scroll contexts"); + table.addCell("pri.search.scroll_total", "default:false;text-align:right;desc:completed scroll contexts"); + table.addCell("segments.count", "sibling:pri;alias:sc,segmentsCount;default:false;text-align:right;desc:number of segments"); table.addCell("pri.segments.count", "default:false;text-align:right;desc:number of segments"); @@ -443,6 +458,15 @@ public class RestIndicesAction extends AbstractCatAction { table.addCell(indexStats == null ? null : indexStats.getTotal().getSearch().getTotal().getQueryCount()); table.addCell(indexStats == null ? null : indexStats.getPrimaries().getSearch().getTotal().getQueryCount()); + table.addCell(indexStats == null ? null : indexStats.getTotal().getSearch().getTotal().getScrollCurrent()); + table.addCell(indexStats == null ? null : indexStats.getPrimaries().getSearch().getTotal().getScrollCurrent()); + + table.addCell(indexStats == null ? null : indexStats.getTotal().getSearch().getTotal().getScrollTime()); + table.addCell(indexStats == null ? null : indexStats.getPrimaries().getSearch().getTotal().getScrollTime()); + + table.addCell(indexStats == null ? null : indexStats.getTotal().getSearch().getTotal().getScrollCount()); + table.addCell(indexStats == null ?
null : indexStats.getPrimaries().getSearch().getTotal().getScrollCount()); + table.addCell(indexStats == null ? null : indexStats.getTotal().getSegments().getCount()); table.addCell(indexStats == null ? null : indexStats.getPrimaries().getSegments().getCount()); diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java index c2b63cea49f..91e0235f54b 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java @@ -190,6 +190,9 @@ public class RestNodesAction extends AbstractCatAction { table.addCell("search.query_current", "alias:sqc,searchQueryCurrent;default:false;text-align:right;desc:current query phase ops"); table.addCell("search.query_time", "alias:sqti,searchQueryTime;default:false;text-align:right;desc:time spent in query phase"); table.addCell("search.query_total", "alias:sqto,searchQueryTotal;default:false;text-align:right;desc:total query phase ops"); + table.addCell("search.scroll_current", "alias:scc,searchScrollCurrent;default:false;text-align:right;desc:open scroll contexts"); + table.addCell("search.scroll_time", "alias:scti,searchScrollTime;default:false;text-align:right;desc:time scroll contexts held open"); + table.addCell("search.scroll_total", "alias:scto,searchScrollTotal;default:false;text-align:right;desc:completed scroll contexts"); table.addCell("segments.count", "alias:sc,segmentsCount;default:false;text-align:right;desc:number of segments"); table.addCell("segments.memory", "alias:sm,segmentsMemory;default:false;text-align:right;desc:memory used by segments"); @@ -322,6 +325,9 @@ public class RestNodesAction extends AbstractCatAction { table.addCell(searchStats == null ? null : searchStats.getTotal().getQueryCurrent()); table.addCell(searchStats == null ? null : searchStats.getTotal().getQueryTime()); table.addCell(searchStats == null ? null : searchStats.getTotal().getQueryCount()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollCurrent()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollTime()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollCount()); SegmentsStats segmentsStats = indicesStats == null ? null : indicesStats.getSegments(); table.addCell(segmentsStats == null ? 
null : segmentsStats.getCount()); diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java index 802607829ff..76698efcaa6 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java @@ -146,6 +146,9 @@ public class RestShardsAction extends AbstractCatAction { table.addCell("search.query_current", "alias:sqc,searchQueryCurrent;default:false;text-align:right;desc:current query phase ops"); table.addCell("search.query_time", "alias:sqti,searchQueryTime;default:false;text-align:right;desc:time spent in query phase"); table.addCell("search.query_total", "alias:sqto,searchQueryTotal;default:false;text-align:right;desc:total query phase ops"); + table.addCell("search.scroll_current", "alias:scc,searchScrollCurrent;default:false;text-align:right;desc:open scroll contexts"); + table.addCell("search.scroll_time", "alias:scti,searchScrollTime;default:false;text-align:right;desc:time scroll contexts held open"); + table.addCell("search.scroll_total", "alias:scto,searchScrollTotal;default:false;text-align:right;desc:completed scroll contexts"); table.addCell("segments.count", "alias:sc,segmentsCount;default:false;text-align:right;desc:number of segments"); table.addCell("segments.memory", "alias:sm,segmentsMemory;default:false;text-align:right;desc:memory used by segments"); @@ -277,6 +280,9 @@ public class RestShardsAction extends AbstractCatAction { table.addCell(shardStats == null ? null : shardStats.getSearch().getTotal().getQueryCurrent()); table.addCell(shardStats == null ? null : shardStats.getSearch().getTotal().getQueryTime()); table.addCell(shardStats == null ? null : shardStats.getSearch().getTotal().getQueryCount()); + table.addCell(shardStats == null ? null : shardStats.getSearch().getTotal().getScrollCurrent()); + table.addCell(shardStats == null ? null : shardStats.getSearch().getTotal().getScrollTime()); + table.addCell(shardStats == null ? null : shardStats.getSearch().getTotal().getScrollCount()); table.addCell(shardStats == null ? null : shardStats.getSegments().getCount()); table.addCell(shardStats == null ? null : shardStats.getSegments().getMemory()); diff --git a/core/src/main/java/org/elasticsearch/script/CompiledScript.java b/core/src/main/java/org/elasticsearch/script/CompiledScript.java index 9e3bfaf3f4c..aa34678c041 100644 --- a/core/src/main/java/org/elasticsearch/script/CompiledScript.java +++ b/core/src/main/java/org/elasticsearch/script/CompiledScript.java @@ -24,17 +24,39 @@ package org.elasticsearch.script; */ public class CompiledScript { + private final ScriptService.ScriptType type; + private final String name; private final String lang; private final Object compiled; /** * Constructor for CompiledScript. + * @param type The type of script to be executed. + * @param name The name of the script to be executed. * @param lang The language of the script to be executed. * @param compiled The compiled script Object that is executable. */ - public CompiledScript(String lang, Object compiled) { - this.lang = lang; - this.compiled = compiled; + public CompiledScript(ScriptService.ScriptType type, String name, String lang, Object compiled) { + this.type = type; + this.name = name; + this.lang = lang; + this.compiled = compiled; + } + + /** + * Method to get the script type. + * @return The type of the script, i.e. whether it is a file, indexed, or inline script.
+ */ + public ScriptService.ScriptType type() { + return type; + } + + /** + * Method to get the name of the script. + * @return The name of the script to be executed. + */ + public String name() { + return name; } /** @@ -52,4 +74,12 @@ public class CompiledScript { public Object compiled() { return compiled; } + + /** + * @return A string composed of type, lang, and name to describe the CompiledScript. + */ + @Override + public String toString() { + return type + " script [" + name + "] using lang [" + lang + "]"; + } } diff --git a/core/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java b/core/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java index 70bf27b82e4..b46bc7328d0 100644 --- a/core/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java +++ b/core/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java @@ -71,14 +71,14 @@ public class NativeScriptEngineService extends AbstractComponent implements Scri } @Override - public ExecutableScript executable(Object compiledScript, @Nullable Map vars) { - NativeScriptFactory scriptFactory = (NativeScriptFactory) compiledScript; + public ExecutableScript executable(CompiledScript compiledScript, @Nullable Map vars) { + NativeScriptFactory scriptFactory = (NativeScriptFactory) compiledScript.compiled(); return scriptFactory.newScript(vars); } @Override - public SearchScript search(Object compiledScript, final SearchLookup lookup, @Nullable final Map vars) { - final NativeScriptFactory scriptFactory = (NativeScriptFactory) compiledScript; + public SearchScript search(CompiledScript compiledScript, final SearchLookup lookup, @Nullable final Map vars) { + final NativeScriptFactory scriptFactory = (NativeScriptFactory) compiledScript.compiled(); return new SearchScript() { @Override public LeafSearchScript getLeafSearchScript(LeafReaderContext context) throws IOException { @@ -90,7 +90,7 @@ public class NativeScriptEngineService extends AbstractComponent implements Scri } @Override - public Object execute(Object compiledScript, Map vars) { + public Object execute(CompiledScript compiledScript, Map vars) { return executable(compiledScript, vars).run(); } diff --git a/core/src/main/java/org/elasticsearch/script/ScriptEngineService.java b/core/src/main/java/org/elasticsearch/script/ScriptEngineService.java index 7b78427ebc3..966085754c0 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptEngineService.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptEngineService.java @@ -38,11 +38,11 @@ public interface ScriptEngineService extends Closeable { Object compile(String script); - ExecutableScript executable(Object compiledScript, @Nullable Map vars); + ExecutableScript executable(CompiledScript compiledScript, @Nullable Map vars); - SearchScript search(Object compiledScript, SearchLookup lookup, @Nullable Map vars); + SearchScript search(CompiledScript compiledScript, SearchLookup lookup, @Nullable Map vars); - Object execute(Object compiledScript, Map vars); + Object execute(CompiledScript compiledScript, Map vars); Object unwrap(Object value); diff --git a/core/src/main/java/org/elasticsearch/script/ScriptService.java b/core/src/main/java/org/elasticsearch/script/ScriptService.java index b41be3250c4..e683e5d66fd 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptService.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptService.java @@ -249,50 +249,67 @@ public class ScriptService extends AbstractComponent implements Closeable { } /** - * 
Compiles a script straight-away, or returns the previously compiled and cached script, without checking if it can be executed based on settings. + * Compiles a script straight-away, or returns the previously compiled and cached script, + * without checking if it can be executed based on settings. */ public CompiledScript compileInternal(Script script) { if (script == null) { throw new IllegalArgumentException("The parameter script (Script) must not be null."); } - String lang = script.getLang(); + String lang = script.getLang() == null ? defaultLang : script.getLang(); + ScriptType type = script.getType(); + //script.getScript() could return either a name or code for a script, + //but we check for a file script name first and an indexed script name second + String name = script.getScript(); - if (lang == null) { - lang = defaultLang; - } if (logger.isTraceEnabled()) { - logger.trace("Compiling lang: [{}] type: [{}] script: {}", lang, script.getType(), script.getScript()); + logger.trace("Compiling lang: [{}] type: [{}] script: {}", lang, type, name); } ScriptEngineService scriptEngineService = getScriptEngineServiceForLang(lang); - String cacheKey = getCacheKey(scriptEngineService, script.getScript()); - if (script.getType() == ScriptType.FILE) { - CompiledScript compiled = staticCache.get(cacheKey); //On disk scripts will be loaded into the staticCache by the listener - if (compiled == null) { - throw new IllegalArgumentException("Unable to find on disk script " + script.getScript()); + if (type == ScriptType.FILE) { + String cacheKey = getCacheKey(scriptEngineService, name, null); + //On disk scripts will be loaded into the staticCache by the listener + CompiledScript compiledScript = staticCache.get(cacheKey); + + if (compiledScript == null) { + throw new IllegalArgumentException("Unable to find on disk file script [" + name + "] using lang [" + lang + "]"); } - return compiled; + + return compiledScript; } + //script.getScript() will be code if the script type is inline String code = script.getScript(); - if (script.getType() == ScriptType.INDEXED) { - final IndexedScript indexedScript = new IndexedScript(lang, script.getScript()); + if (type == ScriptType.INDEXED) { + //The look up for an indexed script must be done every time in case + //the script has been updated in the index since the last look up. + final IndexedScript indexedScript = new IndexedScript(lang, name); + name = indexedScript.id; code = getScriptFromIndex(indexedScript.lang, indexedScript.id); - cacheKey = getCacheKey(scriptEngineService, code); } - CompiledScript compiled = cache.getIfPresent(cacheKey); - if (compiled == null) { - //Either an un-cached inline script or an indexed script - compiled = new CompiledScript(lang, scriptEngineService.compile(code)); + String cacheKey = getCacheKey(scriptEngineService, type == ScriptType.INLINE ? null : name, code); + CompiledScript compiledScript = cache.getIfPresent(cacheKey); + + if (compiledScript == null) { + //Either an un-cached inline script or indexed script + //If the script type is inline the name will be the same as the code for identification in exceptions + try { + compiledScript = new CompiledScript(type, name, lang, scriptEngineService.compile(code)); + } catch (Exception exception) { + throw new ScriptException("Failed to compile " + type + " script [" + name + "] using lang [" + lang + "]", exception); + } + //Since the cache key is the script content itself we don't need to //invalidate/check the cache if an indexed script changes. 
- cache.put(cacheKey, compiled); + cache.put(cacheKey, compiledScript); } - return compiled; + + return compiledScript; } public void queryScriptIndex(GetIndexedScriptRequest request, final ActionListener listener) { @@ -334,13 +351,13 @@ public class ScriptService extends AbstractComponent implements Closeable { Template template = TemplateQueryParser.parse(scriptLang, parser, parseFieldMatcher, "params", "script", "template"); if (Strings.hasLength(template.getScript())) { //Just try and compile it - //This will have the benefit of also adding the script to the cache if it compiles try { + ScriptEngineService scriptEngineService = getScriptEngineServiceForLang(scriptLang); //we don't know yet what the script will be used for, but if all of the operations for this lang with - //indexed scripts are disabled, it makes no sense to even compile it and cache it. - if (isAnyScriptContextEnabled(scriptLang, getScriptEngineServiceForLang(scriptLang), ScriptType.INDEXED)) { - CompiledScript compiledScript = compileInternal(template); - if (compiledScript == null) { + //indexed scripts are disabled, it makes no sense to even compile it. + if (isAnyScriptContextEnabled(scriptLang, scriptEngineService, ScriptType.INDEXED)) { + Object compiled = scriptEngineService.compile(template.getScript()); + if (compiled == null) { throw new IllegalArgumentException("Unable to parse [" + template.getScript() + "] lang [" + scriptLang + "] (ScriptService.compile returned null)"); } @@ -419,7 +436,7 @@ public class ScriptService extends AbstractComponent implements Closeable { * Executes a previously compiled script provided as an argument */ public ExecutableScript executable(CompiledScript compiledScript, Map vars) { - return getScriptEngineServiceForLang(compiledScript.lang()).executable(compiledScript.compiled(), vars); + return getScriptEngineServiceForLang(compiledScript.lang()).executable(compiledScript, vars); } /** @@ -427,7 +444,7 @@ public class ScriptService extends AbstractComponent implements Closeable { */ public SearchScript search(SearchLookup lookup, Script script, ScriptContext scriptContext) { CompiledScript compiledScript = compile(script, scriptContext); - return getScriptEngineServiceForLang(compiledScript.lang()).search(compiledScript.compiled(), lookup, script.getParams()); + return getScriptEngineServiceForLang(compiledScript.lang()).search(compiledScript, lookup, script.getParams()); } private boolean isAnyScriptContextEnabled(String lang, ScriptEngineService scriptEngineService, ScriptType scriptType) { @@ -513,8 +530,8 @@ public class ScriptService extends AbstractComponent implements Closeable { logger.info("compiling script file [{}]", file.toAbsolutePath()); try(InputStreamReader reader = new InputStreamReader(Files.newInputStream(file), Charsets.UTF_8)) { String script = Streams.copyToString(reader); - String cacheKey = getCacheKey(engineService, scriptNameExt.v1()); - staticCache.put(cacheKey, new CompiledScript(engineService.types()[0], engineService.compile(script))); + String cacheKey = getCacheKey(engineService, scriptNameExt.v1(), null); + staticCache.put(cacheKey, new CompiledScript(ScriptType.FILE, scriptNameExt.v1(), engineService.types()[0], engineService.compile(script))); } } else { logger.warn("skipping compile of script file [{}] as all scripted operations are disabled for file scripts", file.toAbsolutePath()); @@ -538,7 +555,7 @@ public class ScriptService extends AbstractComponent implements Closeable { ScriptEngineService engineService = 
getScriptEngineServiceForFileExt(scriptNameExt.v2()); assert engineService != null; logger.info("removing script file [{}]", file.toAbsolutePath()); - staticCache.remove(getCacheKey(engineService, scriptNameExt.v1())); + staticCache.remove(getCacheKey(engineService, scriptNameExt.v1(), null)); } } @@ -598,9 +615,9 @@ public class ScriptService extends AbstractComponent implements Closeable { } } - private static String getCacheKey(ScriptEngineService scriptEngineService, String script) { + private static String getCacheKey(ScriptEngineService scriptEngineService, String name, String code) { String lang = scriptEngineService.types()[0]; - return lang + ":" + script; + return lang + ":" + (name != null ? ":" + name : "") + (code != null ? ":" + code : ""); } private static class IndexedScript { diff --git a/core/src/main/java/org/elasticsearch/script/expression/ExpressionExecutableScript.java b/core/src/main/java/org/elasticsearch/script/expression/ExpressionExecutableScript.java index ff41fb8fd78..d4ae9e14140 100644 --- a/core/src/main/java/org/elasticsearch/script/expression/ExpressionExecutableScript.java +++ b/core/src/main/java/org/elasticsearch/script/expression/ExpressionExecutableScript.java @@ -20,6 +20,7 @@ package org.elasticsearch.script.expression; import org.apache.lucene.expressions.Expression; +import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.ScriptException; @@ -34,16 +35,18 @@ public class ExpressionExecutableScript implements ExecutableScript { private final int NO_DOCUMENT = -1; - public final Expression expression; + public final CompiledScript compiledScript; public final Map functionValuesMap; public final ReplaceableConstFunctionValues[] functionValuesArray; - public ExpressionExecutableScript(Object compiledScript, Map vars) { - expression = (Expression)compiledScript; + public ExpressionExecutableScript(CompiledScript compiledScript, Map vars) { + this.compiledScript = compiledScript; + Expression expression = (Expression)this.compiledScript.compiled(); int functionValuesLength = expression.variables.length; if (vars.size() != functionValuesLength) { - throw new ScriptException("The number of variables in an executable expression script [" + + throw new ScriptException("Error using " + compiledScript + ". " + + "The number of variables in an executable expression script [" + functionValuesLength + "] must match the number of variables in the variable map" + " [" + vars.size() + "]."); } @@ -69,17 +72,23 @@ public class ExpressionExecutableScript implements ExecutableScript { double doubleValue = ((Number)value).doubleValue(); functionValuesMap.get(name).setValue(doubleValue); } else { - throw new ScriptException("Executable expressions scripts can only process numbers." + + throw new ScriptException("Error using " + compiledScript + ". " + + "Executable expressions scripts can only process numbers." + " The variable [" + name + "] is not a number."); } } else { - throw new ScriptException("The variable [" + name + "] does not exist in the executable expressions script."); + throw new ScriptException("Error using " + compiledScript + ". 
" + + "The variable [" + name + "] does not exist in the executable expressions script."); } } @Override public Object run() { - return expression.evaluate(NO_DOCUMENT, functionValuesArray); + try { + return ((Expression) compiledScript.compiled()).evaluate(NO_DOCUMENT, functionValuesArray); + } catch (Exception exception) { + throw new ScriptException("Error evaluating " + compiledScript, exception); + } } @Override diff --git a/core/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngineService.java b/core/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngineService.java index 2c4c8f4a25b..d9dd34c896f 100644 --- a/core/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngineService.java +++ b/core/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngineService.java @@ -38,6 +38,7 @@ import org.elasticsearch.index.mapper.core.NumberFieldMapper; import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.ScriptEngineService; +import org.elasticsearch.script.ScriptException; import org.elasticsearch.script.SearchScript; import org.elasticsearch.search.MultiValueMode; import org.elasticsearch.search.lookup.SearchLookup; @@ -99,79 +100,83 @@ public class ExpressionScriptEngineService extends AbstractComponent implements } @Override - public SearchScript search(Object compiledScript, SearchLookup lookup, @Nullable Map vars) { - Expression expr = (Expression)compiledScript; - MapperService mapper = lookup.doc().mapperService(); - // NOTE: if we need to do anything complicated with bindings in the future, we can just extend Bindings, - // instead of complicating SimpleBindings (which should stay simple) - SimpleBindings bindings = new SimpleBindings(); - ReplaceableConstValueSource specialValue = null; + public SearchScript search(CompiledScript compiledScript, SearchLookup lookup, @Nullable Map vars) { + try { + Expression expr = (Expression)compiledScript.compiled(); + MapperService mapper = lookup.doc().mapperService(); + // NOTE: if we need to do anything complicated with bindings in the future, we can just extend Bindings, + // instead of complicating SimpleBindings (which should stay simple) + SimpleBindings bindings = new SimpleBindings(); + ReplaceableConstValueSource specialValue = null; - for (String variable : expr.variables) { - if (variable.equals("_score")) { - bindings.add(new SortField("_score", SortField.Type.SCORE)); + for (String variable : expr.variables) { + if (variable.equals("_score")) { + bindings.add(new SortField("_score", SortField.Type.SCORE)); - } else if (variable.equals("_value")) { - specialValue = new ReplaceableConstValueSource(); - bindings.add("_value", specialValue); - // noop: _value is special for aggregations, and is handled in ExpressionScriptBindings - // TODO: if some uses it in a scoring expression, they will get a nasty failure when evaluating...need a - // way to know this is for aggregations and so _value is ok to have... + } else if (variable.equals("_value")) { + specialValue = new ReplaceableConstValueSource(); + bindings.add("_value", specialValue); + // noop: _value is special for aggregations, and is handled in ExpressionScriptBindings + // TODO: if some uses it in a scoring expression, they will get a nasty failure when evaluating...need a + // way to know this is for aggregations and so _value is ok to have... 
+ + } else if (vars != null && vars.containsKey(variable)) { + // TODO: document and/or error if vars contains _score? + // NOTE: by checking for the variable in vars first, it allows masking document fields with a global constant, + // but if we were to reverse it, we could provide a way to supply dynamic defaults for documents missing the field? + Object value = vars.get(variable); + if (value instanceof Number) { + bindings.add(variable, new DoubleConstValueSource(((Number) value).doubleValue())); + } else { + throw new ExpressionScriptCompilationException("Parameter [" + variable + "] must be a numeric type"); + } - } else if (vars != null && vars.containsKey(variable)) { - // TODO: document and/or error if vars contains _score? - // NOTE: by checking for the variable in vars first, it allows masking document fields with a global constant, - // but if we were to reverse it, we could provide a way to supply dynamic defaults for documents missing the field? - Object value = vars.get(variable); - if (value instanceof Number) { - bindings.add(variable, new DoubleConstValueSource(((Number)value).doubleValue())); } else { - throw new ExpressionScriptCompilationException("Parameter [" + variable + "] must be a numeric type"); - } + String fieldname = null; + String methodname = null; + VariableContext[] parts = VariableContext.parse(variable); + if (parts[0].text.equals("doc") == false) { + throw new ExpressionScriptCompilationException("Unknown variable [" + parts[0].text + "] in expression"); + } + if (parts.length < 2 || parts[1].type != VariableContext.Type.STR_INDEX) { + throw new ExpressionScriptCompilationException("Variable 'doc' in expression must be used with a specific field like: doc['myfield']"); + } else { + fieldname = parts[1].text; + } + if (parts.length == 3) { + if (parts[2].type == VariableContext.Type.METHOD) { + methodname = parts[2].text; + } else if (parts[2].type != VariableContext.Type.MEMBER || !"value".equals(parts[2].text)) { + throw new ExpressionScriptCompilationException("Only the member variable [value] or member methods may be accessed on a field when not accessing the field directly"); + } + } + if (parts.length > 3) { + throw new ExpressionScriptCompilationException("Variable [" + variable + "] does not follow an allowed format of either doc['field'] or doc['field'].method()"); + } - } else { - String fieldname = null; - String methodname = null; - VariableContext[] parts = VariableContext.parse(variable); - if (parts[0].text.equals("doc") == false) { - throw new ExpressionScriptCompilationException("Unknown variable [" + parts[0].text + "] in expression"); - } - if (parts.length < 2 || parts[1].type != VariableContext.Type.STR_INDEX) { - throw new ExpressionScriptCompilationException("Variable 'doc' in expression must be used with a specific field like: doc['myfield']"); - } else { - fieldname = parts[1].text; - } - if (parts.length == 3) { - if (parts[2].type == VariableContext.Type.METHOD) { - methodname = parts[2].text; - } else if (parts[2].type != VariableContext.Type.MEMBER || !"value".equals(parts[2].text)) { - throw new ExpressionScriptCompilationException("Only the member variable [value] or member methods may be accessed on a field when not accessing the field directly"); + MappedFieldType fieldType = mapper.smartNameFieldType(fieldname); + + if (fieldType == null) { + throw new ExpressionScriptCompilationException("Field [" + fieldname + "] used in expression does not exist in mappings"); + } + if (fieldType.isNumeric() == false) { + // TODO: 
more context (which expression?) + throw new ExpressionScriptCompilationException("Field [" + fieldname + "] used in expression must be numeric"); + } + + IndexFieldData fieldData = lookup.doc().fieldDataService().getForField((NumberFieldMapper.NumberFieldType) fieldType); + if (methodname == null) { + bindings.add(variable, new FieldDataValueSource(fieldData, MultiValueMode.MIN)); + } else { + bindings.add(variable, getMethodValueSource(fieldType, fieldData, fieldname, methodname)); } } - if (parts.length > 3) { - throw new ExpressionScriptCompilationException("Variable [" + variable + "] does not follow an allowed format of either doc['field'] or doc['field'].method()"); - } - - MappedFieldType fieldType = mapper.smartNameFieldType(fieldname); - - if (fieldType == null) { - throw new ExpressionScriptCompilationException("Field [" + fieldname + "] used in expression does not exist in mappings"); - } - if (fieldType.isNumeric() == false) { - // TODO: more context (which expression?) - throw new ExpressionScriptCompilationException("Field [" + fieldname + "] used in expression must be numeric"); - } - - IndexFieldData fieldData = lookup.doc().fieldDataService().getForField((NumberFieldMapper.NumberFieldType)fieldType); - if (methodname == null) { - bindings.add(variable, new FieldDataValueSource(fieldData, MultiValueMode.MIN)); - } else { - bindings.add(variable, getMethodValueSource(fieldType, fieldData, fieldname, methodname)); - } } - } - return new ExpressionSearchScript((Expression)compiledScript, bindings, specialValue); + return new ExpressionSearchScript(compiledScript, bindings, specialValue); + } catch (Exception exception) { + throw new ScriptException("Error during search with " + compiledScript, exception); + } } protected ValueSource getMethodValueSource(MappedFieldType fieldType, IndexFieldData fieldData, String fieldName, String methodName) { @@ -214,12 +219,12 @@ public class ExpressionScriptEngineService extends AbstractComponent implements } @Override - public ExecutableScript executable(Object compiledScript, Map vars) { + public ExecutableScript executable(CompiledScript compiledScript, Map vars) { return new ExpressionExecutableScript(compiledScript, vars); } @Override - public Object execute(Object compiledScript, Map vars) { + public Object execute(CompiledScript compiledScript, Map vars) { ExpressionExecutableScript expressionExecutableScript = new ExpressionExecutableScript(compiledScript, vars); return expressionExecutableScript.run(); } diff --git a/core/src/main/java/org/elasticsearch/script/expression/ExpressionSearchScript.java b/core/src/main/java/org/elasticsearch/script/expression/ExpressionSearchScript.java index 59a73a3ff6d..948b8d3365f 100644 --- a/core/src/main/java/org/elasticsearch/script/expression/ExpressionSearchScript.java +++ b/core/src/main/java/org/elasticsearch/script/expression/ExpressionSearchScript.java @@ -27,7 +27,9 @@ import org.apache.lucene.queries.function.FunctionValues; import org.apache.lucene.queries.function.ValueSource; import org.apache.lucene.search.Scorer; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.LeafSearchScript; +import org.elasticsearch.script.ScriptException; import org.elasticsearch.script.SearchScript; import java.io.IOException; @@ -40,17 +42,17 @@ import java.util.Map; */ class ExpressionSearchScript implements SearchScript { - final Expression expression; + final CompiledScript compiledScript; final SimpleBindings bindings; final 
ValueSource source; final ReplaceableConstValueSource specialValue; // _value Scorer scorer; int docid; - ExpressionSearchScript(Expression e, SimpleBindings b, ReplaceableConstValueSource v) { - expression = e; + ExpressionSearchScript(CompiledScript c, SimpleBindings b, ReplaceableConstValueSource v) { + compiledScript = c; bindings = b; - source = expression.getValueSource(bindings); + source = ((Expression)compiledScript.compiled()).getValueSource(bindings); specialValue = v; } @@ -61,7 +63,11 @@ class ExpressionSearchScript implements SearchScript { FunctionValues values = source.getValues(Collections.singletonMap("scorer", Lucene.illegalScorer("Scores are not available in the current context")), leaf); double evaluate() { - return values.doubleVal(docid); + try { + return values.doubleVal(docid); + } catch (Exception exception) { + throw new ScriptException("Error evaluating " + compiledScript, exception); + } } @Override @@ -91,7 +97,7 @@ class ExpressionSearchScript implements SearchScript { // We have a new binding for the scorer so we need to reset the values values = source.getValues(Collections.singletonMap("scorer", scorer), leaf); } catch (IOException e) { - throw new IllegalStateException("Can't get values", e); + throw new IllegalStateException("Can't get values using " + compiledScript, e); } } @@ -109,7 +115,7 @@ class ExpressionSearchScript implements SearchScript { if (value instanceof Number) { specialValue.setValue(((Number)value).doubleValue()); } else { - throw new ExpressionScriptExecutionException("Cannot use expression with text variable"); + throw new ExpressionScriptExecutionException("Cannot use expression with text variable using " + compiledScript); } } }; diff --git a/core/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java b/core/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java index 96d63f13cc8..17c4284f714 100644 --- a/core/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java +++ b/core/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java @@ -19,6 +19,8 @@ package org.elasticsearch.script.groovy; +import com.google.common.base.Charsets; +import com.google.common.hash.Hashing; import groovy.lang.Binding; import groovy.lang.GroovyClassLoader; import groovy.lang.Script; @@ -49,7 +51,6 @@ import java.io.IOException; import java.math.BigDecimal; import java.util.HashMap; import java.util.Map; -import java.util.concurrent.atomic.AtomicLong; /** * Provides the infrastructure for Groovy as a scripting language for Elasticsearch @@ -57,7 +58,6 @@ import java.util.concurrent.atomic.AtomicLong; public class GroovyScriptEngineService extends AbstractComponent implements ScriptEngineService { public static final String NAME = "groovy"; - private final AtomicLong counter = new AtomicLong(); private final GroovyClassLoader loader; @Inject @@ -111,7 +111,7 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri @Override public Object compile(String script) { try { - return loader.parseClass(script, generateScriptName()); + return loader.parseClass(script, Hashing.sha1().hashString(script, Charsets.UTF_8).toString()); } catch (Throwable e) { if (logger.isTraceEnabled()) { logger.trace("exception compiling Groovy script:", e); @@ -135,21 +135,21 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri @SuppressWarnings({"unchecked"}) @Override - public ExecutableScript executable(Object compiledScript, Map vars) { + 
public ExecutableScript executable(CompiledScript compiledScript, Map vars) { try { Map allVars = new HashMap<>(); if (vars != null) { allVars.putAll(vars); } - return new GroovyScript(createScript(compiledScript, allVars), this.logger); + return new GroovyScript(compiledScript, createScript(compiledScript.compiled(), allVars), this.logger); } catch (Exception e) { - throw new ScriptException("failed to build executable script", e); + throw new ScriptException("failed to build executable " + compiledScript, e); } } @SuppressWarnings({"unchecked"}) @Override - public SearchScript search(final Object compiledScript, final SearchLookup lookup, @Nullable final Map vars) { + public SearchScript search(final CompiledScript compiledScript, final SearchLookup lookup, @Nullable final Map vars) { return new SearchScript() { @Override @@ -162,26 +162,26 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri } Script scriptObject; try { - scriptObject = createScript(compiledScript, allVars); + scriptObject = createScript(compiledScript.compiled(), allVars); } catch (InstantiationException | IllegalAccessException e) { - throw new ScriptException("failed to build search script", e); + throw new ScriptException("failed to build search " + compiledScript, e); } - return new GroovyScript(scriptObject, leafLookup, logger); + return new GroovyScript(compiledScript, scriptObject, leafLookup, logger); } }; } @Override - public Object execute(Object compiledScript, Map vars) { + public Object execute(CompiledScript compiledScript, Map vars) { try { Map allVars = new HashMap<>(); if (vars != null) { allVars.putAll(vars); } - Script scriptObject = createScript(compiledScript, allVars); + Script scriptObject = createScript(compiledScript.compiled(), allVars); return scriptObject.run(); } catch (Exception e) { - throw new ScriptException("failed to execute script", e); + throw new ScriptException("failed to execute " + compiledScript, e); } } @@ -190,23 +190,21 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri return value; } - private String generateScriptName() { - return "Script" + counter.incrementAndGet() + ".groovy"; - } - public static final class GroovyScript implements ExecutableScript, LeafSearchScript { + private final CompiledScript compiledScript; private final Script script; private final LeafSearchLookup lookup; private final Map variables; private final ESLogger logger; - public GroovyScript(Script script, ESLogger logger) { - this(script, null, logger); + public GroovyScript(CompiledScript compiledScript, Script script, ESLogger logger) { + this(compiledScript, script, null, logger); } @SuppressWarnings("unchecked") - public GroovyScript(Script script, @Nullable LeafSearchLookup lookup, ESLogger logger) { + public GroovyScript(CompiledScript compiledScript, Script script, @Nullable LeafSearchLookup lookup, ESLogger logger) { + this.compiledScript = compiledScript; this.script = script; this.lookup = lookup; this.logger = logger; @@ -244,9 +242,9 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri return script.run(); } catch (Throwable e) { if (logger.isTraceEnabled()) { - logger.trace("exception running Groovy script", e); + logger.trace("failed to run " + compiledScript, e); } - throw new GroovyScriptExecutionException(ExceptionsHelper.detailedMessage(e)); + throw new GroovyScriptExecutionException("failed to run " + compiledScript + ": " + ExceptionsHelper.detailedMessage(e)); } } diff --git 
a/core/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java b/core/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java index c40523750bb..b5d4e967af3 100644 --- a/core/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java +++ b/core/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java @@ -19,6 +19,7 @@ package org.elasticsearch.script.mustache; import com.github.mustachejava.Mustache; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; @@ -29,6 +30,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.ScriptEngineService; +import org.elasticsearch.script.ScriptException; import org.elasticsearch.script.SearchScript; import org.elasticsearch.search.lookup.SearchLookup; @@ -98,20 +100,13 @@ public class MustacheScriptEngineService extends AbstractComponent implements Sc * @return the processed string with all given variables substitued. * */ @Override - public Object execute(Object template, Map vars) { + public Object execute(CompiledScript template, Map vars) { BytesStreamOutput result = new BytesStreamOutput(); - UTF8StreamWriter writer = utf8StreamWriter().setOutput(result); - ((Mustache) template).execute(writer, vars); - try { - writer.flush(); - } catch (IOException e) { - logger.error("Could not execute query template (failed to flush writer): ", e); - } finally { - try { - writer.close(); - } catch (IOException e) { - logger.error("Could not execute query template (failed to close writer): ", e); - } + try (UTF8StreamWriter writer = utf8StreamWriter().setOutput(result)) { + ((Mustache) template.compiled()).execute(writer, vars); + } catch (Exception e) { + logger.error("Error executing " + template, e); + throw new ScriptException("Error executing " + template, e); } return result.bytes(); } @@ -132,13 +127,13 @@ public class MustacheScriptEngineService extends AbstractComponent implements Sc } @Override - public ExecutableScript executable(Object mustache, + public ExecutableScript executable(CompiledScript compiledScript, @Nullable Map vars) { - return new MustacheExecutableScript((Mustache) mustache, vars); + return new MustacheExecutableScript(compiledScript, vars); } @Override - public SearchScript search(Object compiledScript, SearchLookup lookup, + public SearchScript search(CompiledScript compiledScript, SearchLookup lookup, @Nullable Map vars) { throw new UnsupportedOperationException(); } @@ -162,18 +157,17 @@ public class MustacheScriptEngineService extends AbstractComponent implements Sc * Used at query execution time by script service in order to execute a query template. * */ private class MustacheExecutableScript implements ExecutableScript { - /** Compiled template object. */ - private Mustache mustache; + /** Compiled template object wrapper. */ + private CompiledScript template; /** Parameters to fill above object with. 
*/ private Map vars; /** - * @param mustache the compiled template object + * @param template the compiled template object wrapper * @param vars the parameters to fill above object with **/ - public MustacheExecutableScript(Mustache mustache, - Map vars) { - this.mustache = mustache; + public MustacheExecutableScript(CompiledScript template, Map vars) { + this.template = template; this.vars = vars == null ? Collections.emptyMap() : vars; } @@ -185,18 +179,11 @@ public class MustacheScriptEngineService extends AbstractComponent implements Sc @Override public Object run() { BytesStreamOutput result = new BytesStreamOutput(); - UTF8StreamWriter writer = utf8StreamWriter().setOutput(result); - mustache.execute(writer, vars); - try { - writer.flush(); - } catch (IOException e) { - logger.error("Could not execute query template (failed to flush writer): ", e); - } finally { - try { - writer.close(); - } catch (IOException e) { - logger.error("Could not execute query template (failed to close writer): ", e); - } + try (UTF8StreamWriter writer = utf8StreamWriter().setOutput(result)) { + ((Mustache) template.compiled()).execute(writer, vars); + } catch (Exception e) { + logger.error("Error running " + template, e); + throw new ScriptException("Error running " + template, e); } return result.bytes(); } diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java index 7b7f764ad93..4beacda97f1 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchService.java +++ b/core/src/main/java/org/elasticsearch/search/SearchService.java @@ -75,6 +75,7 @@ import org.elasticsearch.indices.IndicesWarmer; import org.elasticsearch.indices.IndicesWarmer.TerminationHandle; import org.elasticsearch.indices.IndicesWarmer.WarmerContext; import org.elasticsearch.indices.cache.request.IndicesRequestCache; +import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.Script.ScriptParseException; import org.elasticsearch.script.ScriptContext; @@ -101,6 +102,7 @@ import java.util.concurrent.ScheduledFuture; import java.util.concurrent.atomic.AtomicLong; import static org.elasticsearch.common.Strings.hasLength; +import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes; /** @@ -111,7 +113,9 @@ public class SearchService extends AbstractLifecycleComponent { public static final String NORMS_LOADING_KEY = "index.norms.loading"; public static final String DEFAULT_KEEPALIVE_KEY = "search.default_keep_alive"; public static final String KEEPALIVE_INTERVAL_KEY = "search.keep_alive_interval"; + public static final String DEFAULT_SEARCH_TIMEOUT = "search.default_search_timeout"; + public static final TimeValue NO_TIMEOUT = timeValueMillis(-1); private final ThreadPool threadPool; @@ -137,6 +141,8 @@ public class SearchService extends AbstractLifecycleComponent { private final long defaultKeepAlive; + private volatile TimeValue defaultSearchTimeout; + private final ScheduledFuture keepAliveReaper; private final AtomicLong idGenerator = new AtomicLong(); @@ -148,7 +154,7 @@ public class SearchService extends AbstractLifecycleComponent { private final ParseFieldMatcher parseFieldMatcher; @Inject - public SearchService(Settings settings, ClusterService clusterService, IndicesService indicesService,IndicesWarmer indicesWarmer, ThreadPool threadPool, + public SearchService(Settings settings, 
NodeSettingsService nodeSettingsService, ClusterService clusterService, IndicesService indicesService,IndicesWarmer indicesWarmer, ThreadPool threadPool, ScriptService scriptService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, DfsPhase dfsPhase, QueryPhase queryPhase, FetchPhase fetchPhase, IndicesRequestCache indicesQueryCache) { super(settings); @@ -157,11 +163,23 @@ public class SearchService extends AbstractLifecycleComponent { this.clusterService = clusterService; this.indicesService = indicesService; indicesService.indicesLifecycle().addListener(new IndicesLifecycle.Listener() { + @Override + public void afterIndexClosed(Index index, @IndexSettings Settings indexSettings) { + // once an index is closed we can just clean up all the pending search context information + // to release memory and let references to the filesystem go etc. + IndexMetaData idxMeta = SearchService.this.clusterService.state().metaData().index(index.getName()); + if (idxMeta != null && idxMeta.state() == IndexMetaData.State.CLOSE) { + // we need to check if it's really closed + // since sometimes due to a relocation we already closed the shard and that causes the index to be closed + // if we then close all the contexts we can get some search failures along the way which are not expected. + // it's fine to keep the contexts open if the index is still "alive" + // unfortunately we don't have a clear way to signal today why an index is closed. + afterIndexDeleted(index, indexSettings); + } + } @Override public void afterIndexDeleted(Index index, @IndexSettings Settings indexSettings) { - // once an index is closed we can just clean up all the pending search context information - // to release memory and let references to the filesystem go etc. freeAllContextForIndex(index); } }); @@ -190,6 +208,20 @@ public class SearchService extends AbstractLifecycleComponent { this.indicesWarmer.addListener(new NormsWarmer()); this.indicesWarmer.addListener(new FieldDataWarmer()); this.indicesWarmer.addListener(new SearchWarmer()); + + defaultSearchTimeout = settings.getAsTime(DEFAULT_SEARCH_TIMEOUT, NO_TIMEOUT); + nodeSettingsService.addListener(new SearchSettingsListener()); + } + + class SearchSettingsListener implements NodeSettingsService.Listener { + @Override + public void onRefreshSettings(Settings settings) { + final TimeValue maybeNewDefaultSearchTimeout = settings.getAsTime(SearchService.DEFAULT_SEARCH_TIMEOUT, SearchService.this.defaultSearchTimeout); + if (!maybeNewDefaultSearchTimeout.equals(SearchService.this.defaultSearchTimeout)) { + logger.info("updating [{}] from [{}] to [{}]", SearchService.DEFAULT_SEARCH_TIMEOUT, SearchService.this.defaultSearchTimeout, maybeNewDefaultSearchTimeout); + SearchService.this.defaultSearchTimeout = maybeNewDefaultSearchTimeout; + } + } } protected void putContext(SearchContext context) { @@ -607,7 +639,7 @@ public class SearchService extends AbstractLifecycleComponent { Engine.Searcher engineSearcher = searcher == null ? 
indexShard.acquireSearcher("search") : searcher; - SearchContext context = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, engineSearcher, indexService, indexShard, scriptService, pageCacheRecycler, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher); + SearchContext context = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, engineSearcher, indexService, indexShard, scriptService, pageCacheRecycler, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher, defaultSearchTimeout); SearchContext.setCurrent(context); try { context.scroll(request.scroll()); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java b/core/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java index f90cdbc3438..4466bc7d3a3 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java @@ -67,6 +67,7 @@ import org.elasticsearch.search.aggregations.pipeline.derivative.DerivativeParse import org.elasticsearch.search.aggregations.pipeline.having.BucketSelectorParser; import org.elasticsearch.search.aggregations.pipeline.movavg.MovAvgParser; import org.elasticsearch.search.aggregations.pipeline.movavg.models.MovAvgModelModule; +import org.elasticsearch.search.aggregations.pipeline.serialdiff.SerialDiffParser; import java.util.List; @@ -120,6 +121,7 @@ public class AggregationModule extends AbstractModule implements SpawnModules{ pipelineAggParsers.add(CumulativeSumParser.class); pipelineAggParsers.add(BucketScriptParser.class); pipelineAggParsers.add(BucketSelectorParser.class); + pipelineAggParsers.add(SerialDiffParser.class); } /** diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/TransportAggregationModule.java b/core/src/main/java/org/elasticsearch/search/aggregations/TransportAggregationModule.java index ef152ab11a2..ae4684dc2e0 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/TransportAggregationModule.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/TransportAggregationModule.java @@ -72,6 +72,7 @@ import org.elasticsearch.search.aggregations.pipeline.derivative.InternalDerivat import org.elasticsearch.search.aggregations.pipeline.having.BucketSelectorPipelineAggregator; import org.elasticsearch.search.aggregations.pipeline.movavg.MovAvgPipelineAggregator; import org.elasticsearch.search.aggregations.pipeline.movavg.models.TransportMovAvgModelModule; +import org.elasticsearch.search.aggregations.pipeline.serialdiff.SerialDiffPipelineAggregator; /** * A module that registers all the transport streams for the addAggregation @@ -133,6 +134,7 @@ public class TransportAggregationModule extends AbstractModule implements SpawnM CumulativeSumPipelineAggregator.registerStreams(); BucketScriptPipelineAggregator.registerStreams(); BucketSelectorPipelineAggregator.registerStreams(); + SerialDiffPipelineAggregator.registerStreams(); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/ValuesSourceMetricsAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/ValuesSourceMetricsAggregationBuilder.java index cf2d644b70f..e6755486225 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/ValuesSourceMetricsAggregationBuilder.java +++ 
b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/ValuesSourceMetricsAggregationBuilder.java @@ -19,12 +19,10 @@ package org.elasticsearch.search.aggregations.metrics; -import com.google.common.collect.Maps; - import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.script.Script; import java.io.IOException; -import java.util.Map; /** * @@ -32,10 +30,8 @@ import java.util.Map; public abstract class ValuesSourceMetricsAggregationBuilder> extends MetricsAggregationBuilder { private String field; - private String script; - private String lang; + private Script script; private String format; - private Map params; private Object missing; protected ValuesSourceMetricsAggregationBuilder(String name, String type) { @@ -48,43 +44,21 @@ public abstract class ValuesSourceMetricsAggregationBuilder params) { - if (this.params == null) { - this.params = params; - } else { - this.params.putAll(params); - } - return (B) this; - } - - @SuppressWarnings("unchecked") - public B param(String name, Object value) { - if (this.params == null) { - this.params = Maps.newHashMap(); - } - this.params.put(name, value); - return (B) this; - } - /** * Configure the value to use when documents miss a value. */ @@ -103,18 +77,10 @@ public abstract class ValuesSourceMetricsAggregationBuilder { private Integer window; private Integer predict; private Boolean minimize; + private Map settings; public MovAvgBuilder(String name) { super(name, MovAvgPipelineAggregator.TYPE.name()); @@ -107,6 +109,18 @@ public class MovAvgBuilder extends PipelineAggregatorBuilder { return this; } + /** + * The map of settings that should be provided to the model when it is + * instantiated. + * + * @param settings the model-specific settings map + * @return this builder for method chaining + */ + public MovAvgBuilder settings(Map settings) { + this.settings = settings; + return this; + } + @Override protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException { @@ -128,6 +142,9 @@ public class MovAvgBuilder extends PipelineAggregatorBuilder { if (minimize != null) { builder.field(MovAvgParser.MINIMIZE.getPreferredName(), minimize); } + if (settings != null) { + builder.field(MovAvgParser.SETTINGS.getPreferredName(), settings); + } return builder; } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/EwmaModel.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/EwmaModel.java index 728cb1697ed..84de794ceed 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/EwmaModel.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/EwmaModel.java @@ -120,9 +120,11 @@ public class EwmaModel extends MovAvgModel { } @Override - public MovAvgModel parse(@Nullable Map settings, String pipelineName, int windowSize, ParseFieldMatcher parseFieldMatcher) throws ParseException { + public MovAvgModel parse(@Nullable Map settings, String pipelineName, int windowSize, + ParseFieldMatcher parseFieldMatcher) throws ParseException { double alpha = parseDoubleParam(settings, "alpha", 0.3); + checkUnrecognizedParams(settings); return new EwmaModel(alpha); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/HoltLinearModel.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/HoltLinearModel.java index 6a3b9500e94..55611a0d000 100644 ---
a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/HoltLinearModel.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/HoltLinearModel.java @@ -180,10 +180,12 @@ public class HoltLinearModel extends MovAvgModel { } @Override - public MovAvgModel parse(@Nullable Map settings, String pipelineName, int windowSize, ParseFieldMatcher parseFieldMatcher) throws ParseException { + public MovAvgModel parse(@Nullable Map settings, String pipelineName, int windowSize, + ParseFieldMatcher parseFieldMatcher) throws ParseException { double alpha = parseDoubleParam(settings, "alpha", 0.3); double beta = parseDoubleParam(settings, "beta", 0.1); + checkUnrecognizedParams(settings); return new HoltLinearModel(alpha, beta); } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/HoltWintersModel.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/HoltWintersModel.java index b02082bec5c..8be63e165b8 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/HoltWintersModel.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/HoltWintersModel.java @@ -356,7 +356,8 @@ public class HoltWintersModel extends MovAvgModel { } @Override - public MovAvgModel parse(@Nullable Map settings, String pipelineName, int windowSize, ParseFieldMatcher parseFieldMatcher) throws ParseException { + public MovAvgModel parse(@Nullable Map settings, String pipelineName, int windowSize, + ParseFieldMatcher parseFieldMatcher) throws ParseException { double alpha = parseDoubleParam(settings, "alpha", 0.3); double beta = parseDoubleParam(settings, "beta", 0.1); @@ -376,6 +377,7 @@ public class HoltWintersModel extends MovAvgModel { if (value != null) { if (value instanceof String) { seasonalityType = SeasonalityType.parse((String)value, parseFieldMatcher); + settings.remove("type"); } else { throw new ParseException("Parameter [type] must be a String, type `" + value.getClass().getSimpleName() + "` provided instead", 0); @@ -385,6 +387,7 @@ public class HoltWintersModel extends MovAvgModel { boolean pad = parseBoolParam(settings, "pad", seasonalityType.equals(SeasonalityType.MULTIPLICATIVE)); + checkUnrecognizedParams(settings); return new HoltWintersModel(alpha, beta, gamma, period, seasonalityType, pad); } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/LinearModel.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/LinearModel.java index f6c5a2be407..264a42509b7 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/LinearModel.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/LinearModel.java @@ -107,7 +107,9 @@ public class LinearModel extends MovAvgModel { } @Override - public MovAvgModel parse(@Nullable Map settings, String pipelineName, int windowSize, ParseFieldMatcher parseFieldMatcher) throws ParseException { + public MovAvgModel parse(@Nullable Map settings, String pipelineName, int windowSize, + ParseFieldMatcher parseFieldMatcher) throws ParseException { + checkUnrecognizedParams(settings); return new LinearModel(); } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/MovAvgModel.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/MovAvgModel.java index 
1a085b37620..3de4fceab4a 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/MovAvgModel.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/MovAvgModel.java @@ -155,7 +155,8 @@ public abstract class MovAvgModel { * @param parseFieldMatcher Matcher for field names * @return A fully built moving average model */ - public abstract MovAvgModel parse(@Nullable Map settings, String pipelineName, int windowSize, ParseFieldMatcher parseFieldMatcher) throws ParseException; + public abstract MovAvgModel parse(@Nullable Map settings, String pipelineName, + int windowSize, ParseFieldMatcher parseFieldMatcher) throws ParseException; /** @@ -180,6 +181,7 @@ public abstract class MovAvgModel { } else if (value instanceof Number) { double v = ((Number) value).doubleValue(); if (v >= 0 && v <= 1) { + settings.remove(name); return v; } @@ -211,6 +213,7 @@ public abstract class MovAvgModel { if (value == null) { return defaultValue; } else if (value instanceof Number) { + settings.remove(name); return ((Number) value).intValue(); } @@ -238,12 +241,19 @@ public abstract class MovAvgModel { if (value == null) { return defaultValue; } else if (value instanceof Boolean) { + settings.remove(name); return (Boolean)value; } throw new ParseException("Parameter [" + name + "] must be a boolean, type `" + value.getClass().getSimpleName() + "` provided instead", 0); } + + protected void checkUnrecognizedParams(@Nullable Map settings) throws ParseException { + if (settings != null && settings.size() > 0) { + throw new ParseException("Unrecognized parameter(s): [" + settings.keySet() + "]", 0); + } + } } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/SimpleModel.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/SimpleModel.java index 8d375561dd3..e0c7781ec4a 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/SimpleModel.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/SimpleModel.java @@ -60,7 +60,7 @@ public class SimpleModel extends MovAvgModel { protected double[] doPredict(Collection values, int numPredictions) { double[] predictions = new double[numPredictions]; - // EWMA just emits the same final prediction repeatedly. + // Simple just emits the same final prediction repeatedly. Arrays.fill(predictions, next(values)); return predictions; @@ -100,7 +100,9 @@ public class SimpleModel extends MovAvgModel { } @Override - public MovAvgModel parse(@Nullable Map settings, String pipelineName, int windowSize, ParseFieldMatcher parseFieldMatcher) throws ParseException { + public MovAvgModel parse(@Nullable Map settings, String pipelineName, int windowSize, + ParseFieldMatcher parseFieldMatcher) throws ParseException { + checkUnrecognizedParams(settings); return new SimpleModel(); } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffBuilder.java new file mode 100644 index 00000000000..052f3f02b28 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffBuilder.java @@ -0,0 +1,67 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.pipeline.serialdiff; + +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilder; + +import java.io.IOException; + +public class SerialDiffBuilder extends PipelineAggregatorBuilder { + + private String format; + private GapPolicy gapPolicy; + private Integer lag; + + public SerialDiffBuilder(String name) { + super(name, SerialDiffPipelineAggregator.TYPE.name()); + } + + public SerialDiffBuilder format(String format) { + this.format = format; + return this; + } + + public SerialDiffBuilder gapPolicy(GapPolicy gapPolicy) { + this.gapPolicy = gapPolicy; + return this; + } + + public SerialDiffBuilder lag(Integer lag) { + this.lag = lag; + return this; + } + + @Override + protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException { + if (format != null) { + builder.field(SerialDiffParser.FORMAT.getPreferredName(), format); + } + if (gapPolicy != null) { + builder.field(SerialDiffParser.GAP_POLICY.getPreferredName(), gapPolicy.getName()); + } + if (lag != null) { + builder.field(SerialDiffParser.LAG.getPreferredName(), lag); + } + return builder; + } + +} diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffParser.java new file mode 100644 index 00000000000..109cbcc44f5 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffParser.java @@ -0,0 +1,116 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.aggregations.pipeline.serialdiff; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.SearchParseException; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorFactory; +import org.elasticsearch.search.aggregations.support.format.ValueFormat; +import org.elasticsearch.search.aggregations.support.format.ValueFormatter; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; + +public class SerialDiffParser implements PipelineAggregator.Parser { + + public static final ParseField FORMAT = new ParseField("format"); + public static final ParseField GAP_POLICY = new ParseField("gap_policy"); + public static final ParseField LAG = new ParseField("lag"); + + @Override + public String type() { + return SerialDiffPipelineAggregator.TYPE.name(); + } + + @Override + public PipelineAggregatorFactory parse(String reducerName, XContentParser parser, SearchContext context) throws IOException { + XContentParser.Token token; + String currentFieldName = null; + String[] bucketsPaths = null; + String format = null; + GapPolicy gapPolicy = GapPolicy.SKIP; + int lag = 1; + + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.VALUE_STRING) { + if (context.parseFieldMatcher().match(currentFieldName, FORMAT)) { + format = parser.text(); + } else if (context.parseFieldMatcher().match(currentFieldName, BUCKETS_PATH)) { + bucketsPaths = new String[] { parser.text() }; + } else if (context.parseFieldMatcher().match(currentFieldName, GAP_POLICY)) { + gapPolicy = GapPolicy.parse(context, parser.text(), parser.getTokenLocation()); + } else { + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); + } + } else if (token == XContentParser.Token.VALUE_NUMBER) { + if (context.parseFieldMatcher().match(currentFieldName, LAG)) { + lag = parser.intValue(true); + if (lag <= 0) { + throw new SearchParseException(context, "Lag must be a positive, non-zero integer. 
Value supplied was " + + lag + " in [" + reducerName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); + } + } else { + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); + } + } else if (token == XContentParser.Token.START_ARRAY) { + if (context.parseFieldMatcher().match(currentFieldName, BUCKETS_PATH)) { + List paths = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + String path = parser.text(); + paths.add(path); + } + bucketsPaths = paths.toArray(new String[paths.size()]); + } else { + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); + } + } else { + throw new SearchParseException(context, "Unexpected token " + token + " in [" + reducerName + "].", + parser.getTokenLocation()); + } + } + + if (bucketsPaths == null) { + throw new SearchParseException(context, "Missing required field [" + BUCKETS_PATH.getPreferredName() + + "] for serial_diff aggregation [" + reducerName + "]", parser.getTokenLocation()); + } + + ValueFormatter formatter; + if (format != null) { + formatter = ValueFormat.Patternable.Number.format(format).formatter(); + } else { + formatter = ValueFormatter.RAW; + } + + return new SerialDiffPipelineAggregator.Factory(reducerName, bucketsPaths, formatter, gapPolicy, lag); + } + } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffPipelineAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffPipelineAggregator.java new file mode 100644 index 00000000000..3517a621427 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffPipelineAggregator.java @@ -0,0 +1,161 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.search.aggregations.pipeline.serialdiff; + +import com.google.common.collect.EvictingQueue; +import com.google.common.collect.Lists; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; +import org.elasticsearch.search.aggregations.InternalAggregation.Type; +import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; +import org.elasticsearch.search.aggregations.pipeline.*; +import org.elasticsearch.search.aggregations.support.format.ValueFormatter; +import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.search.aggregations.pipeline.BucketHelpers.resolveBucketValue; +import static org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; + +public class SerialDiffPipelineAggregator extends PipelineAggregator { + + public final static Type TYPE = new Type("serial_diff"); + + public final static PipelineAggregatorStreams.Stream STREAM = new PipelineAggregatorStreams.Stream() { + @Override + public SerialDiffPipelineAggregator readResult(StreamInput in) throws IOException { + SerialDiffPipelineAggregator result = new SerialDiffPipelineAggregator(); + result.readFrom(in); + return result; + } + }; + + public static void registerStreams() { + PipelineAggregatorStreams.registerStream(STREAM, TYPE.stream()); + } + + private ValueFormatter formatter; + private GapPolicy gapPolicy; + private int lag; + + public SerialDiffPipelineAggregator() { + } + + public SerialDiffPipelineAggregator(String name, String[] bucketsPaths, @Nullable ValueFormatter formatter, GapPolicy gapPolicy, + int lag, Map metadata) { + super(name, bucketsPaths, metadata); + this.formatter = formatter; + this.gapPolicy = gapPolicy; + this.lag = lag; + } + + @Override + public Type type() { + return TYPE; + } + + @Override + public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) { + InternalHistogram histo = (InternalHistogram) aggregation; + List buckets = histo.getBuckets(); + InternalHistogram.Factory factory = histo.getFactory(); + + List newBuckets = new ArrayList<>(); + EvictingQueue lagWindow = EvictingQueue.create(lag); + int counter = 0; + + for (InternalHistogram.Bucket bucket : buckets) { + Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], gapPolicy); + InternalHistogram.Bucket newBucket = bucket; + + counter += 1; + + // Still under the initial lag period, add nothing and move on + Double lagValue; + if (counter <= lag) { + lagValue = Double.NaN; + } else { + lagValue = lagWindow.peek(); // Peek here, because we rely on add'ing to always move the window + } + + // Normalize null's to NaN + if (thisBucketValue == null) { + thisBucketValue = Double.NaN; + } + + // Both have values, calculate diff and replace the "empty" bucket + if (!Double.isNaN(thisBucketValue) && !Double.isNaN(lagValue)) { + double diff = thisBucketValue - lagValue; + + List aggs = new ArrayList<>(Lists.transform(bucket.getAggregations().asList(), AGGREGATION_TRANFORM_FUNCTION)); + aggs.add(new InternalSimpleValue(name(), diff, 
formatter, new ArrayList(), metaData())); + newBucket = factory.createBucket(bucket.getKey(), bucket.getDocCount(), new InternalAggregations( + aggs), bucket.getKeyed(), bucket.getFormatter()); + } + + + newBuckets.add(newBucket); + lagWindow.add(thisBucketValue); + + } + return factory.create(newBuckets, histo); + } + + @Override + public void doReadFrom(StreamInput in) throws IOException { + formatter = ValueFormatterStreams.readOptional(in); + gapPolicy = GapPolicy.readFrom(in); + lag = in.readVInt(); + } + + @Override + public void doWriteTo(StreamOutput out) throws IOException { + ValueFormatterStreams.writeOptional(formatter, out); + gapPolicy.writeTo(out); + out.writeVInt(lag); + } + + public static class Factory extends PipelineAggregatorFactory { + + private final ValueFormatter formatter; + private GapPolicy gapPolicy; + private int lag; + + public Factory(String name, String[] bucketsPaths, @Nullable ValueFormatter formatter, GapPolicy gapPolicy, int lag) { + super(name, TYPE.name(), bucketsPaths); + this.formatter = formatter; + this.gapPolicy = gapPolicy; + this.lag = lag; + } + + @Override + protected PipelineAggregator createInternal(Map metaData) throws IOException { + return new SerialDiffPipelineAggregator(name, bucketsPaths, formatter, gapPolicy, lag, metaData); + } + + } +} diff --git a/core/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java b/core/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java index 3ad8ec39d94..2144d13a123 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java @@ -35,6 +35,7 @@ import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.MinimumScoreCollector; import org.elasticsearch.common.lucene.search.FilteredCollector; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.search.SearchService; import org.elasticsearch.search.dfs.CachedDfSource; import org.elasticsearch.search.internal.SearchContext.Lifetime; @@ -139,7 +140,7 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable { public void search(Query query, Collector collector) throws IOException { // Wrap the caller's collector with various wrappers e.g. those used to siphon // matches off for aggregation or to impose a time-limit on collection. 
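Before the search-timeout changes that follow: the serial_diff reducer introduced above keeps an EvictingQueue of the last `lag` bucket values and, once that window has filled, subtracts the value at its head (the value `lag` buckets back) from the current bucket. A minimal standalone sketch of just that windowing arithmetic, using Guava's EvictingQueue as the aggregator does (class and method names here are illustrative, not part of the PR):

    import com.google.common.collect.EvictingQueue;

    import java.util.ArrayList;
    import java.util.List;

    class SerialDiffSketch {
        // Computes value[i] - value[i - lag] over a series, emitting NaN
        // until the lag window has filled, mirroring reduce() above.
        static List<Double> serialDiff(List<Double> values, int lag) {
            EvictingQueue<Double> lagWindow = EvictingQueue.create(lag);
            List<Double> diffs = new ArrayList<>();
            int counter = 0;
            for (Double value : values) {
                counter += 1;
                // Peek before adding: the head of a full window is the value lag steps back
                Double lagValue = (counter <= lag) ? Double.NaN : lagWindow.peek();
                double current = (value == null) ? Double.NaN : value; // normalize nulls to NaN
                diffs.add(current - lagValue); // NaN marks buckets still inside the lag period
                lagWindow.add(current);
            }
            return diffs;
        }
    }

With values [1.0, 3.0, 6.0, 10.0] and lag = 1 this yields [NaN, 2.0, 3.0, 4.0], i.e. first differences; the aggregator additionally skips writing a diff into a bucket when either operand is NaN, as the reduce() loop above shows.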
- final boolean timeoutSet = searchContext.timeoutInMillis() != -1; + final boolean timeoutSet = searchContext.timeoutInMillis() != SearchService.NO_TIMEOUT.millis(); final boolean terminateAfterSet = searchContext.terminateAfter() != SearchContext.DEFAULT_TERMINATE_AFTER; if (timeoutSet) { diff --git a/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java index a9f0f368c0b..6023ab3d9e5 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java @@ -33,6 +33,7 @@ import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.lucene.search.function.BoostScoreFunction; import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.analysis.AnalysisService; @@ -90,7 +91,7 @@ public class DefaultSearchContext extends SearchContext { private ScanContext scanContext; private float queryBoost = 1.0f; // timeout in millis - private long timeoutInMillis = -1; + private long timeoutInMillis; // terminate after count private int terminateAfter = DEFAULT_TERMINATE_AFTER; private List groupStats; @@ -127,7 +128,9 @@ public class DefaultSearchContext extends SearchContext { public DefaultSearchContext(long id, ShardSearchRequest request, SearchShardTarget shardTarget, Engine.Searcher engineSearcher, IndexService indexService, IndexShard indexShard, ScriptService scriptService, PageCacheRecycler pageCacheRecycler, - BigArrays bigArrays, Counter timeEstimateCounter, ParseFieldMatcher parseFieldMatcher) { + BigArrays bigArrays, Counter timeEstimateCounter, ParseFieldMatcher parseFieldMatcher, + TimeValue timeout + ) { super(parseFieldMatcher); this.id = id; this.request = request; @@ -145,6 +148,7 @@ public class DefaultSearchContext extends SearchContext { this.indexService = indexService; this.searcher = new ContextIndexSearcher(this, engineSearcher); this.timeEstimateCounter = timeEstimateCounter; + this.timeoutInMillis = timeout.millis(); } @Override diff --git a/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java index 862eb003093..d88d0381b3e 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -84,7 +84,7 @@ import static org.elasticsearch.cluster.metadata.MetaDataIndexStateService.INDEX * {@link StoreRecoveryService#recover(IndexShard, boolean, StoreRecoveryService.RecoveryListener)} * method, which detects that shard should be restored from snapshot rather than recovered from gateway by looking * at the {@link org.elasticsearch.cluster.routing.ShardRouting#restoreSource()} property. If this property is not null - * {@code recover} method uses {@link StoreRecoveryService#restore(org.elasticsearch.indices.recovery.RecoveryState)} + * {@code recover} method uses {@link StoreRecoveryService#restore} * method to start shard restore process. *

* At the end of the successful restore process {@code IndexShardSnapshotAndRestoreService} calls {@link #indexShardRestoreCompleted(SnapshotId, ShardId)}, @@ -100,7 +100,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis SETTING_VERSION_CREATED, SETTING_LEGACY_ROUTING_HASH_FUNCTION, SETTING_LEGACY_ROUTING_USE_TYPE, - SETTING_UUID, + SETTING_INDEX_UUID, SETTING_CREATION_DATE); // It's OK to change some settings, but we shouldn't allow simply removing them @@ -162,7 +162,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis MetaData metaDataIn = repository.readSnapshotMetaData(snapshotId, snapshot, filteredIndices); final MetaData metaData; - if (snapshot.version().before(Version.V_2_0_0)) { + if (snapshot.version().before(Version.V_2_0_0_beta1)) { // ES 2.0 now requires units for all time and byte-sized settings, so we add the default unit if it's missing in this snapshot: metaData = MetaData.addDefaultUnitsIfNeeded(logger, metaDataIn); } else { @@ -203,7 +203,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis for (Map.Entry indexEntry : renamedIndices.entrySet()) { String index = indexEntry.getValue(); boolean partial = checkPartial(index); - RestoreSource restoreSource = new RestoreSource(snapshotId, index); + RestoreSource restoreSource = new RestoreSource(snapshotId, snapshot.version(), index); String renamedIndex = indexEntry.getKey(); IndexMetaData snapshotIndexMetaData = metaData.index(index); snapshotIndexMetaData = updateIndexSettings(snapshotIndexMetaData, request.indexSettings, request.ignoreIndexSettings); @@ -221,7 +221,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis createIndexService.validateIndexName(renamedIndex, currentState); createIndexService.validateIndexSettings(renamedIndex, snapshotIndexMetaData.settings()); IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(snapshotIndexMetaData).state(IndexMetaData.State.OPEN).index(renamedIndex); - indexMdBuilder.settings(Settings.settingsBuilder().put(snapshotIndexMetaData.settings()).put(IndexMetaData.SETTING_UUID, Strings.randomBase64UUID())); + indexMdBuilder.settings(Settings.settingsBuilder().put(snapshotIndexMetaData.settings()).put(IndexMetaData.SETTING_INDEX_UUID, Strings.randomBase64UUID())); if (!request.includeAliases() && !snapshotIndexMetaData.aliases().isEmpty()) { // Remove all aliases - they shouldn't be restored indexMdBuilder.removeAllAliases(); @@ -255,7 +255,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis aliases.add(alias.value); } } - indexMdBuilder.settings(Settings.settingsBuilder().put(snapshotIndexMetaData.settings()).put(IndexMetaData.SETTING_UUID, currentIndexMetaData.uuid())); + indexMdBuilder.settings(Settings.settingsBuilder().put(snapshotIndexMetaData.settings()).put(IndexMetaData.SETTING_INDEX_UUID, currentIndexMetaData.indexUUID())); IndexMetaData updatedIndexMetaData = indexMdBuilder.index(renamedIndex).build(); rtBuilder.addAsRestore(updatedIndexMetaData, restoreSource); blocks.removeIndexBlock(renamedIndex, INDEX_CLOSED_BLOCK); diff --git a/core/src/main/java/org/elasticsearch/snapshots/Snapshot.java b/core/src/main/java/org/elasticsearch/snapshots/Snapshot.java index 01ef6a5ba0b..05429eab850 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/Snapshot.java +++ b/core/src/main/java/org/elasticsearch/snapshots/Snapshot.java @@ -20,20 +20,22 @@ package org.elasticsearch.snapshots; import 
com.google.common.collect.ImmutableList; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentBuilderString; -import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.xcontent.*; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; +import static java.util.Collections.*; + /** * Represent information about snapshot */ -public class Snapshot implements Comparable, ToXContent { +public class Snapshot implements Comparable, ToXContent, FromXContentBuilder { private final String name; @@ -57,6 +59,8 @@ public class Snapshot implements Comparable, ToXContent { private final static List NO_FAILURES = ImmutableList.of(); + public final static Snapshot PROTO = new Snapshot(); + private Snapshot(String name, List indices, SnapshotState state, String reason, Version version, long startTime, long endTime, int totalShard, int successfulShards, List shardFailures) { assert name != null; @@ -86,6 +90,13 @@ public class Snapshot implements Comparable, ToXContent { startTime, endTime, totalShard, totalShard - shardFailures.size(), shardFailures); } + /** + * Special constructor for the prototype object + */ + private Snapshot() { + this("", (List) EMPTY_LIST, 0); + } + private static SnapshotState snapshotState(String reason, List shardFailures) { if (reason == null) { if (shardFailures.isEmpty()) { @@ -221,6 +232,11 @@ public class Snapshot implements Comparable, ToXContent { return result; } + @Override + public Snapshot fromXContent(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException { + return fromXContent(parser); + } + static final class Fields { static final XContentBuilderString SNAPSHOT = new XContentBuilderString("snapshot"); static final XContentBuilderString NAME = new XContentBuilderString("name"); @@ -277,9 +293,14 @@ public class Snapshot implements Comparable, ToXContent { int totalShard = 0; int successfulShards = 0; List shardFailures = NO_FAILURES; - - XContentParser.Token token = parser.currentToken(); - if (token == XContentParser.Token.START_OBJECT) { + if (parser.currentToken() == null) { // fresh parser? 
move to the first token + parser.nextToken(); + } + if (parser.currentToken() == XContentParser.Token.START_OBJECT) { // on a start object move to next token + parser.nextToken(); + } + XContentParser.Token token; + if ((token = parser.nextToken()) == XContentParser.Token.START_OBJECT) { String currentFieldName = parser.currentName(); if ("snapshot".equals(currentFieldName)) { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { @@ -328,6 +349,8 @@ public class Snapshot implements Comparable, ToXContent { } } } + } else { + throw new ElasticsearchParseException("unexpected token [" + token + "]"); } return new Snapshot(name, indices, state, reason, version, startTime, endTime, totalShard, successfulShards, shardFailures); } diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotMissingException.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotMissingException.java index 150729f2cbf..27fe3de51e0 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotMissingException.java +++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotMissingException.java @@ -34,6 +34,10 @@ public class SnapshotMissingException extends SnapshotException { super(snapshot, "is missing", cause); } + public SnapshotMissingException(SnapshotId snapshot) { + super(snapshot, "is missing"); + } + public SnapshotMissingException(StreamInput in) throws IOException { super(in); } diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotUtils.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotUtils.java index d65225ba0d0..84e29063ddc 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotUtils.java +++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotUtils.java @@ -21,8 +21,7 @@ package org.elasticsearch.snapshots; import com.google.common.collect.ImmutableList; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.regex.Regex; -import org.elasticsearch.index.Index; -import org.elasticsearch.indices.IndexMissingException; +import org.elasticsearch.index.IndexNotFoundException; import java.util.HashSet; import java.util.List; @@ -75,7 +74,7 @@ public class SnapshotUtils { if (indexOrPattern.isEmpty() || !Regex.isSimpleMatchPattern(indexOrPattern)) { if (!availableIndices.contains(indexOrPattern)) { if (!indicesOptions.ignoreUnavailable()) { - throw new IndexMissingException(new Index(indexOrPattern)); + throw new IndexNotFoundException(indexOrPattern); } else { if (result == null) { // add all the previous ones... 
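An aside on the Snapshot.fromXContent change above: the new preamble makes parsing tolerant of both a fresh parser (no current token yet) and a parser already positioned on the enclosing START_OBJECT. The same priming idiom, sketched against Jackson's JsonParser purely as an analogy (XContentParser is ES-internal; this is not the ES API):

    import com.fasterxml.jackson.core.JsonFactory;
    import com.fasterxml.jackson.core.JsonParser;
    import com.fasterxml.jackson.core.JsonToken;

    import java.io.IOException;

    class ParserPrimingSketch {
        // Accept a parser in either state: fresh, or already on START_OBJECT.
        static void prime(JsonParser parser) throws IOException {
            if (parser.currentToken() == null) { // fresh parser? move to the first token
                parser.nextToken();
            }
            if (parser.currentToken() == JsonToken.START_OBJECT) { // step inside the object
                parser.nextToken();
            }
        }

        public static void main(String[] args) throws IOException {
            JsonParser parser = new JsonFactory().createParser("{\"snapshot\":{}}");
            prime(parser);
            System.out.println(parser.currentToken()); // FIELD_NAME, from either starting state
        }
    }

Either entry point converges on the first field inside the object, so callers no longer need to guarantee a particular parser position before handing the parser over.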
@@ -111,7 +110,7 @@ public class SnapshotUtils { } } if (!found && !indicesOptions.allowNoIndices()) { - throw new IndexMissingException(new Index(indexOrPattern)); + throw new IndexNotFoundException(indexOrPattern); } } if (result == null) { diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 5d4eae62b29..da796d1b502 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -28,10 +28,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.*; import org.elasticsearch.cluster.SnapshotsInProgress.ShardSnapshotStatus; import org.elasticsearch.cluster.SnapshotsInProgress.State; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.metadata.RepositoriesMetaData; -import org.elasticsearch.cluster.metadata.SnapshotId; +import org.elasticsearch.cluster.metadata.*; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.IndexRoutingTable; @@ -82,6 +79,8 @@ public class SnapshotsService extends AbstractLifecycleComponent snapshotCompletionListeners = new CopyOnWriteArrayList<>(); @Inject - public SnapshotsService(Settings settings, ClusterService clusterService, RepositoriesService repositoriesService, ThreadPool threadPool) { + public SnapshotsService(Settings settings, ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, RepositoriesService repositoriesService, ThreadPool threadPool) { super(settings); this.clusterService = clusterService; + this.indexNameExpressionResolver = indexNameExpressionResolver; this.repositoriesService = repositoriesService; this.threadPool = threadPool; @@ -175,11 +175,10 @@ public class SnapshotsService extends AbstractLifecycleComponent indices = ImmutableList.copyOf(metaData.concreteIndices(request.indicesOptions(), request.indices())); + ImmutableList indices = ImmutableList.copyOf(indexNameExpressionResolver.concreteIndices(currentState, request.indicesOptions(), request.indices())); logger.trace("[{}][{}] creating snapshot for indices [{}]", request.repository(), request.name(), indices); newSnapshot = new SnapshotsInProgress.Entry(snapshotId, request.includeGlobalState(), State.INIT, indices, System.currentTimeMillis(), null); snapshots = new SnapshotsInProgress(newSnapshot); @@ -472,7 +471,7 @@ public class SnapshotsService extends AbstractLifecycleComponent= 2) { + String number = v.number(); + if (v.isBeta()) { + number = number.replace("-beta", "_beta"); + } else if (v.isRC()) { + number = number.replace("-rc", "_rc"); + } + assertEquals("V_" + number.replace('.', '_'), constantName); + } else { + assertEquals("V_" + v.number().replace('.', '_'), constantName); + } // only the latest version for a branch should be a snapshot (ie unreleased) String branchName = "" + v.major + "." 
+ v.minor; diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/info/PluginsInfoTest.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/info/PluginsInfoTest.java new file mode 100644 index 00000000000..feb7f5a8938 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/info/PluginsInfoTest.java @@ -0,0 +1,51 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.node.info; + +import com.google.common.base.Function; +import com.google.common.collect.Lists; +import org.elasticsearch.test.ElasticsearchTestCase; +import org.junit.Test; + +import java.util.List; + +import static org.hamcrest.Matchers.contains; + +public class PluginsInfoTest extends ElasticsearchTestCase { + + @Test + public void testPluginListSorted() { + PluginsInfo pluginsInfo = new PluginsInfo(5); + pluginsInfo.add(new PluginInfo("c", "foo", true, true, "dummy")); + pluginsInfo.add(new PluginInfo("b", "foo", true, true, "dummy")); + pluginsInfo.add(new PluginInfo("e", "foo", true, true, "dummy")); + pluginsInfo.add(new PluginInfo("a", "foo", true, true, "dummy")); + pluginsInfo.add(new PluginInfo("d", "foo", true, true, "dummy")); + + final List infos = pluginsInfo.getInfos(); + List names = Lists.transform(infos, new Function() { + @Override + public String apply(PluginInfo input) { + return input.getName(); + } + }); + assertThat(names, contains("a", "b", "c", "d", "e")); + } +} diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexTests.java index d2e996d6235..e8d24c774ee 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexTests.java @@ -20,13 +20,14 @@ package org.elasticsearch.action.admin.indices.get; import com.google.common.collect.ImmutableList; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature; import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.indices.IndexMissingException; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.search.warmer.IndexWarmersMetaData.Entry; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; @@ -68,7 +69,7 @@ public class GetIndexTests extends ElasticsearchIntegrationTest { assertWarmers(response, "idx"); } - 
@Test(expected=IndexMissingException.class) + @Test(expected=IndexNotFoundException.class) public void testSimpleUnknownIndex() { client().admin().indices().prepareGetIndex().addIndices("missing_idx").get(); } diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesShardStoreRequestTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesShardStoreRequestTests.java new file mode 100644 index 00000000000..fee2f329d1a --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesShardStoreRequestTests.java @@ -0,0 +1,221 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.segments; + +import com.carrotsearch.hppc.cursors.IntObjectCursor; + +import com.carrotsearch.hppc.cursors.ObjectCursor; +import com.google.common.base.Predicate; +import org.apache.lucene.index.CorruptIndexException; +import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.client.Requests; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.routing.*; +import org.elasticsearch.common.collect.ImmutableOpenIntMap; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.elasticsearch.test.store.MockFSDirectoryService; +import org.junit.Test; + +import java.io.IOException; +import java.util.*; +import java.util.concurrent.ExecutionException; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.*; + +@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST) +public class IndicesShardStoreRequestTests extends ElasticsearchIntegrationTest { + + @Test + public void testEmpty() { + ensureGreen(); + IndicesShardStoresResponse rsp = client().admin().indices().prepareShardStores().get(); + assertThat(rsp.getStoreStatuses().size(), equalTo(0)); + } + + @Test + public void testBasic() throws Exception { + String index = "test"; + internalCluster().ensureAtLeastNumDataNodes(2); + assertAcked(prepareCreate(index).setSettings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "2") + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "1") + )); + indexRandomData(index); + ensureGreen(index); + + // no unallocated shards + IndicesShardStoresResponse response = 
client().admin().indices().prepareShardStores(index).get(); + assertThat(response.getStoreStatuses().size(), equalTo(0)); + + // all shards + response = client().admin().indices().shardStores(Requests.indicesShardStoresRequest(index).shardStatuses("all")).get(); + assertThat(response.getStoreStatuses().containsKey(index), equalTo(true)); + ImmutableOpenIntMap> shardStores = response.getStoreStatuses().get(index); + assertThat(shardStores.values().size(), equalTo(2)); + for (ObjectCursor> shardStoreStatuses : shardStores.values()) { + for (IndicesShardStoresResponse.StoreStatus storeStatus : shardStoreStatuses.value) { + assertThat(storeStatus.getVersion(), greaterThan(-1l)); + assertThat(storeStatus.getNode(), notNullValue()); + assertThat(storeStatus.getStoreException(), nullValue()); + } + } + + // default with unassigned shards + ensureGreen(index); + logger.info("--> disable allocation"); + disableAllocation(index); + logger.info("--> stop random node"); + internalCluster().stopRandomNode(new IndexNodePredicate(index)); + response = client().admin().indices().shardStores(Requests.indicesShardStoresRequest(index)).get(); + assertThat(response.getStoreStatuses().containsKey(index), equalTo(true)); + ImmutableOpenIntMap> shardStoresStatuses = response.getStoreStatuses().get(index); + assertThat(shardStoresStatuses.size(), greaterThan(0)); + for (IntObjectCursor> storesStatus : shardStoresStatuses) { + assertThat("must report for one store", storesStatus.value.size(), equalTo(1)); + assertThat("reported store should be primary", storesStatus.value.get(0).getAllocation(), equalTo(IndicesShardStoresResponse.StoreStatus.Allocation.PRIMARY)); + } + logger.info("--> enable allocation"); + enableAllocation(index); + } + + @Test + public void testIndices() throws Exception { + String index1 = "test1"; + String index2 = "test2"; + internalCluster().ensureAtLeastNumDataNodes(2); + assertAcked(prepareCreate(index1).setSettings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "2") + )); + assertAcked(prepareCreate(index2).setSettings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "2") + )); + indexRandomData(index1); + indexRandomData(index2); + ensureGreen(); + IndicesShardStoresResponse response = client().admin().indices().shardStores(Requests.indicesShardStoresRequest().shardStatuses("all")).get(); + ImmutableOpenMap>> shardStatuses = response.getStoreStatuses(); + assertThat(shardStatuses.containsKey(index1), equalTo(true)); + assertThat(shardStatuses.containsKey(index2), equalTo(true)); + assertThat(shardStatuses.get(index1).size(), equalTo(2)); + assertThat(shardStatuses.get(index2).size(), equalTo(2)); + + // ensure index filtering works + response = client().admin().indices().shardStores(Requests.indicesShardStoresRequest(index1).shardStatuses("all")).get(); + shardStatuses = response.getStoreStatuses(); + assertThat(shardStatuses.containsKey(index1), equalTo(true)); + assertThat(shardStatuses.containsKey(index2), equalTo(false)); + assertThat(shardStatuses.get(index1).size(), equalTo(2)); + } + + @Test + public void testCorruptedShards() throws Exception { + String index = "test"; + internalCluster().ensureAtLeastNumDataNodes(2); + assertAcked(prepareCreate(index).setSettings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "5") + .put(MockFSDirectoryService.CHECK_INDEX_ON_CLOSE, false) + )); + indexRandomData(index); + ensureGreen(index); + + logger.info("--> disable allocation"); + disableAllocation(index); + + logger.info("--> corrupt 
random shard copies"); + Map> corruptedShardIDMap = new HashMap<>(); + for (String node : internalCluster().nodesInclude(index)) { + IndicesService indexServices = internalCluster().getInstance(IndicesService.class, node); + IndexService indexShards = indexServices.indexServiceSafe(index); + for (Integer shardId : indexShards.shardIds()) { + IndexShard shard = indexShards.shardSafe(shardId); + if (randomBoolean()) { + shard.failShard("test", new CorruptIndexException("test corrupted", "")); + Set nodes = corruptedShardIDMap.get(shardId); + if (nodes == null) { + nodes = new HashSet<>(); + } + nodes.add(node); + corruptedShardIDMap.put(shardId, nodes); + } + } + } + + IndicesShardStoresResponse rsp = client().admin().indices().prepareShardStores(index).setShardStatuses("all").get(); + ImmutableOpenIntMap> shardStatuses = rsp.getStoreStatuses().get(index); + assertNotNull(shardStatuses); + assertThat(shardStatuses.size(), greaterThan(0)); + for (IntObjectCursor> shardStatus : shardStatuses) { + for (IndicesShardStoresResponse.StoreStatus status : shardStatus.value) { + if (corruptedShardIDMap.containsKey(shardStatus.key) + && corruptedShardIDMap.get(shardStatus.key).contains(status.getNode().name())) { + assertThat(status.getVersion(), greaterThanOrEqualTo(0l)); + assertThat(status.getStoreException(), notNullValue()); + } else { + assertThat(status.getVersion(), greaterThanOrEqualTo(0l)); + assertNull(status.getStoreException()); + } + } + } + logger.info("--> enable allocation"); + enableAllocation(index); + } + + private void indexRandomData(String index) throws ExecutionException, InterruptedException { + int numDocs = scaledRandomIntBetween(10, 20); + IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs]; + for (int i = 0; i < builders.length; i++) { + builders[i] = client().prepareIndex(index, "type").setSource("field", "value"); + } + indexRandom(true, builders); + client().admin().indices().prepareFlush().setForce(true).setWaitIfOngoing(true).execute().actionGet(); + } + + private final static class IndexNodePredicate implements Predicate { + private final Set nodesWithShard; + + public IndexNodePredicate(String index) { + this.nodesWithShard = findNodesWithShard(index); + } + + @Override + public boolean apply(Settings settings) { + return nodesWithShard.contains(settings.get("name")); + } + + private Set findNodesWithShard(String index) { + ClusterState state = client().admin().cluster().prepareState().get().getState(); + IndexRoutingTable indexRoutingTable = state.routingTable().index(index); + List startedShards = indexRoutingTable.shardsWithState(ShardRoutingState.STARTED); + Set nodesWithShard = new HashSet<>(); + for (ShardRouting startedShard : startedShards) { + nodesWithShard.add(state.nodes().get(startedShard.currentNodeId()).getName()); + } + return nodesWithShard; + } + } +} diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesShardStoreResponseTest.java b/core/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesShardStoreResponseTest.java new file mode 100644 index 00000000000..a240787b307 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesShardStoreResponseTest.java @@ -0,0 +1,125 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.segments; + +import com.google.common.collect.ImmutableList; +import org.apache.lucene.index.CorruptIndexException; +import org.apache.lucene.util.CollectionUtil; +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.ImmutableOpenIntMap; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.common.xcontent.*; +import org.elasticsearch.test.ElasticsearchTestCase; +import org.elasticsearch.transport.NodeDisconnectedException; +import org.junit.Test; + +import java.io.IOException; +import java.util.*; + +import static org.hamcrest.Matchers.equalTo; + +public class IndicesShardStoreResponseTest extends ElasticsearchTestCase { + + @Test + public void testBasicSerialization() throws Exception { + ImmutableOpenMap.Builder>> indexStoreStatuses = ImmutableOpenMap.builder(); + ImmutableList.Builder failures = ImmutableList.builder(); + ImmutableOpenIntMap.Builder> storeStatuses = ImmutableOpenIntMap.builder(); + + DiscoveryNode node1 = new DiscoveryNode("node1", DummyTransportAddress.INSTANCE, Version.CURRENT); + DiscoveryNode node2 = new DiscoveryNode("node2", DummyTransportAddress.INSTANCE, Version.CURRENT); + List storeStatusList = new ArrayList<>(); + storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node1, 3, IndicesShardStoresResponse.StoreStatus.Allocation.PRIMARY, null)); + storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node2, 2, IndicesShardStoresResponse.StoreStatus.Allocation.REPLICA, null)); + storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, IndicesShardStoresResponse.StoreStatus.Allocation.UNUSED, new IOException("corrupted"))); + storeStatuses.put(0, storeStatusList); + storeStatuses.put(1, storeStatusList); + ImmutableOpenIntMap> storesMap = storeStatuses.build(); + indexStoreStatuses.put("test", storesMap); + indexStoreStatuses.put("test2", storesMap); + + failures.add(new IndicesShardStoresResponse.Failure("node1", "test", 3, new NodeDisconnectedException(node1, ""))); + + IndicesShardStoresResponse storesResponse = new IndicesShardStoresResponse(indexStoreStatuses.build(), failures.build()); + XContentBuilder contentBuilder = XContentFactory.jsonBuilder(); + contentBuilder.startObject(); + storesResponse.toXContent(contentBuilder, ToXContent.EMPTY_PARAMS); + contentBuilder.endObject(); + BytesReference bytes = contentBuilder.bytes(); + + try (XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(bytes)) { + Map map = parser.map(); + List failureList = (List) map.get("failures"); + assertThat(failureList.size(), equalTo(1)); + HashMap failureMap = (HashMap) 
failureList.get(0); + assertThat(failureMap.containsKey("index"), equalTo(true)); + assertThat(((String) failureMap.get("index")), equalTo("test")); + assertThat(failureMap.containsKey("shard"), equalTo(true)); + assertThat(((int) failureMap.get("shard")), equalTo(3)); + assertThat(failureMap.containsKey("node"), equalTo(true)); + assertThat(((String) failureMap.get("node")), equalTo("node1")); + + Map indices = (Map) map.get("indices"); + for (String index : new String[] {"test", "test2"}) { + assertThat(indices.containsKey(index), equalTo(true)); + Map shards = ((Map) ((Map) indices.get(index)).get("shards")); + assertThat(shards.size(), equalTo(2)); + for (String shardId : shards.keySet()) { + HashMap shardStoresStatus = (HashMap) shards.get(shardId); + assertThat(shardStoresStatus.containsKey("stores"), equalTo(true)); + List stores = (ArrayList) shardStoresStatus.get("stores"); + assertThat(stores.size(), equalTo(storeStatusList.size())); + for (int i = 0; i < stores.size(); i++) { + HashMap storeInfo = ((HashMap) stores.get(i)); + IndicesShardStoresResponse.StoreStatus storeStatus = storeStatusList.get(i); + assertThat(storeInfo.containsKey("version"), equalTo(true)); + assertThat(((int) storeInfo.get("version")), equalTo(((int) storeStatus.getVersion()))); + assertThat(storeInfo.containsKey("allocation"), equalTo(true)); + assertThat(((String) storeInfo.get("allocation")), equalTo(storeStatus.getAllocation().value())); + assertThat(storeInfo.containsKey(storeStatus.getNode().id()), equalTo(true)); + if (storeStatus.getStoreException() != null) { + assertThat(storeInfo.containsKey("store_exception"), equalTo(true)); + } + } + } + } + } + } + + @Test + public void testStoreStatusOrdering() throws Exception { + DiscoveryNode node1 = new DiscoveryNode("node1", DummyTransportAddress.INSTANCE, Version.CURRENT); + List orderedStoreStatuses = new ArrayList<>(); + orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 2, IndicesShardStoresResponse.StoreStatus.Allocation.PRIMARY, null)); + orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, IndicesShardStoresResponse.StoreStatus.Allocation.PRIMARY, null)); + orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, IndicesShardStoresResponse.StoreStatus.Allocation.REPLICA, null)); + orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, IndicesShardStoresResponse.StoreStatus.Allocation.UNUSED, null)); + orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 3, IndicesShardStoresResponse.StoreStatus.Allocation.REPLICA, new IOException("corrupted"))); + + List storeStatuses = new ArrayList<>(orderedStoreStatuses); + Collections.shuffle(storeStatuses); + CollectionUtil.timSort(storeStatuses); + assertThat(storeStatuses, equalTo(orderedStoreStatuses)); + } +} diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkProcessorClusterSettingsTests.java b/core/src/test/java/org/elasticsearch/action/bulk/BulkProcessorClusterSettingsTests.java index 800a49453cf..d450d2399cf 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/BulkProcessorClusterSettingsTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkProcessorClusterSettingsTests.java @@ -47,7 +47,7 @@ public class BulkProcessorClusterSettingsTests extends ElasticsearchIntegrationT assertEquals(3, responses.length); assertFalse("Operation on existing index should succeed", responses[0].isFailed()); assertTrue("Missing index should have been flagged", 
responses[1].isFailed()); - assertEquals("[wontwork] no such index", responses[1].getFailureMessage()); + assertEquals("[wontwork] IndexNotFoundException[no such index]", responses[1].getFailureMessage()); assertFalse("Operation on existing index should succeed", responses[2].isFailed()); } } diff --git a/core/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java b/core/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java index a5a5bc45122..2295f549fbe 100644 --- a/core/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java @@ -64,7 +64,7 @@ public class TransportActionFilterChainTests extends ElasticsearchTestCase { String actionName = randomAsciiOfLength(randomInt(30)); ActionFilters actionFilters = new ActionFilters(filters); - TransportAction transportAction = new TransportAction(Settings.EMPTY, actionName, null, actionFilters) { + TransportAction transportAction = new TransportAction(Settings.EMPTY, actionName, null, actionFilters, null) { @Override protected void doExecute(TestRequest request, ActionListener listener) { listener.onResponse(new TestResponse()); @@ -146,7 +146,7 @@ public class TransportActionFilterChainTests extends ElasticsearchTestCase { String actionName = randomAsciiOfLength(randomInt(30)); ActionFilters actionFilters = new ActionFilters(filters); - TransportAction transportAction = new TransportAction(Settings.EMPTY, actionName, null, actionFilters) { + TransportAction transportAction = new TransportAction(Settings.EMPTY, actionName, null, actionFilters, null) { @Override protected void doExecute(TestRequest request, ActionListener listener) { listener.onResponse(new TestResponse()); @@ -231,7 +231,7 @@ public class TransportActionFilterChainTests extends ElasticsearchTestCase { String actionName = randomAsciiOfLength(randomInt(30)); ActionFilters actionFilters = new ActionFilters(filters); - TransportAction transportAction = new TransportAction(Settings.EMPTY, actionName, null, actionFilters) { + TransportAction transportAction = new TransportAction(Settings.EMPTY, actionName, null, actionFilters, null) { @Override protected void doExecute(TestRequest request, ActionListener listener) { listener.onResponse(new TestResponse()); @@ -289,7 +289,7 @@ public class TransportActionFilterChainTests extends ElasticsearchTestCase { String actionName = randomAsciiOfLength(randomInt(30)); ActionFilters actionFilters = new ActionFilters(filters); - TransportAction transportAction = new TransportAction(Settings.EMPTY, actionName, null, actionFilters) { + TransportAction transportAction = new TransportAction(Settings.EMPTY, actionName, null, actionFilters, null) { @Override protected void doExecute(TestRequest request, ActionListener listener) { listener.onResponse(new TestResponse()); diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/ShardReplicationTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/ShardReplicationTests.java index 97432f1be93..8473cc1e5cb 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/ShardReplicationTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/ShardReplicationTests.java @@ -38,6 +38,7 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.block.ClusterBlocks; 
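For context on the updated BulkProcessorClusterSettingsTests assertion above ("[wontwork] IndexNotFoundException[no such index]"): bulk failure messages now carry the exception's simple class name wrapped around its message. A rough illustrative reconstruction of that shape (a hypothetical helper, not the actual ElasticsearchException rendering code):

    class FailureMessageSketch {
        // Approximates the asserted format: "[<index>] <ExceptionSimpleName>[<message>]"
        static String render(String index, Exception e) {
            return "[" + index + "] " + e.getClass().getSimpleName() + "[" + e.getMessage() + "]";
        }

        public static void main(String[] args) {
            // Stand-in exception; the real one is org.elasticsearch.index.IndexNotFoundException
            class IndexNotFoundException extends RuntimeException {
                IndexNotFoundException(String message) { super(message); }
            }
            System.out.println(render("wontwork", new IndexNotFoundException("no such index")));
            // -> [wontwork] IndexNotFoundException[no such index]
        }
    }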
import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -246,7 +247,7 @@ public class ShardReplicationTests extends ElasticsearchTestCase { ClusterState.Builder state = ClusterState.builder(new ClusterName("test")); state.nodes(discoBuilder); - state.metaData(MetaData.builder().put(indexMetaData, false).generateUuidIfNeeded()); + state.metaData(MetaData.builder().put(indexMetaData, false).generateClusterUuidIfNeeded()); state.routingTable(RoutingTable.builder().add(IndexRoutingTable.builder(index).addIndexShard(indexShardRoutingBuilder.build()))); return state.build(); } @@ -693,7 +694,7 @@ public class ShardReplicationTests extends ElasticsearchTestCase { ThreadPool threadPool) { super(settings, actionName, transportService, clusterService, null, threadPool, new ShardStateAction(settings, clusterService, transportService, null, null), null, - new ActionFilters(new HashSet()), Request.class, Request.class, ThreadPool.Names.SAME); + new ActionFilters(new HashSet()), new IndexNameExpressionResolver(), Request.class, Request.class, ThreadPool.Names.SAME); } @Override diff --git a/core/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTests.java b/core/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTests.java index 31d5af8d8bc..06b21bbaabc 100644 --- a/core/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTests.java +++ b/core/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTests.java @@ -40,7 +40,7 @@ import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.common.inject.internal.Join; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.indices.IndexMissingException; +import org.elasticsearch.index.*; import org.elasticsearch.test.ElasticsearchIntegrationTest; import java.io.IOException; @@ -262,7 +262,7 @@ public abstract class AbstractTermVectorsTests extends ElasticsearchIntegrationT } // always adds a test that fails configs.add(new TestConfig(new TestDoc("doesnt_exist", new TestFieldSetting[]{}, new String[]{}).index("doesn't_exist").alias("doesn't_exist"), - new String[]{"doesnt_exist"}, true, true, true).expectedException(IndexMissingException.class)); + new String[]{"doesnt_exist"}, true, true, true).expectedException(org.elasticsearch.index.IndexNotFoundException.class)); refresh(); diff --git a/core/src/test/java/org/elasticsearch/aliases/IndexAliasesTests.java b/core/src/test/java/org/elasticsearch/aliases/IndexAliasesTests.java index 9c459283fb2..01548f9d84b 100644 --- a/core/src/test/java/org/elasticsearch/aliases/IndexAliasesTests.java +++ b/core/src/test/java/org/elasticsearch/aliases/IndexAliasesTests.java @@ -29,7 +29,6 @@ import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchType; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.AliasAction; import org.elasticsearch.cluster.metadata.AliasMetaData; @@ -39,8 +38,7 @@ import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.index.query.QueryParsingException; -import org.elasticsearch.rest.action.admin.indices.alias.delete.AliasesMissingException; +import org.elasticsearch.rest.action.admin.indices.alias.delete.AliasesNotFoundException; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.AggregationBuilders; @@ -531,7 +529,7 @@ public class IndexAliasesTests extends ElasticsearchIntegrationTest { } - @Test(expected = AliasesMissingException.class) + @Test(expected = AliasesNotFoundException.class) public void testIndicesRemoveNonExistingAliasResponds404() throws Exception { logger.info("--> creating index [test]"); createIndex("test"); @@ -909,29 +907,21 @@ public class IndexAliasesTests extends ElasticsearchIntegrationTest { } @Test + // Before 2.0 alias filters were parsed at alias creation time; in order + // for filters to work correctly, ES required that fields mentioned in those + // filters exist in the mapping. + // From 2.0 and higher alias filters are parsed at request time and therefore + // fields mentioned in filters don't need to exist in the mapping. public void testAddAliasWithFilterNoMapping() throws Exception { assertAcked(prepareCreate("test")); - - try { - client().admin().indices().prepareAliases() - .addAlias("test", "a", QueryBuilders.termQuery("field1", "term")) - .get(); - fail(); - } catch (IllegalArgumentException e) { - assertThat(e.getCause(), instanceOf(QueryParsingException.class)); - } - - try { - client().admin().indices().prepareAliases() - .addAlias("test", "a", QueryBuilders.rangeQuery("field2").from(0).to(1)) - .get(); - fail(); - } catch (IllegalArgumentException e) { - assertThat(e.getCause(), instanceOf(QueryParsingException.class)); - } - client().admin().indices().prepareAliases() - .addAlias("test", "a", QueryBuilders.matchAllQuery()) // <-- no fail, b/c no field mentioned + .addAlias("test", "a", QueryBuilders.termQuery("field1", "term")) + .get(); + client().admin().indices().prepareAliases() + .addAlias("test", "a", QueryBuilders.rangeQuery("field2").from(0).to(1)) + .get(); + client().admin().indices().prepareAliases() + .addAlias("test", "a", QueryBuilders.matchAllQuery()) + .get(); } diff --git a/core/src/test/java/org/elasticsearch/benchmark/recovery/ReplicaRecoveryBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/recovery/ReplicaRecoveryBenchmark.java index 0d3375d1c6b..9648947af63 100644 --- a/core/src/test/java/org/elasticsearch/benchmark/recovery/ReplicaRecoveryBenchmark.java +++ b/core/src/test/java/org/elasticsearch/benchmark/recovery/ReplicaRecoveryBenchmark.java @@ -29,7 +29,7 @@ import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.SizeValue; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.indices.IndexMissingException; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.node.Node; import org.elasticsearch.test.BackgroundIndexer; import org.elasticsearch.transport.TransportModule; @@ -81,7 +81,7 @@ public class ReplicaRecoveryBenchmark { indexer.setMaxFieldSize(150); try { client1.admin().indices().prepareDelete(INDEX_NAME).get(); - } catch (IndexMissingException e) { + } catch (IndexNotFoundException e)
{ } client1.admin().indices().prepareCreate(INDEX_NAME).get(); indexer.start(DOC_COUNT / 2); diff --git a/core/src/test/java/org/elasticsearch/bootstrap/BootstrapForTesting.java b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapForTesting.java index 1ed5df3b751..0106f2a103d 100644 --- a/core/src/test/java/org/elasticsearch/bootstrap/BootstrapForTesting.java +++ b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapForTesting.java @@ -28,6 +28,8 @@ import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.logging.Loggers; import java.io.FilePermission; +import java.net.URL; +import java.net.URLClassLoader; import java.nio.file.Path; import java.security.Permissions; import java.security.Policy; @@ -83,15 +85,21 @@ public class BootstrapForTesting { Security.setCodebaseProperties(); // initialize paths the same exact way as bootstrap. Permissions perms = new Permissions(); - Path basedir = PathUtils.get(Objects.requireNonNull(System.getProperty("project.basedir"), - "please set ${project.basedir} in pom.xml")); - // target/classes, target/test-classes - Security.addPath(perms, basedir.resolve("target").resolve("classes"), "read,readlink"); - Security.addPath(perms, basedir.resolve("target").resolve("test-classes"), "read,readlink"); - // .m2/repository - Path m2repoDir = PathUtils.get(Objects.requireNonNull(System.getProperty("m2.repository"), - "please set ${m2.repository} in pom.xml")); - Security.addPath(perms, m2repoDir, "read,readlink"); + // add permissions to everything in classpath + for (URL url : ((URLClassLoader)BootstrapForTesting.class.getClassLoader()).getURLs()) { + Path path = PathUtils.get(url.toURI()); + // resource itself + perms.add(new FilePermission(path.toString(), "read,readlink")); + // classes underneath + perms.add(new FilePermission(path.toString() + path.getFileSystem().getSeparator() + "-", "read,readlink")); + + // crazy jython... 
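A brief aside before the jython special case flagged by the comment above (its handling continues immediately below): BootstrapForTesting now derives the test security policy from the classpath itself rather than from hard-coded Maven paths. For each classpath entry it grants read on the entry and, via FilePermission's "-" wildcard, on everything recursively beneath it. Condensed into a standalone sketch (same calls as in the loop above; the wrapper class is illustrative):

    import java.io.FilePermission;
    import java.net.URL;
    import java.net.URLClassLoader;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.security.Permissions;

    class ClasspathPermissionsSketch {
        static Permissions forClassLoader(URLClassLoader loader) throws Exception {
            Permissions perms = new Permissions();
            for (URL url : loader.getURLs()) {
                Path path = Paths.get(url.toURI());
                // the entry itself (a jar file or a classes directory)
                perms.add(new FilePermission(path.toString(), "read,readlink"));
                // "<entry>/-" means the entry plus everything beneath it, recursively
                perms.add(new FilePermission(
                        path.toString() + path.getFileSystem().getSeparator() + "-", "read,readlink"));
            }
            return perms;
        }
    }

Driving the permissions off the actual classloader keeps the policy correct however the tests are launched (Maven, an IDE, or a plain JVM), which the removed project.basedir and m2.repository properties could not guarantee.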
+ String filename = path.getFileName().toString(); + if (filename.contains("jython") && filename.endsWith(".jar")) { + // just enough so it won't fail when it does not exist + perms.add(new FilePermission(path.getParent().resolve("Lib").toString(), "read,readlink")); + } + } // java.io.tmpdir Security.addPath(perms, javaTmpDir, "read,readlink,write,delete"); // custom test config file diff --git a/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java b/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java index d87a7e295f1..819cb1f51e4 100644 --- a/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java +++ b/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java @@ -19,10 +19,9 @@ package org.elasticsearch.bootstrap; +import org.elasticsearch.Version; import org.elasticsearch.test.ElasticsearchTestCase; -import java.io.ByteArrayOutputStream; -import java.io.FileOutputStream; import java.io.IOException; import java.net.URL; import java.nio.file.Files; @@ -54,7 +53,7 @@ public class JarHellTests extends ElasticsearchTestCase { URL makeFile(Path dir, String name) throws IOException { Path filepath = dir.resolve(name); Files.newOutputStream(filepath, StandardOpenOption.CREATE).close(); - return filepath.toUri().toURL(); + return dir.toUri().toURL(); } public void testDifferentJars() throws Exception { @@ -62,6 +61,7 @@ public class JarHellTests extends ElasticsearchTestCase { URL[] jars = {makeJar(dir, "foo.jar", null, "DuplicateClass.class"), makeJar(dir, "bar.jar", null, "DuplicateClass.class")}; try { JarHell.checkJarHell(jars); + fail("did not get expected exception"); } catch (IllegalStateException e) { assertTrue(e.getMessage().contains("jar hell!")); assertTrue(e.getMessage().contains("DuplicateClass")); @@ -95,6 +95,7 @@ public class JarHellTests extends ElasticsearchTestCase { URL[] dirs = {makeFile(dir1, "DuplicateClass.class"), makeFile(dir2, "DuplicateClass.class")}; try { JarHell.checkJarHell(dirs); + fail("did not get expected exception"); } catch (IllegalStateException e) { assertTrue(e.getMessage().contains("jar hell!")); assertTrue(e.getMessage().contains("DuplicateClass")); @@ -109,6 +110,7 @@ public class JarHellTests extends ElasticsearchTestCase { URL[] dirs = {makeJar(dir1, "foo.jar", null, "DuplicateClass.class"), makeFile(dir2, "DuplicateClass.class")}; try { JarHell.checkJarHell(dirs); + fail("did not get expected exception"); } catch (IllegalStateException e) { assertTrue(e.getMessage().contains("jar hell!")); assertTrue(e.getMessage().contains("DuplicateClass")); @@ -135,6 +137,7 @@ public class JarHellTests extends ElasticsearchTestCase { URL[] jars = {JarHellTests.class.getResource("duplicate-classes.jar")}; try { JarHell.checkJarHell(jars); + fail("did not get expected exception"); } catch (IllegalStateException e) { assertTrue(e.getMessage().contains("jar hell!")); assertTrue(e.getMessage().contains("DuplicateClass")); @@ -154,12 +157,15 @@ public class JarHellTests extends ElasticsearchTestCase { System.setProperty("java.specification.version", "1.7"); Manifest manifest = new Manifest(); - manifest.getMainAttributes().put(new Attributes.Name("X-Compile-Target-JDK"), "1.8"); + Attributes attributes = manifest.getMainAttributes(); + attributes.put(Attributes.Name.MANIFEST_VERSION, "1.0.0"); + attributes.put(new Attributes.Name("X-Compile-Target-JDK"), "1.8"); URL[] jars = {makeJar(dir, "foo.jar", manifest, "Foo.class")}; try { JarHell.checkJarHell(jars); + fail("did not get expected exception"); } catch 
(IllegalStateException e) { - assertTrue(e.getMessage().contains("requires java 1.8")); + assertTrue(e.getMessage().contains("requires Java 1.8")); assertTrue(e.getMessage().contains("your system: 1.7")); } finally { System.setProperty("java.specification.version", previousJavaVersion); @@ -172,7 +178,9 @@ public class JarHellTests extends ElasticsearchTestCase { System.setProperty("java.specification.version", "1.7"); Manifest manifest = new Manifest(); - manifest.getMainAttributes().put(new Attributes.Name("X-Compile-Target-JDK"), "1.7"); + Attributes attributes = manifest.getMainAttributes(); + attributes.put(Attributes.Name.MANIFEST_VERSION, "1.0.0"); + attributes.put(new Attributes.Name("X-Compile-Target-JDK"), "1.7"); URL[] jars = {makeJar(dir, "foo.jar", manifest, "Foo.class")}; try { JarHell.checkJarHell(jars); @@ -187,7 +195,9 @@ public class JarHellTests extends ElasticsearchTestCase { System.setProperty("java.specification.version", "bogus"); Manifest manifest = new Manifest(); - manifest.getMainAttributes().put(new Attributes.Name("X-Compile-Target-JDK"), "1.7"); + Attributes attributes = manifest.getMainAttributes(); + attributes.put(Attributes.Name.MANIFEST_VERSION, "1.0.0"); + attributes.put(new Attributes.Name("X-Compile-Target-JDK"), "1.7"); URL[] jars = {makeJar(dir, "foo.jar", manifest, "Foo.class")}; try { JarHell.checkJarHell(jars); @@ -199,8 +209,37 @@ public class JarHellTests extends ElasticsearchTestCase { public void testBadJDKVersionInJar() throws Exception { Path dir = createTempDir(); Manifest manifest = new Manifest(); - manifest.getMainAttributes().put(new Attributes.Name("X-Compile-Target-JDK"), "bogus"); + Attributes attributes = manifest.getMainAttributes(); + attributes.put(Attributes.Name.MANIFEST_VERSION, "1.0.0"); + attributes.put(new Attributes.Name("X-Compile-Target-JDK"), "bogus"); URL[] jars = {makeJar(dir, "foo.jar", manifest, "Foo.class")}; JarHell.checkJarHell(jars); } + + /** make sure if a plugin is compiled against the same ES version, it works */ + public void testGoodESVersionInJar() throws Exception { + Path dir = createTempDir(); + Manifest manifest = new Manifest(); + Attributes attributes = manifest.getMainAttributes(); + attributes.put(Attributes.Name.MANIFEST_VERSION, "1.0.0"); + attributes.put(new Attributes.Name("X-Compile-Elasticsearch-Version"), Version.CURRENT.toString()); + URL[] jars = {makeJar(dir, "foo.jar", manifest, "Foo.class")}; + JarHell.checkJarHell(jars); + } + + /** make sure if a plugin is compiled against a different ES version, it fails */ + public void testBadESVersionInJar() throws Exception { + Path dir = createTempDir(); + Manifest manifest = new Manifest(); + Attributes attributes = manifest.getMainAttributes(); + attributes.put(Attributes.Name.MANIFEST_VERSION, "1.0.0"); + attributes.put(new Attributes.Name("X-Compile-Elasticsearch-Version"), "1.0-bogus"); + URL[] jars = {makeJar(dir, "foo.jar", manifest, "Foo.class")}; + try { + JarHell.checkJarHell(jars); + fail("did not get expected exception"); + } catch (IllegalStateException e) { + assertTrue(e.getMessage().contains("requires Elasticsearch 1.0-bogus")); + } + } } diff --git a/core/src/test/java/org/elasticsearch/bootstrap/SecurityTests.java b/core/src/test/java/org/elasticsearch/bootstrap/SecurityTests.java index 51d08d05bf7..3c3db267292 100644 --- a/core/src/test/java/org/elasticsearch/bootstrap/SecurityTests.java +++ b/core/src/test/java/org/elasticsearch/bootstrap/SecurityTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.bootstrap; +import 
org.apache.lucene.util.Constants; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.test.ElasticsearchTestCase; @@ -207,6 +208,8 @@ public class SecurityTests extends ElasticsearchTestCase { /** When a configured dir is a symlink, test that permissions work on link target */ public void testSymlinkPermissions() throws IOException { + // see https://github.com/elastic/elasticsearch/issues/12170 + assumeFalse("windows does not automatically grant permission to the target of symlinks", Constants.WINDOWS); Path dir = createTempDir(); Path target = dir.resolve("target"); diff --git a/core/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityTest.java b/core/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityTest.java index 77360425f26..8315ffa32da 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityTest.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityTest.java @@ -283,7 +283,7 @@ public class BasicBackwardsCompatibilityTest extends ElasticsearchBackwardsCompa indexRandom(true, docs); assertAllShardsOnNodes("test", backwardsCluster().backwardsNodePattern()); - client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, "none")).get(); + disableAllocation("test"); backwardsCluster().allowOnAllNodes("test"); CountResponse countResponse = client().prepareCount().get(); assertHitCount(countResponse, numDocs); @@ -295,7 +295,7 @@ public class BasicBackwardsCompatibilityTest extends ElasticsearchBackwardsCompa } indexRandom(true, docs); } - client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, "all")).get(); + enableAllocation("test"); ensureYellow(); final int numIters = randomIntBetween(1, 20); for (int i = 0; i < numIters; i++) { @@ -328,7 +328,7 @@ public class BasicBackwardsCompatibilityTest extends ElasticsearchBackwardsCompa for (String index : indices) { assertAllShardsOnNodes(index, backwardsCluster().backwardsNodePattern()); } - client().admin().indices().prepareUpdateSettings(indices).setSettings(Settings.builder().put(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, "none")).get(); + disableAllocation(indices); backwardsCluster().allowOnAllNodes(indices); logClusterState(); boolean upgraded; @@ -346,7 +346,7 @@ public class BasicBackwardsCompatibilityTest extends ElasticsearchBackwardsCompa } indexRandom(true, docs); } while (upgraded); - client().admin().indices().prepareUpdateSettings(indices).setSettings(Settings.builder().put(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, "all")).get(); + enableAllocation(indices); ensureYellow(); CountResponse countResponse = client().prepareCount().get(); assertHitCount(countResponse, numDocs); diff --git a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityTests.java b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityTests.java index 7dd1839c617..76d44642e41 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityTests.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityTests.java @@ -24,6 +24,7 @@ import com.google.common.util.concurrent.ListenableFuture; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.util.LuceneTestCase; import 
org.apache.lucene.util.TestUtil; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.get.GetIndexResponse; import org.elasticsearch.action.get.GetResponse; @@ -38,7 +39,6 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.MultiDataPathUpgrader; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.index.IndexException; import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.shard.MergePolicyConfig; @@ -304,8 +304,8 @@ public class OldIndexBackwardsCompatibilityTests extends ElasticsearchIntegratio try { client().admin().indices().prepareOpen(indexName).get(); fail("Shouldn't be able to open an old index"); - } catch (IndexException ex) { - assertThat(ex.getMessage(), containsString("cannot open the index due to upgrade failure")); + } catch (IllegalStateException ex) { + assertThat(ex.getMessage(), containsString("was created before v0.90.0 and wasn't upgraded")); } unloadIndex(indexName); logger.info("--> Done testing " + index + ", took " + ((System.currentTimeMillis() - startTime) / 1000.0) + " seconds"); diff --git a/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatTests.java b/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatTests.java index 999acfdda25..a0d3dc4e10a 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatTests.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatTests.java @@ -40,6 +40,7 @@ import org.junit.Test; import java.io.IOException; import java.lang.reflect.Modifier; import java.net.URI; +import java.net.URISyntaxException; import java.nio.file.DirectoryStream; import java.nio.file.Files; import java.nio.file.Path; @@ -59,10 +60,25 @@ public class RestoreBackwardsCompatTests extends AbstractSnapshotTests { @Override protected Settings nodeSettings(int nodeOrdinal) { - return settingsBuilder() - .put(super.nodeSettings(nodeOrdinal)) - .put("path.repo", reposRoot()) - .build(); + if (randomBoolean()) { + // Configure using path.repo + return settingsBuilder() + .put(super.nodeSettings(nodeOrdinal)) + .put("path.repo", reposRoot()) + .build(); + } else { + // Configure using url white list + try { + URI repoJarPatternUri = new URI("jar:" + reposRoot().toUri().toString() + "*.zip!/repo/"); + return settingsBuilder() + .put(super.nodeSettings(nodeOrdinal)) + .putArray("repositories.url.allowed_urls", repoJarPatternUri.toString()) + .build(); + } catch (URISyntaxException ex) { + throw new IllegalArgumentException(ex); + } + + } } @Test @@ -142,7 +158,7 @@ public class RestoreBackwardsCompatTests extends AbstractSnapshotTests { private void createRepo(String prefix, String version, String repo) throws Exception { String repoFile = prefix + "-" + version + ".zip"; - URI repoFileUri = getClass().getResource(repoFile).toURI(); + URI repoFileUri = getDataPath(repoFile).toUri(); URI repoJarUri = new URI("jar:" + repoFileUri.toString() + "!/repo/"); logger.info("--> creating repository [{}] for version [{}]", repo, version); assertAcked(client().admin().cluster().preparePutRepository(repo) diff --git a/core/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java b/core/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java index 429cafdef54..8293fbb3a60 100644 --- 
a/core/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java +++ b/core/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java @@ -19,8 +19,6 @@ package org.elasticsearch.client.node; -import com.google.common.collect.ImmutableSet; - import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.GenericAction; @@ -63,7 +61,7 @@ public class NodeClientHeadersTests extends AbstractClientHeadersTests { private static class InternalTransportAction extends TransportAction { private InternalTransportAction(Settings settings, String actionName, ThreadPool threadPool) { - super(settings, actionName, threadPool, EMPTY_FILTERS); + super(settings, actionName, threadPool, EMPTY_FILTERS, null); } @Override diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterHealthResponsesTests.java b/core/src/test/java/org/elasticsearch/cluster/ClusterHealthResponsesTests.java index 00d41fdd7b9..17b150027ef 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterHealthResponsesTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterHealthResponsesTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.action.admin.cluster.health.ClusterIndexHealth; import org.elasticsearch.action.admin.cluster.health.ClusterShardHealth; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.routing.*; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -45,6 +46,7 @@ import static org.hamcrest.Matchers.*; public class ClusterHealthResponsesTests extends ElasticsearchTestCase { + private final IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(); private void assertIndexHealth(ClusterIndexHealth indexHealth, ShardCounter counter, IndexMetaData indexMetaData) { assertThat(indexHealth.getStatus(), equalTo(counter.status())); @@ -196,7 +198,7 @@ public class ClusterHealthResponsesTests extends ElasticsearchTestCase { int inFlight = randomIntBetween(0, 200); int delayedUnassigned = randomIntBetween(0, 200); TimeValue pendingTaskInQueueTime = TimeValue.timeValueMillis(randomIntBetween(1000, 100000)); - ClusterHealthResponse clusterHealth = new ClusterHealthResponse("bla", clusterState.metaData().concreteIndices(IndicesOptions.strictExpand(), (String[]) null), clusterState, pendingTasks, inFlight, delayedUnassigned, pendingTaskInQueueTime); + ClusterHealthResponse clusterHealth = new ClusterHealthResponse("bla", indexNameExpressionResolver.concreteIndices(clusterState, IndicesOptions.strictExpand(), (String[]) null), clusterState, pendingTasks, inFlight, delayedUnassigned, pendingTaskInQueueTime); logger.info("cluster status: {}, expected {}", clusterHealth.getStatus(), counter.status()); clusterHealth = maybeSerialize(clusterHealth); assertClusterHealth(clusterHealth, counter); @@ -232,7 +234,7 @@ public class ClusterHealthResponsesTests extends ElasticsearchTestCase { metaData.put(indexMetaData, true); routingTable.add(indexRoutingTable); ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build(); - ClusterHealthResponse clusterHealth = new ClusterHealthResponse("bla", clusterState.metaData().concreteIndices(IndicesOptions.strictExpand(), (String[]) null), clusterState, 0, 0, 0, 
TimeValue.timeValueMillis(0)); + ClusterHealthResponse clusterHealth = new ClusterHealthResponse("bla", indexNameExpressionResolver.concreteIndices(clusterState, IndicesOptions.strictExpand(), (String[]) null), clusterState, 0, 0, 0, TimeValue.timeValueMillis(0)); clusterHealth = maybeSerialize(clusterHealth); // currently we have no cluster level validation failures as index validation issues are reported per index. assertThat(clusterHealth.getValidationFailures(), Matchers.hasSize(0)); diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffPublishingTests.java b/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffPublishingTests.java index 10c296addb2..826634dcf95 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffPublishingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffPublishingTests.java @@ -88,7 +88,7 @@ public class ClusterStateDiffPublishingTests extends ElasticsearchTestCase { return createMockNode(name, settings, version, new PublishClusterStateAction.NewClusterStateListener() { @Override public void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) { - logger.debug("Node [{}] onNewClusterState version [{}], uuid [{}]", name, clusterState.version(), clusterState.uuid()); + logger.debug("Node [{}] onNewClusterState version [{}], uuid [{}]", name, clusterState.version(), clusterState.stateUUID()); newStateProcessed.onNewClusterStateProcessed(); } }); @@ -393,7 +393,7 @@ public class ClusterStateDiffPublishingTests extends ElasticsearchTestCase { MockNode nodeB = createMockNode("nodeB", noDiffPublishingSettings, Version.CURRENT, new PublishClusterStateAction.NewClusterStateListener() { @Override public void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) { - logger.debug("Got cluster state update, version [{}], guid [{}], from diff [{}]", clusterState.version(), clusterState.uuid(), clusterState.wasReadFromDiff()); + logger.debug("Got cluster state update, version [{}], guid [{}], from diff [{}]", clusterState.version(), clusterState.stateUUID(), clusterState.wasReadFromDiff()); assertFalse(clusterState.wasReadFromDiff()); newStateProcessed.onNewClusterStateProcessed(); } @@ -497,7 +497,7 @@ public class ClusterStateDiffPublishingTests extends ElasticsearchTestCase { } }); - ClusterState unserializableClusterState = new ClusterState(clusterState.version(), clusterState.uuid(), clusterState) { + ClusterState unserializableClusterState = new ClusterState(clusterState.version(), clusterState.stateUUID(), clusterState) { @Override public Diff diff(ClusterState previousState) { return new Diff() { @@ -616,7 +616,7 @@ public class ClusterStateDiffPublishingTests extends ElasticsearchTestCase { public static class DelegatingClusterState extends ClusterState { public DelegatingClusterState(ClusterState clusterState) { - super(clusterState.version(), clusterState.uuid(), clusterState); + super(clusterState.version(), clusterState.stateUUID(), clusterState); } diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffTests.java b/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffTests.java index 6b69422be2b..12967f6f34d 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffTests.java @@ -116,7 +116,7 @@ public class ClusterStateDiffTests extends ElasticsearchIntegrationTest { try { // Check non-diffable elements 
assertThat(clusterStateFromDiffs.version(), equalTo(clusterState.version())); - assertThat(clusterStateFromDiffs.uuid(), equalTo(clusterState.uuid())); + assertThat(clusterStateFromDiffs.stateUUID(), equalTo(clusterState.stateUUID())); // Check nodes assertThat(clusterStateFromDiffs.nodes().nodes(), equalTo(clusterState.nodes().nodes())); @@ -141,7 +141,7 @@ public class ClusterStateDiffTests extends ElasticsearchIntegrationTest { // Check metadata assertThat(clusterStateFromDiffs.metaData().version(), equalTo(clusterState.metaData().version())); - assertThat(clusterStateFromDiffs.metaData().uuid(), equalTo(clusterState.metaData().uuid())); + assertThat(clusterStateFromDiffs.metaData().clusterUUID(), equalTo(clusterState.metaData().clusterUUID())); assertThat(clusterStateFromDiffs.metaData().transientSettings(), equalTo(clusterState.metaData().transientSettings())); assertThat(clusterStateFromDiffs.metaData().persistentSettings(), equalTo(clusterState.metaData().persistentSettings())); assertThat(clusterStateFromDiffs.metaData().indices(), equalTo(clusterState.metaData().indices())); @@ -485,7 +485,7 @@ public class ClusterStateDiffTests extends ElasticsearchIntegrationTest { } break; case 2: - builder.settings(Settings.builder().put(part.settings()).put(IndexMetaData.SETTING_UUID, Strings.randomBase64UUID())); + builder.settings(Settings.builder().put(part.settings()).put(IndexMetaData.SETTING_INDEX_UUID, Strings.randomBase64UUID())); break; case 3: builder.putCustom(IndexWarmersMetaData.TYPE, randomWarmers()); diff --git a/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesTests.java b/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesTests.java index ea4c0eb5dee..b0b40496b91 100644 --- a/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesTests.java @@ -21,6 +21,7 @@ package org.elasticsearch.cluster; import com.google.common.base.Predicate; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.client.Client; import org.elasticsearch.common.Priority; @@ -44,6 +45,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitC import static org.hamcrest.Matchers.*; @ClusterScope(scope = Scope.TEST, numDataNodes = 0) +@LuceneTestCase.AwaitsFix(bugUrl = "boaz is looking at failures in this test class. 
Example failure: http://build-us-00.elastic.co/job/es_g1gc_master_metal/11653/") public class MinimumMasterNodesTests extends ElasticsearchIntegrationTest { @Test diff --git a/core/src/test/java/org/elasticsearch/cluster/SimpleClusterStateTests.java b/core/src/test/java/org/elasticsearch/cluster/SimpleClusterStateTests.java index c102251862f..d3b26ac6307 100644 --- a/core/src/test/java/org/elasticsearch/cluster/SimpleClusterStateTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/SimpleClusterStateTests.java @@ -30,7 +30,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.indices.IndexMissingException; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.hamcrest.CollectionAssertions; import org.junit.Before; @@ -195,14 +195,14 @@ public class SimpleClusterStateTests extends ElasticsearchIntegrationTest { assertThat(clusterStateResponse.getState().metaData().indices().isEmpty(), is(true)); } - @Test(expected=IndexMissingException.class) + @Test(expected=IndexNotFoundException.class) public void testIndicesOptionsOnAllowNoIndicesFalse() throws Exception { // empty wildcard expansion throws exception when allowNoIndices is turned off IndicesOptions allowNoIndices = IndicesOptions.fromOptions(false, false, true, false); client().admin().cluster().prepareState().clear().setMetaData(true).setIndices("a*").setIndicesOptions(allowNoIndices).get(); } - @Test(expected=IndexMissingException.class) + @Test(expected=IndexNotFoundException.class) public void testIndicesIgnoreUnavailableFalse() throws Exception { // ignore_unavailable set to false throws exception when allowNoIndices is turned off IndicesOptions allowNoIndices = IndicesOptions.fromOptions(false, true, true, false); diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java new file mode 100644 index 00000000000..9ffaaa6c331 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java @@ -0,0 +1,900 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cluster.metadata; + +import com.google.common.collect.Sets; +import org.elasticsearch.Version; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData.State; +import org.elasticsearch.common.Strings; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.indices.IndexClosedException; +import org.elasticsearch.test.ElasticsearchTestCase; +import org.junit.Test; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; + +import static com.google.common.collect.Sets.newHashSet; +import static org.hamcrest.Matchers.*; + +/** + */ +public class IndexNameExpressionResolverTests extends ElasticsearchTestCase { + + @Test + public void testIndexOptions_strict() { + MetaData.Builder mdBuilder = MetaData.builder() + .put(indexBuilder("foo").putAlias(AliasMetaData.builder("foofoobar"))) + .put(indexBuilder("foobar").putAlias(AliasMetaData.builder("foofoobar"))) + .put(indexBuilder("foofoo-closed").state(IndexMetaData.State.CLOSE)) + .put(indexBuilder("foofoo").putAlias(AliasMetaData.builder("barbaz"))); + + ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); + IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(); + + IndicesOptions[] indicesOptions = new IndicesOptions[]{ IndicesOptions.strictExpandOpen(), IndicesOptions.strictExpand()}; + for (IndicesOptions options : indicesOptions) { + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, options); + String[] results = indexNameExpressionResolver.concreteIndices(context, "foo"); + assertEquals(1, results.length); + assertEquals("foo", results[0]); + + try { + indexNameExpressionResolver.concreteIndices(context, "bar"); + fail(); + } catch (IndexNotFoundException e) { + assertThat(e.getIndex(), equalTo("bar")); + } + + results = indexNameExpressionResolver.concreteIndices(context, "foofoo", "foobar"); + assertEquals(2, results.length); + assertThat(results, arrayContainingInAnyOrder("foofoo", "foobar")); + + results = indexNameExpressionResolver.concreteIndices(context, "foofoobar"); + assertEquals(new HashSet<>(Arrays.asList("foo", "foobar")), + new HashSet<>(Arrays.asList(results))); + + try { + indexNameExpressionResolver.concreteIndices(context, "bar"); + fail(); + } catch (IndexNotFoundException e) { + assertThat(e.getIndex(), equalTo("bar")); + } + + try { + indexNameExpressionResolver.concreteIndices(context, "foo", "bar"); + fail(); + } catch (IndexNotFoundException e) { + assertThat(e.getIndex(), equalTo("bar")); + } + + results = indexNameExpressionResolver.concreteIndices(context, "barbaz", "foobar"); + assertEquals(2, results.length); + assertThat(results, arrayContainingInAnyOrder("foofoo", "foobar")); + + try { + indexNameExpressionResolver.concreteIndices(context, "barbaz", "bar"); + fail(); + } catch (IndexNotFoundException e) { + assertThat(e.getIndex(), equalTo("bar")); + } + + results = indexNameExpressionResolver.concreteIndices(context, "baz*"); + assertThat(results, emptyArray()); + + results = indexNameExpressionResolver.concreteIndices(context, "foo", "baz*"); + assertEquals(1, results.length); + assertEquals("foo", results[0]); + } + + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictExpandOpen()); + 
String[] results = indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY); + assertEquals(3, results.length); + + results = indexNameExpressionResolver.concreteIndices(context, null); + assertEquals(3, results.length); + + context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictExpand()); + results = indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY); + assertEquals(4, results.length); + + results = indexNameExpressionResolver.concreteIndices(context, null); + assertEquals(4, results.length); + + context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictExpandOpen()); + results = indexNameExpressionResolver.concreteIndices(context, "foofoo*"); + assertEquals(3, results.length); + assertThat(results, arrayContainingInAnyOrder("foo", "foobar", "foofoo")); + + context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictExpand()); + results = indexNameExpressionResolver.concreteIndices(context, "foofoo*"); + assertEquals(4, results.length); + assertThat(results, arrayContainingInAnyOrder("foo", "foobar", "foofoo", "foofoo-closed")); + } + + @Test + public void testIndexOptions_lenient() { + MetaData.Builder mdBuilder = MetaData.builder() + .put(indexBuilder("foo").putAlias(AliasMetaData.builder("foofoobar"))) + .put(indexBuilder("foobar").putAlias(AliasMetaData.builder("foofoobar"))) + .put(indexBuilder("foofoo-closed").state(IndexMetaData.State.CLOSE)) + .put(indexBuilder("foofoo").putAlias(AliasMetaData.builder("barbaz"))); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); + IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(); + + IndicesOptions lenientExpand = IndicesOptions.fromOptions(true, true, true, true); + IndicesOptions[] indicesOptions = new IndicesOptions[]{ IndicesOptions.lenientExpandOpen(), lenientExpand}; + for (IndicesOptions options : indicesOptions) { + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, options); + String[] results = indexNameExpressionResolver.concreteIndices(context, "foo"); + assertEquals(1, results.length); + assertEquals("foo", results[0]); + + results = indexNameExpressionResolver.concreteIndices(context, "bar"); + assertThat(results, emptyArray()); + + results = indexNameExpressionResolver.concreteIndices(context, "foofoo", "foobar"); + assertEquals(2, results.length); + assertThat(results, arrayContainingInAnyOrder("foofoo", "foobar")); + + results = indexNameExpressionResolver.concreteIndices(context, "foofoobar"); + assertEquals(2, results.length); + assertEquals(new HashSet<>(Arrays.asList("foo", "foobar")), + new HashSet<>(Arrays.asList(results))); + + results = indexNameExpressionResolver.concreteIndices(context, "foo", "bar"); + assertEquals(1, results.length); + assertThat(results, arrayContainingInAnyOrder("foo")); + + results = indexNameExpressionResolver.concreteIndices(context, "barbaz", "foobar"); + assertEquals(2, results.length); + assertThat(results, arrayContainingInAnyOrder("foofoo", "foobar")); + + results = indexNameExpressionResolver.concreteIndices(context, "barbaz", "bar"); + assertEquals(1, results.length); + assertThat(results, arrayContainingInAnyOrder("foofoo")); + + results = indexNameExpressionResolver.concreteIndices(context, "baz*"); + assertThat(results, emptyArray()); + + results = indexNameExpressionResolver.concreteIndices(context, "foo", "baz*"); + assertEquals(1, results.length); + 
assertEquals("foo", results[0]); + } + + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen()); + String[] results = indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY); + assertEquals(3, results.length); + + context = new IndexNameExpressionResolver.Context(state, lenientExpand); + results = indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY); + assertEquals(4, results.length); + + context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen()); + results = indexNameExpressionResolver.concreteIndices(context, "foofoo*"); + assertEquals(3, results.length); + assertThat(results, arrayContainingInAnyOrder("foo", "foobar", "foofoo")); + + context = new IndexNameExpressionResolver.Context(state, lenientExpand); + results = indexNameExpressionResolver.concreteIndices(context, "foofoo*"); + assertEquals(4, results.length); + assertThat(results, arrayContainingInAnyOrder("foo", "foobar", "foofoo", "foofoo-closed")); + } + + @Test + public void testIndexOptions_allowUnavailableDisallowEmpty() { + MetaData.Builder mdBuilder = MetaData.builder() + .put(indexBuilder("foo")) + .put(indexBuilder("foobar")) + .put(indexBuilder("foofoo-closed").state(IndexMetaData.State.CLOSE)) + .put(indexBuilder("foofoo").putAlias(AliasMetaData.builder("barbaz"))); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); + IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(); + + IndicesOptions expandOpen = IndicesOptions.fromOptions(true, false, true, false); + IndicesOptions expand = IndicesOptions.fromOptions(true, false, true, true); + IndicesOptions[] indicesOptions = new IndicesOptions[]{expandOpen, expand}; + + for (IndicesOptions options : indicesOptions) { + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, options); + String[] results = indexNameExpressionResolver.concreteIndices(context, "foo"); + assertEquals(1, results.length); + assertEquals("foo", results[0]); + + try { + indexNameExpressionResolver.concreteIndices(context, "bar"); + fail(); + } catch(IndexNotFoundException e) { + assertThat(e.getIndex(), equalTo("bar")); + } + + try { + indexNameExpressionResolver.concreteIndices(context, "baz*"); + fail(); + } catch (IndexNotFoundException e) { + assertThat(e.getIndex(), equalTo("baz*")); + } + + try { + indexNameExpressionResolver.concreteIndices(context, "foo", "baz*"); + fail(); + } catch (IndexNotFoundException e) { + assertThat(e.getIndex(), equalTo("baz*")); + } + } + + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, expandOpen); + String[] results = indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY); + assertEquals(3, results.length); + + context = new IndexNameExpressionResolver.Context(state, expand); + results = indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY); + assertEquals(4, results.length); + } + + @Test + public void testIndexOptions_wildcardExpansion() { + MetaData.Builder mdBuilder = MetaData.builder() + .put(indexBuilder("foo").state(IndexMetaData.State.CLOSE)) + .put(indexBuilder("bar")) + .put(indexBuilder("foobar").putAlias(AliasMetaData.builder("barbaz"))); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); + IndexNameExpressionResolver indexNameExpressionResolver = new 
IndexNameExpressionResolver(); + + // Only closed + IndicesOptions options = IndicesOptions.fromOptions(false, true, false, true); + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, options); + String[] results = indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY); + assertEquals(1, results.length); + assertEquals("foo", results[0]); + + results = indexNameExpressionResolver.concreteIndices(context, "foo*"); + assertEquals(1, results.length); + assertEquals("foo", results[0]); + + // no wildcards, so wildcard expansion doesn't apply + results = indexNameExpressionResolver.concreteIndices(context, "bar"); + assertEquals(1, results.length); + assertEquals("bar", results[0]); + + // Only open + options = IndicesOptions.fromOptions(false, true, true, false); + context = new IndexNameExpressionResolver.Context(state, options); + results = indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY); + assertEquals(2, results.length); + assertThat(results, arrayContainingInAnyOrder("bar", "foobar")); + + results = indexNameExpressionResolver.concreteIndices(context, "foo*"); + assertEquals(1, results.length); + assertEquals("foobar", results[0]); + + results = indexNameExpressionResolver.concreteIndices(context, "bar"); + assertEquals(1, results.length); + assertEquals("bar", results[0]); + + // Open and closed + options = IndicesOptions.fromOptions(false, true, true, true); + context = new IndexNameExpressionResolver.Context(state, options); + results = indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY); + assertEquals(3, results.length); + assertThat(results, arrayContainingInAnyOrder("bar", "foobar", "foo")); + + results = indexNameExpressionResolver.concreteIndices(context, "foo*"); + assertEquals(2, results.length); + assertThat(results, arrayContainingInAnyOrder("foobar", "foo")); + + results = indexNameExpressionResolver.concreteIndices(context, "bar"); + assertEquals(1, results.length); + assertEquals("bar", results[0]); + + results = indexNameExpressionResolver.concreteIndices(context, "-foo*"); + assertEquals(1, results.length); + assertEquals("bar", results[0]); + + results = indexNameExpressionResolver.concreteIndices(context, "-*"); + assertEquals(0, results.length); + + options = IndicesOptions.fromOptions(false, false, true, true); + context = new IndexNameExpressionResolver.Context(state, options); + try { + indexNameExpressionResolver.concreteIndices(context, "-*"); + fail(); + } catch (IndexNotFoundException e) { + assertThat(e.getResourceId().toString(), equalTo("[-*]")); + } + } + + @Test + public void testIndexOptions_noExpandWildcards() { + MetaData.Builder mdBuilder = MetaData.builder() + .put(indexBuilder("foo").putAlias(AliasMetaData.builder("foofoobar"))) + .put(indexBuilder("foobar").putAlias(AliasMetaData.builder("foofoobar"))) + .put(indexBuilder("foofoo-closed").state(IndexMetaData.State.CLOSE)) + .put(indexBuilder("foofoo").putAlias(AliasMetaData.builder("barbaz"))); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); + IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(); + + //ignore unavailable and allow no indices + { + IndicesOptions noExpandLenient = IndicesOptions.fromOptions(true, true, false, false); + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, noExpandLenient); + String[] results =
indexNameExpressionResolver.concreteIndices(context, "baz*"); + assertThat(results, emptyArray()); + + results = indexNameExpressionResolver.concreteIndices(context, "foo", "baz*"); + assertEquals(1, results.length); + assertEquals("foo", results[0]); + + results = indexNameExpressionResolver.concreteIndices(context, "foofoobar"); + assertEquals(2, results.length); + assertThat(results, arrayContainingInAnyOrder("foo", "foobar")); + + results = indexNameExpressionResolver.concreteIndices(context, null); + assertEquals(0, results.length); + + results = indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY); + assertEquals(0, results.length); + } + + //ignore unavailable but don't allow no indices + { + IndicesOptions noExpandDisallowEmpty = IndicesOptions.fromOptions(true, false, false, false); + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, noExpandDisallowEmpty); + try { + indexNameExpressionResolver.concreteIndices(context, "baz*"); + fail(); + } catch (IndexNotFoundException e) { + assertThat(e.getIndex(), equalTo("baz*")); + } + + String[] results = indexNameExpressionResolver.concreteIndices(context, "foo", "baz*"); + assertEquals(1, results.length); + assertEquals("foo", results[0]); + + results = indexNameExpressionResolver.concreteIndices(context, "foofoobar"); + assertEquals(2, results.length); + assertThat(results, arrayContainingInAnyOrder("foo", "foobar")); + } + + //error on unavailable but allow no indices + { + IndicesOptions noExpandErrorUnavailable = IndicesOptions.fromOptions(false, true, false, false); + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, noExpandErrorUnavailable); + String[] results = indexNameExpressionResolver.concreteIndices(context, "baz*"); + assertThat(results, emptyArray()); + + try { + indexNameExpressionResolver.concreteIndices(context, "foo", "baz*"); + fail(); + } catch (IndexNotFoundException e) { + assertThat(e.getIndex(), equalTo("baz*")); + } + + results = indexNameExpressionResolver.concreteIndices(context, "foofoobar"); + assertEquals(2, results.length); + assertThat(results, arrayContainingInAnyOrder("foo", "foobar")); + } + + //error on both unavailable and no indices + { + IndicesOptions noExpandStrict = IndicesOptions.fromOptions(false, false, false, false); + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, noExpandStrict); + try { + indexNameExpressionResolver.concreteIndices(context, "baz*"); + fail(); + } catch (IndexNotFoundException e) { + assertThat(e.getIndex(), equalTo("baz*")); + } + + try { + indexNameExpressionResolver.concreteIndices(context, "foo", "baz*"); + fail(); + } catch (IndexNotFoundException e) { + assertThat(e.getIndex(), equalTo("baz*")); + } + + String[] results = indexNameExpressionResolver.concreteIndices(context, "foofoobar"); + assertEquals(2, results.length); + assertThat(results, arrayContainingInAnyOrder("foo", "foobar")); + } + } + + @Test + public void testIndexOptions_singleIndexNoExpandWildcards() { + MetaData.Builder mdBuilder = MetaData.builder() + .put(indexBuilder("foo").putAlias(AliasMetaData.builder("foofoobar"))) + .put(indexBuilder("foobar").putAlias(AliasMetaData.builder("foofoobar"))) + .put(indexBuilder("foofoo-closed").state(IndexMetaData.State.CLOSE)) + .put(indexBuilder("foofoo").putAlias(AliasMetaData.builder("barbaz"))); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); + 
IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(); + + //error on both unavailable and no indices + every alias needs to expand to a single index + + try { + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed()); + indexNameExpressionResolver.concreteIndices(context, "baz*"); + fail(); + } catch (IndexNotFoundException e) { + assertThat(e.getIndex(), equalTo("baz*")); + } + + try { + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed()); + indexNameExpressionResolver.concreteIndices(context, "foo", "baz*"); + fail(); + } catch (IndexNotFoundException e) { + assertThat(e.getIndex(), equalTo("baz*")); + } + + try { + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed()); + indexNameExpressionResolver.concreteIndices(context, "foofoobar"); + fail(); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("Alias [foofoobar] has more than one indices associated with it")); + } + + try { + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed()); + indexNameExpressionResolver.concreteIndices(context, "foo", "foofoobar"); + fail(); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("Alias [foofoobar] has more than one indices associated with it")); + } + + try { + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed()); + indexNameExpressionResolver.concreteIndices(context, "foofoo-closed", "foofoobar"); + fail(); + } catch (IndexClosedException e) { + assertThat(e.getMessage(), equalTo("closed")); + assertEquals("foofoo-closed", e.getIndex()); + } + + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed()); + String[] results = indexNameExpressionResolver.concreteIndices(context, "foo", "barbaz"); + assertEquals(2, results.length); + assertThat(results, arrayContainingInAnyOrder("foo", "foofoo")); + } + + @Test + public void testIndexOptions_emptyCluster() { + ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(MetaData.builder().build()).build(); + IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(); + + IndicesOptions options = IndicesOptions.strictExpandOpen(); + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, options); + String[] results = indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY); + assertThat(results, emptyArray()); + try { + indexNameExpressionResolver.concreteIndices(context, "foo"); + fail(); + } catch (IndexNotFoundException e) { + assertThat(e.getIndex(), equalTo("foo")); + } + results = indexNameExpressionResolver.concreteIndices(context, "foo*"); + assertThat(results, emptyArray()); + try { + indexNameExpressionResolver.concreteIndices(context, "foo*", "bar"); + fail(); + } catch (IndexNotFoundException e) { + assertThat(e.getIndex(), equalTo("bar")); + } + + context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen()); + results =
indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY); + assertThat(results, emptyArray()); + results = indexNameExpressionResolver.concreteIndices(context, "foo"); + assertThat(results, emptyArray()); + results = indexNameExpressionResolver.concreteIndices(context, "foo*"); + assertThat(results, emptyArray()); + results = indexNameExpressionResolver.concreteIndices(context, "foo*", "bar"); + assertThat(results, emptyArray()); + + context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, false, true, false)); + try { + indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY); + } catch (IndexNotFoundException e) { + assertThat(e.getResourceId().toString(), equalTo("[_all]")); + } + } + + private IndexMetaData.Builder indexBuilder(String index) { + return IndexMetaData.builder(index).settings(settings(Version.CURRENT).put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); + } + + @Test(expected = IndexNotFoundException.class) + public void testConcreteIndicesIgnoreIndicesOneMissingIndex() { + MetaData.Builder mdBuilder = MetaData.builder() + .put(indexBuilder("testXXX")) + .put(indexBuilder("kuku")); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); + IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(); + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictExpandOpen()); + + indexNameExpressionResolver.concreteIndices(context, "testZZZ"); + } + + @Test + public void testConcreteIndicesIgnoreIndicesOneMissingIndexOtherFound() { + MetaData.Builder mdBuilder = MetaData.builder() + .put(indexBuilder("testXXX")) + .put(indexBuilder("kuku")); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); + IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(); + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen()); + + assertThat(newHashSet(indexNameExpressionResolver.concreteIndices(context, "testXXX", "testZZZ")), equalTo(newHashSet("testXXX"))); + } + + @Test(expected = IndexNotFoundException.class) + public void testConcreteIndicesIgnoreIndicesAllMissing() { + MetaData.Builder mdBuilder = MetaData.builder() + .put(indexBuilder("testXXX")) + .put(indexBuilder("kuku")); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); + IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(); + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictExpandOpen()); + + assertThat(newHashSet(indexNameExpressionResolver.concreteIndices(context, "testMo", "testMahdy")), equalTo(newHashSet("testXXX"))); + } + + @Test + public void testConcreteIndicesIgnoreIndicesEmptyRequest() { + MetaData.Builder mdBuilder = MetaData.builder() + .put(indexBuilder("testXXX")) + .put(indexBuilder("kuku")); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); + IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(); + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen()); + + 
assertThat(newHashSet(indexNameExpressionResolver.concreteIndices(context, new String[]{})), equalTo(Sets.newHashSet("kuku", "testXXX"))); + } + + @Test + public void testConcreteIndicesWildcardExpansion() { + MetaData.Builder mdBuilder = MetaData.builder() + .put(indexBuilder("testXXX").state(State.OPEN)) + .put(indexBuilder("testXXY").state(State.OPEN)) + .put(indexBuilder("testXYY").state(State.CLOSE)) + .put(indexBuilder("testYYY").state(State.OPEN)) + .put(indexBuilder("testYYX").state(State.OPEN)); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); + IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(); + + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, true, false, false)); + assertThat(newHashSet(indexNameExpressionResolver.concreteIndices(context, "testX*")), equalTo(new HashSet())); + context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, true, true, false)); + assertThat(newHashSet(indexNameExpressionResolver.concreteIndices(context, "testX*")), equalTo(newHashSet("testXXX", "testXXY"))); + context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, true, false, true)); + assertThat(newHashSet(indexNameExpressionResolver.concreteIndices(context, "testX*")), equalTo(newHashSet("testXYY"))); + context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, true, true, true)); + assertThat(newHashSet(indexNameExpressionResolver.concreteIndices(context, "testX*")), equalTo(newHashSet("testXXX", "testXXY", "testXYY"))); + } + + /** + * test resolving _all pattern (null, empty array or "_all") for random IndicesOptions + */ + @Test + public void testConcreteIndicesAllPatternRandom() { + for (int i = 0; i < 10; i++) { + String[] allIndices = null; + switch (randomIntBetween(0, 2)) { + case 0: + break; + case 1: + allIndices = new String[0]; + break; + case 2: + allIndices = new String[] { MetaData.ALL }; + break; + } + + IndicesOptions indicesOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(MetaData.builder().build()).build(); + IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(); + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, indicesOptions); + + // with no indices, asking for all indices should return empty list or exception, depending on indices options + if (indicesOptions.allowNoIndices()) { + String[] concreteIndices = indexNameExpressionResolver.concreteIndices(context, allIndices); + assertThat(concreteIndices, notNullValue()); + assertThat(concreteIndices.length, equalTo(0)); + } else { + checkCorrectException(indexNameExpressionResolver, context, allIndices); + } + + // with existing indices, asking for all indices should return all open/closed indices depending on options + MetaData.Builder mdBuilder = MetaData.builder() + .put(indexBuilder("aaa").state(State.OPEN).putAlias(AliasMetaData.builder("aaa_alias1"))) + .put(indexBuilder("bbb").state(State.OPEN).putAlias(AliasMetaData.builder("bbb_alias1"))) + .put(indexBuilder("ccc").state(State.CLOSE).putAlias(AliasMetaData.builder("ccc_alias1"))); + state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); + 
indexNameExpressionResolver = new IndexNameExpressionResolver(); + context = new IndexNameExpressionResolver.Context(state, indicesOptions); + if (indicesOptions.expandWildcardsOpen() || indicesOptions.expandWildcardsClosed() || indicesOptions.allowNoIndices()) { + String[] concreteIndices = indexNameExpressionResolver.concreteIndices(context, allIndices); + assertThat(concreteIndices, notNullValue()); + int expectedNumberOfIndices = 0; + if (indicesOptions.expandWildcardsOpen()) { + expectedNumberOfIndices += 2; + } + if (indicesOptions.expandWildcardsClosed()) { + expectedNumberOfIndices += 1; + } + assertThat(concreteIndices.length, equalTo(expectedNumberOfIndices)); + } else { + checkCorrectException(indexNameExpressionResolver, context, allIndices); + } + } + } + + /** + * check for correct exception type depending on indicesOptions and provided index name list + */ + private void checkCorrectException(IndexNameExpressionResolver indexNameExpressionResolver, IndexNameExpressionResolver.Context context, String[] allIndices) { + try { + indexNameExpressionResolver.concreteIndices(context, allIndices); + fail("wildcard expansion should trigger IndexNotFoundException"); + } catch (IndexNotFoundException e) { + // expected + } + } + + /** + * test resolving wildcard pattern that matches no index or alias for random IndicesOptions + */ + @Test + public void testConcreteIndicesWildcardNoMatch() { + for (int i = 0; i < 10; i++) { + IndicesOptions indicesOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()); + MetaData.Builder mdBuilder = MetaData.builder() + .put(indexBuilder("aaa").state(State.OPEN).putAlias(AliasMetaData.builder("aaa_alias1"))) + .put(indexBuilder("bbb").state(State.OPEN).putAlias(AliasMetaData.builder("bbb_alias1"))) + .put(indexBuilder("ccc").state(State.CLOSE).putAlias(AliasMetaData.builder("ccc_alias1"))); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); + IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(); + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, indicesOptions); + + // asking for a non-existing wildcard pattern should return an empty list or an exception + if (indicesOptions.allowNoIndices()) { + String[] concreteIndices = indexNameExpressionResolver.concreteIndices(context, "Foo*"); + assertThat(concreteIndices, notNullValue()); + assertThat(concreteIndices.length, equalTo(0)); + } else { + try { + indexNameExpressionResolver.concreteIndices(context, "Foo*"); + fail("expecting exception when result is empty and allowNoIndices=false"); + } catch (IndexNotFoundException e) { + // expected exception + } + } + } + } + + @Test + public void testIsAllIndices_null() throws Exception { + assertThat(IndexNameExpressionResolver.isAllIndices(null), equalTo(true)); + } + + @Test + public void testIsAllIndices_empty() throws Exception { + assertThat(IndexNameExpressionResolver.isAllIndices(Collections.emptyList()), equalTo(true)); + } + + @Test + public void testIsAllIndices_explicitAll() throws Exception { + assertThat(IndexNameExpressionResolver.isAllIndices(Arrays.asList("_all")), equalTo(true)); + } + + @Test + public void testIsAllIndices_explicitAllPlusOther() throws Exception { + assertThat(IndexNameExpressionResolver.isAllIndices(Arrays.asList("_all", "other")), equalTo(false)); + } + + @Test + public void testIsAllIndices_normalIndexes() throws Exception { +
assertThat(IndexNameExpressionResolver.isAllIndices(Arrays.asList("index1", "index2", "index3")), equalTo(false)); + } + + @Test + public void testIsAllIndices_wildcard() throws Exception { + assertThat(IndexNameExpressionResolver.isAllIndices(Arrays.asList("*")), equalTo(false)); + } + + @Test + public void testIsExplicitAllIndices_null() throws Exception { + assertThat(IndexNameExpressionResolver.isExplicitAllPattern(null), equalTo(false)); + } + + @Test + public void testIsExplicitAllIndices_empty() throws Exception { + assertThat(IndexNameExpressionResolver.isExplicitAllPattern(Collections.emptyList()), equalTo(false)); + } + + @Test + public void testIsExplicitAllIndices_explicitAll() throws Exception { + assertThat(IndexNameExpressionResolver.isExplicitAllPattern(Arrays.asList("_all")), equalTo(true)); + } + + @Test + public void testIsExplicitAllIndices_explicitAllPlusOther() throws Exception { + assertThat(IndexNameExpressionResolver.isExplicitAllPattern(Arrays.asList("_all", "other")), equalTo(false)); + } + + @Test + public void testIsExplicitAllIndices_normalIndexes() throws Exception { + assertThat(IndexNameExpressionResolver.isExplicitAllPattern(Arrays.asList("index1", "index2", "index3")), equalTo(false)); + } + + @Test + public void testIsExplicitAllIndices_wildcard() throws Exception { + assertThat(IndexNameExpressionResolver.isExplicitAllPattern(Arrays.asList("*")), equalTo(false)); + } + + @Test + public void testIsPatternMatchingAllIndices_explicitList() throws Exception { + //even though it does identify all indices, it's not a pattern but just an explicit list of them + String[] concreteIndices = new String[]{"index1", "index2", "index3"}; + MetaData metaData = metaDataBuilder(concreteIndices); + IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(); + assertThat(indexNameExpressionResolver.isPatternMatchingAllIndices(metaData, concreteIndices, concreteIndices), equalTo(false)); + } + + @Test + public void testIsPatternMatchingAllIndices_onlyWildcard() throws Exception { + String[] indicesOrAliases = new String[]{"*"}; + String[] concreteIndices = new String[]{"index1", "index2", "index3"}; + MetaData metaData = metaDataBuilder(concreteIndices); + IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(); + assertThat(indexNameExpressionResolver.isPatternMatchingAllIndices(metaData, indicesOrAliases, concreteIndices), equalTo(true)); + } + + @Test + public void testIsPatternMatchingAllIndices_matchingTrailingWildcard() throws Exception { + String[] indicesOrAliases = new String[]{"index*"}; + String[] concreteIndices = new String[]{"index1", "index2", "index3"}; + MetaData metaData = metaDataBuilder(concreteIndices); + IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(); + assertThat(indexNameExpressionResolver.isPatternMatchingAllIndices(metaData, indicesOrAliases, concreteIndices), equalTo(true)); + } + + @Test + public void testIsPatternMatchingAllIndices_nonMatchingTrailingWildcard() throws Exception { + String[] indicesOrAliases = new String[]{"index*"}; + String[] concreteIndices = new String[]{"index1", "index2", "index3"}; + String[] allConcreteIndices = new String[]{"index1", "index2", "index3", "a", "b"}; + MetaData metaData = metaDataBuilder(allConcreteIndices); + IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(); + assertThat(indexNameExpressionResolver.isPatternMatchingAllIndices(metaData, 
indicesOrAliases, concreteIndices), equalTo(false)); + } + + @Test + public void testIsPatternMatchingAllIndices_matchingSingleExclusion() throws Exception { + String[] indicesOrAliases = new String[]{"-index1", "+index1"}; + String[] concreteIndices = new String[]{"index1", "index2", "index3"}; + MetaData metaData = metaDataBuilder(concreteIndices); + IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(); + assertThat(indexNameExpressionResolver.isPatternMatchingAllIndices(metaData, indicesOrAliases, concreteIndices), equalTo(true)); + } + + @Test + public void testIsPatternMatchingAllIndices_nonMatchingSingleExclusion() throws Exception { + String[] indicesOrAliases = new String[]{"-index1"}; + String[] concreteIndices = new String[]{"index2", "index3"}; + String[] allConcreteIndices = new String[]{"index1", "index2", "index3"}; + MetaData metaData = metaDataBuilder(allConcreteIndices); + IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(); + assertThat(indexNameExpressionResolver.isPatternMatchingAllIndices(metaData, indicesOrAliases, concreteIndices), equalTo(false)); + } + + @Test + public void testIsPatternMatchingAllIndices_matchingTrailingWildcardAndExclusion() throws Exception { + String[] indicesOrAliases = new String[]{"index*", "-index1", "+index1"}; + String[] concreteIndices = new String[]{"index1", "index2", "index3"}; + MetaData metaData = metaDataBuilder(concreteIndices); + IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(); + assertThat(indexNameExpressionResolver.isPatternMatchingAllIndices(metaData, indicesOrAliases, concreteIndices), equalTo(true)); + } + + @Test + public void testIsPatternMatchingAllIndices_nonMatchingTrailingWildcardAndExclusion() throws Exception { + String[] indicesOrAliases = new String[]{"index*", "-index1"}; + String[] concreteIndices = new String[]{"index2", "index3"}; + String[] allConcreteIndices = new String[]{"index1", "index2", "index3"}; + MetaData metaData = metaDataBuilder(allConcreteIndices); + IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(); + assertThat(indexNameExpressionResolver.isPatternMatchingAllIndices(metaData, indicesOrAliases, concreteIndices), equalTo(false)); + } + + @Test + public void testIndexOptions_failClosedIndicesAndAliases() { + MetaData.Builder mdBuilder = MetaData.builder() + .put(indexBuilder("foo1-closed").state(IndexMetaData.State.CLOSE).putAlias(AliasMetaData.builder("foobar1-closed")).putAlias(AliasMetaData.builder("foobar2-closed"))) + .put(indexBuilder("foo2-closed").state(IndexMetaData.State.CLOSE).putAlias(AliasMetaData.builder("foobar2-closed"))) + .put(indexBuilder("foo3").putAlias(AliasMetaData.builder("foobar2-closed"))); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); + IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(); + + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictExpandOpenAndForbidClosed()); + try { + indexNameExpressionResolver.concreteIndices(context, "foo1-closed"); + fail("foo1-closed should be closed, but it is open"); + } catch (IndexClosedException e) { + // expected + } + + try { + indexNameExpressionResolver.concreteIndices(context, "foobar1-closed"); + fail("foo1-closed should be closed, but it is open"); + } catch (IndexClosedException e) { + // expected + } + + 
context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, context.getOptions().allowNoIndices(), context.getOptions().expandWildcardsOpen(), context.getOptions().expandWildcardsClosed(), context.getOptions())); + String[] results = indexNameExpressionResolver.concreteIndices(context, "foo1-closed"); + assertThat(results, emptyArray()); + + results = indexNameExpressionResolver.concreteIndices(context, "foobar1-closed"); + assertThat(results, emptyArray()); + + context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen()); + results = indexNameExpressionResolver.concreteIndices(context, "foo1-closed"); + assertThat(results, arrayWithSize(1)); + assertThat(results, arrayContaining("foo1-closed")); + + results = indexNameExpressionResolver.concreteIndices(context, "foobar1-closed"); + assertThat(results, arrayWithSize(1)); + assertThat(results, arrayContaining("foo1-closed")); + + // testing an alias pointing to three indices: + context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictExpandOpenAndForbidClosed()); + try { + indexNameExpressionResolver.concreteIndices(context, "foobar2-closed"); + fail("foo2-closed should be closed, but it is open"); + } catch (IndexClosedException e) { + // expected + } + + context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, context.getOptions().allowNoIndices(), context.getOptions().expandWildcardsOpen(), context.getOptions().expandWildcardsClosed(), context.getOptions())); + results = indexNameExpressionResolver.concreteIndices(context, "foobar2-closed"); + assertThat(results, arrayWithSize(1)); + assertThat(results, arrayContaining("foo3")); + + context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen()); + results = indexNameExpressionResolver.concreteIndices(context, "foobar2-closed"); + assertThat(results, arrayWithSize(3)); + assertThat(results, arrayContainingInAnyOrder("foo1-closed", "foo2-closed", "foo3")); + } + + private MetaData metaDataBuilder(String... indices) { + MetaData.Builder mdBuilder = MetaData.builder(); + for (String concreteIndex : indices) { + mdBuilder.put(indexBuilder(concreteIndex)); + } + return mdBuilder.build(); + } +} diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java deleted file mode 100644 index 7d780e95aa2..00000000000 --- a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java +++ /dev/null @@ -1,895 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.cluster.metadata; - -import com.google.common.collect.Sets; - -import org.elasticsearch.Version; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.cluster.metadata.IndexMetaData.State; -import org.elasticsearch.common.Strings; -import org.elasticsearch.indices.IndexClosedException; -import org.elasticsearch.indices.IndexMissingException; -import org.elasticsearch.test.ElasticsearchTestCase; -import org.junit.Test; - -import java.util.Arrays; -import java.util.HashSet; - -import static com.google.common.collect.Sets.newHashSet; -import static org.hamcrest.Matchers.*; - -/** - */ -public class MetaDataTests extends ElasticsearchTestCase { - - @Test - public void testIndexOptions_strict() { - MetaData.Builder mdBuilder = MetaData.builder() - .put(indexBuilder("foo").putAlias(AliasMetaData.builder("foofoobar"))) - .put(indexBuilder("foobar").putAlias(AliasMetaData.builder("foofoobar"))) - .put(indexBuilder("foofoo-closed").state(IndexMetaData.State.CLOSE)) - .put(indexBuilder("foofoo").putAlias(AliasMetaData.builder("barbaz"))); - MetaData md = mdBuilder.build(); - - IndicesOptions[] indicesOptions = new IndicesOptions[]{ IndicesOptions.strictExpandOpen(), IndicesOptions.strictExpand()}; - - for (IndicesOptions options : indicesOptions) { - String[] results = md.concreteIndices(options, "foo"); - assertEquals(1, results.length); - assertEquals("foo", results[0]); - - try { - md.concreteIndices(options, "bar"); - fail(); - } catch (IndexMissingException e) { - assertThat(e.index().name(), equalTo("bar")); - } - - results = md.concreteIndices(options, "foofoo", "foobar"); - assertEquals(2, results.length); - assertThat(results, arrayContainingInAnyOrder("foofoo", "foobar")); - - results = md.concreteIndices(options, "foofoobar"); - assertEquals(new HashSet<>(Arrays.asList("foo", "foobar")), - new HashSet<>(Arrays.asList(results))); - - try { - md.concreteIndices(options, "bar"); - fail(); - } catch (IndexMissingException e) { - assertThat(e.index().name(), equalTo("bar")); - } - - try { - md.concreteIndices(options, "foo", "bar"); - fail(); - } catch (IndexMissingException e) { - assertThat(e.index().name(), equalTo("bar")); - } - - results = md.concreteIndices(options, "barbaz", "foobar"); - assertEquals(2, results.length); - assertThat(results, arrayContainingInAnyOrder("foofoo", "foobar")); - - try { - md.concreteIndices(options, "barbaz", "bar"); - fail(); - } catch (IndexMissingException e) { - assertThat(e.index().name(), equalTo("bar")); - } - - results = md.concreteIndices(options, "baz*"); - assertThat(results, emptyArray()); - - results = md.concreteIndices(options, "foo", "baz*"); - assertEquals(1, results.length); - assertEquals("foo", results[0]); - } - - String[] results = md.concreteIndices(IndicesOptions.strictExpandOpen(), Strings.EMPTY_ARRAY); - assertEquals(3, results.length); - - results = md.concreteIndices(IndicesOptions.strictExpandOpen(), null); - assertEquals(3, results.length); - - results = md.concreteIndices(IndicesOptions.strictExpand(), Strings.EMPTY_ARRAY); - assertEquals(4, results.length); - - results = md.concreteIndices(IndicesOptions.strictExpand(), null); - assertEquals(4, results.length); - - results = md.concreteIndices(IndicesOptions.strictExpandOpen(), "foofoo*"); - assertEquals(3, results.length); - assertThat(results, arrayContainingInAnyOrder("foo", "foobar", "foofoo")); - - results = md.concreteIndices(IndicesOptions.strictExpand(), "foofoo*"); - assertEquals(4, results.length); - 
assertThat(results, arrayContainingInAnyOrder("foo", "foobar", "foofoo", "foofoo-closed")); - } - - @Test - public void testIndexOptions_lenient() { - MetaData.Builder mdBuilder = MetaData.builder() - .put(indexBuilder("foo").putAlias(AliasMetaData.builder("foofoobar"))) - .put(indexBuilder("foobar").putAlias(AliasMetaData.builder("foofoobar"))) - .put(indexBuilder("foofoo-closed").state(IndexMetaData.State.CLOSE)) - .put(indexBuilder("foofoo").putAlias(AliasMetaData.builder("barbaz"))); - MetaData md = mdBuilder.build(); - - IndicesOptions lenientExpand = IndicesOptions.fromOptions(true, true, true, true); - IndicesOptions[] indicesOptions = new IndicesOptions[]{ IndicesOptions.lenientExpandOpen(), lenientExpand}; - - for (IndicesOptions options : indicesOptions) { - String[] results = md.concreteIndices(options, "foo"); - assertEquals(1, results.length); - assertEquals("foo", results[0]); - - results = md.concreteIndices(options, "bar"); - assertThat(results, emptyArray()); - - results = md.concreteIndices(options, "foofoo", "foobar"); - assertEquals(2, results.length); - assertThat(results, arrayContainingInAnyOrder("foofoo", "foobar")); - - results = md.concreteIndices(options, "foofoobar"); - assertEquals(2, results.length); - assertEquals(new HashSet<>(Arrays.asList("foo", "foobar")), - new HashSet<>(Arrays.asList(results))); - - results = md.concreteIndices(options, "foo", "bar"); - assertEquals(1, results.length); - assertThat(results, arrayContainingInAnyOrder("foo")); - - results = md.concreteIndices(options, "barbaz", "foobar"); - assertEquals(2, results.length); - assertThat(results, arrayContainingInAnyOrder("foofoo", "foobar")); - - results = md.concreteIndices(options, "barbaz", "bar"); - assertEquals(1, results.length); - assertThat(results, arrayContainingInAnyOrder("foofoo")); - - results = md.concreteIndices(options, "baz*"); - assertThat(results, emptyArray()); - - results = md.concreteIndices(options, "foo", "baz*"); - assertEquals(1, results.length); - assertEquals("foo", results[0]); - } - - String[] results = md.concreteIndices(IndicesOptions.lenientExpandOpen(), Strings.EMPTY_ARRAY); - assertEquals(3, results.length); - - results = md.concreteIndices(lenientExpand, Strings.EMPTY_ARRAY); - assertEquals(4, results.length); - - results = md.concreteIndices(IndicesOptions.lenientExpandOpen(), "foofoo*"); - assertEquals(3, results.length); - assertThat(results, arrayContainingInAnyOrder("foo", "foobar", "foofoo")); - - results = md.concreteIndices(lenientExpand, "foofoo*"); - assertEquals(4, results.length); - assertThat(results, arrayContainingInAnyOrder("foo", "foobar", "foofoo", "foofoo-closed")); - } - - @Test - public void testIndexOptions_allowUnavailableDisallowEmpty() { - MetaData.Builder mdBuilder = MetaData.builder() - .put(indexBuilder("foo")) - .put(indexBuilder("foobar")) - .put(indexBuilder("foofoo-closed").state(IndexMetaData.State.CLOSE)) - .put(indexBuilder("foofoo").putAlias(AliasMetaData.builder("barbaz"))); - MetaData md = mdBuilder.build(); - - IndicesOptions expandOpen = IndicesOptions.fromOptions(true, false, true, false); - IndicesOptions expand = IndicesOptions.fromOptions(true, false, true, true); - IndicesOptions[] indicesOptions = new IndicesOptions[]{expandOpen, expand}; - - for (IndicesOptions options : indicesOptions) { - String[] results = md.concreteIndices(options, "foo"); - assertEquals(1, results.length); - assertEquals("foo", results[0]); - - try { - md.concreteIndices(options, "bar"); - fail(); - } catch(IndexMissingException e) { - 
assertThat(e.index().name(), equalTo("bar")); - } - - try { - md.concreteIndices(options, "baz*"); - fail(); - } catch (IndexMissingException e) { - assertThat(e.index().name(), equalTo("baz*")); - } - - try { - md.concreteIndices(options, "foo", "baz*"); - fail(); - } catch (IndexMissingException e) { - assertThat(e.index().name(), equalTo("baz*")); - } - } - - String[] results = md.concreteIndices(expandOpen, Strings.EMPTY_ARRAY); - assertEquals(3, results.length); - - results = md.concreteIndices(expand, Strings.EMPTY_ARRAY); - assertEquals(4, results.length); - } - - @Test - public void testIndexOptions_wildcardExpansion() { - MetaData.Builder mdBuilder = MetaData.builder() - .put(indexBuilder("foo").state(IndexMetaData.State.CLOSE)) - .put(indexBuilder("bar")) - .put(indexBuilder("foobar").putAlias(AliasMetaData.builder("barbaz"))); - MetaData md = mdBuilder.build(); - - // Only closed - IndicesOptions options = IndicesOptions.fromOptions(false, true, false, true); - String[] results = md.concreteIndices(options, Strings.EMPTY_ARRAY); - assertEquals(1, results.length); - assertEquals("foo", results[0]); - - results = md.concreteIndices(options, "foo*"); - assertEquals(1, results.length); - assertEquals("foo", results[0]); - - // no wildcards, so wildcard expansion don't apply - results = md.concreteIndices(options, "bar"); - assertEquals(1, results.length); - assertEquals("bar", results[0]); - - // Only open - options = IndicesOptions.fromOptions(false, true, true, false); - results = md.concreteIndices(options, Strings.EMPTY_ARRAY); - assertEquals(2, results.length); - assertThat(results, arrayContainingInAnyOrder("bar", "foobar")); - - results = md.concreteIndices(options, "foo*"); - assertEquals(1, results.length); - assertEquals("foobar", results[0]); - - results = md.concreteIndices(options, "bar"); - assertEquals(1, results.length); - assertEquals("bar", results[0]); - - // Open and closed - options = IndicesOptions.fromOptions(false, true, true, true); - results = md.concreteIndices(options, Strings.EMPTY_ARRAY); - assertEquals(3, results.length); - assertThat(results, arrayContainingInAnyOrder("bar", "foobar", "foo")); - - results = md.concreteIndices(options, "foo*"); - assertEquals(2, results.length); - assertThat(results, arrayContainingInAnyOrder("foobar", "foo")); - - results = md.concreteIndices(options, "bar"); - assertEquals(1, results.length); - assertEquals("bar", results[0]); - - results = md.concreteIndices(options, "-foo*"); - assertEquals(1, results.length); - assertEquals("bar", results[0]); - - results = md.concreteIndices(options, "-*"); - assertEquals(0, results.length); - - options = IndicesOptions.fromOptions(false, false, true, true); - try { - md.concreteIndices(options, "-*"); - fail(); - } catch (IndexMissingException e) { - assertThat(e.index().name(), equalTo("[-*]")); - } - } - - @Test - public void testIndexOptions_noExpandWildcards() { - MetaData.Builder mdBuilder = MetaData.builder() - .put(indexBuilder("foo").putAlias(AliasMetaData.builder("foofoobar"))) - .put(indexBuilder("foobar").putAlias(AliasMetaData.builder("foofoobar"))) - .put(indexBuilder("foofoo-closed").state(IndexMetaData.State.CLOSE)) - .put(indexBuilder("foofoo").putAlias(AliasMetaData.builder("barbaz"))); - MetaData md = mdBuilder.build(); - - //ignore unavailable and allow no indices - { - IndicesOptions noExpandLenient = IndicesOptions.fromOptions(true, true, false, false); - - String[] results = md.concreteIndices(noExpandLenient, "baz*"); - assertThat(results, emptyArray()); - 
- results = md.concreteIndices(noExpandLenient, "foo", "baz*"); - assertEquals(1, results.length); - assertEquals("foo", results[0]); - - results = md.concreteIndices(noExpandLenient, "foofoobar"); - assertEquals(2, results.length); - assertThat(results, arrayContainingInAnyOrder("foo", "foobar")); - - results = md.concreteIndices(noExpandLenient, null); - assertEquals(0, results.length); - - results = md.concreteIndices(noExpandLenient, Strings.EMPTY_ARRAY); - assertEquals(0, results.length); - } - - //ignore unavailable but don't allow no indices - { - IndicesOptions noExpandDisallowEmpty = IndicesOptions.fromOptions(true, false, false, false); - - try { - md.concreteIndices(noExpandDisallowEmpty, "baz*"); - fail(); - } catch (IndexMissingException e) { - assertThat(e.index().name(), equalTo("baz*")); - } - - String[] results = md.concreteIndices(noExpandDisallowEmpty, "foo", "baz*"); - assertEquals(1, results.length); - assertEquals("foo", results[0]); - - results = md.concreteIndices(noExpandDisallowEmpty, "foofoobar"); - assertEquals(2, results.length); - assertThat(results, arrayContainingInAnyOrder("foo", "foobar")); - } - - //error on unavailable but allow no indices - { - IndicesOptions noExpandErrorUnavailable = IndicesOptions.fromOptions(false, true, false, false); - - String[] results = md.concreteIndices(noExpandErrorUnavailable, "baz*"); - assertThat(results, emptyArray()); - - try { - md.concreteIndices(noExpandErrorUnavailable, "foo", "baz*"); - fail(); - } catch (IndexMissingException e) { - assertThat(e.index().name(), equalTo("baz*")); - } - - results = md.concreteIndices(noExpandErrorUnavailable, "foofoobar"); - assertEquals(2, results.length); - assertThat(results, arrayContainingInAnyOrder("foo", "foobar")); - } - - //error on both unavailable and no indices - { - IndicesOptions noExpandStrict = IndicesOptions.fromOptions(false, false, false, false); - - try { - md.concreteIndices(noExpandStrict, "baz*"); - fail(); - } catch (IndexMissingException e) { - assertThat(e.index().name(), equalTo("baz*")); - } - - try { - md.concreteIndices(noExpandStrict, "foo", "baz*"); - fail(); - } catch (IndexMissingException e) { - assertThat(e.index().name(), equalTo("baz*")); - } - - String[] results = md.concreteIndices(noExpandStrict, "foofoobar"); - assertEquals(2, results.length); - assertThat(results, arrayContainingInAnyOrder("foo", "foobar")); - } - } - - @Test - public void testIndexOptions_singleIndexNoExpandWildcards() { - MetaData.Builder mdBuilder = MetaData.builder() - .put(indexBuilder("foo").putAlias(AliasMetaData.builder("foofoobar"))) - .put(indexBuilder("foobar").putAlias(AliasMetaData.builder("foofoobar"))) - .put(indexBuilder("foofoo-closed").state(IndexMetaData.State.CLOSE)) - .put(indexBuilder("foofoo").putAlias(AliasMetaData.builder("barbaz"))); - MetaData md = mdBuilder.build(); - - //error on both unavailable and no indices + every alias needs to expand to a single index - - try { - md.concreteIndices(IndicesOptions.strictSingleIndexNoExpandForbidClosed(), "baz*"); - fail(); - } catch (IndexMissingException e) { - assertThat(e.index().name(), equalTo("baz*")); - } - - try { - md.concreteIndices(IndicesOptions.strictSingleIndexNoExpandForbidClosed(), "foo", "baz*"); - fail(); - } catch (IndexMissingException e) { - assertThat(e.index().name(), equalTo("baz*")); - } - - try { - md.concreteIndices(IndicesOptions.strictSingleIndexNoExpandForbidClosed(), "foofoobar"); - fail(); - } catch(IllegalArgumentException e) { - assertThat(e.getMessage(), 
containsString("Alias [foofoobar] has more than one indices associated with it")); - } - - try { - md.concreteIndices(IndicesOptions.strictSingleIndexNoExpandForbidClosed(), "foo", "foofoobar"); - fail(); - } catch(IllegalArgumentException e) { - assertThat(e.getMessage(), containsString("Alias [foofoobar] has more than one indices associated with it")); - } - - try { - md.concreteIndices(IndicesOptions.strictSingleIndexNoExpandForbidClosed(), "foofoo-closed", "foofoobar"); - fail(); - } catch(IndexClosedException e) { - assertThat(e.getMessage(), equalTo("closed")); - assertEquals(e.index().getName(), "foofoo-closed"); - } - - String[] results = md.concreteIndices(IndicesOptions.strictSingleIndexNoExpandForbidClosed(), "foo", "barbaz"); - assertEquals(2, results.length); - assertThat(results, arrayContainingInAnyOrder("foo", "foofoo")); - } - - @Test - public void testIndexOptions_emptyCluster() { - MetaData md = MetaData.builder().build(); - IndicesOptions options = IndicesOptions.strictExpandOpen(); - - String[] results = md.concreteIndices(options, Strings.EMPTY_ARRAY); - assertThat(results, emptyArray()); - try { - md.concreteIndices(options, "foo"); - fail(); - } catch (IndexMissingException e) { - assertThat(e.index().name(), equalTo("foo")); - } - results = md.concreteIndices(options, "foo*"); - assertThat(results, emptyArray()); - try { - md.concreteIndices(options, "foo*", "bar"); - fail(); - } catch (IndexMissingException e) { - assertThat(e.index().name(), equalTo("bar")); - } - - - options = IndicesOptions.lenientExpandOpen(); - results = md.concreteIndices(options, Strings.EMPTY_ARRAY); - assertThat(results, emptyArray()); - results = md.concreteIndices(options, "foo"); - assertThat(results, emptyArray()); - results = md.concreteIndices(options, "foo*"); - assertThat(results, emptyArray()); - results = md.concreteIndices(options, "foo*", "bar"); - assertThat(results, emptyArray()); - - options = IndicesOptions.fromOptions(true, false, true, false); - try { - md.concreteIndices(options, Strings.EMPTY_ARRAY); - } catch (IndexMissingException e) { - assertThat(e.index().name(), equalTo("_all")); - } - } - - @Test - public void testConvertWildcardsJustIndicesTests() { - MetaData.Builder mdBuilder = MetaData.builder() - .put(indexBuilder("testXXX")) - .put(indexBuilder("testXYY")) - .put(indexBuilder("testYYY")) - .put(indexBuilder("kuku")); - MetaData md = mdBuilder.build(); - assertThat(newHashSet(md.convertFromWildcards(new String[]{"testXXX"}, IndicesOptions.lenientExpandOpen())), equalTo(newHashSet("testXXX"))); - assertThat(newHashSet(md.convertFromWildcards(new String[]{"testXXX", "testYYY"}, IndicesOptions.lenientExpandOpen())), equalTo(newHashSet("testXXX", "testYYY"))); - assertThat(newHashSet(md.convertFromWildcards(new String[]{"testXXX", "ku*"}, IndicesOptions.lenientExpandOpen())), equalTo(newHashSet("testXXX", "kuku"))); - assertThat(newHashSet(md.convertFromWildcards(new String[]{"test*"}, IndicesOptions.lenientExpandOpen())), equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); - assertThat(newHashSet(md.convertFromWildcards(new String[]{"testX*"}, IndicesOptions.lenientExpandOpen())), equalTo(newHashSet("testXXX", "testXYY"))); - assertThat(newHashSet(md.convertFromWildcards(new String[]{"testX*", "kuku"}, IndicesOptions.lenientExpandOpen())), equalTo(newHashSet("testXXX", "testXYY", "kuku"))); - } - - @Test - public void testConvertWildcardsTests() { - MetaData.Builder mdBuilder = MetaData.builder() - 
.put(indexBuilder("testXXX").putAlias(AliasMetaData.builder("alias1")).putAlias(AliasMetaData.builder("alias2"))) - .put(indexBuilder("testXYY").putAlias(AliasMetaData.builder("alias2"))) - .put(indexBuilder("testYYY").putAlias(AliasMetaData.builder("alias3"))) - .put(indexBuilder("kuku")); - MetaData md = mdBuilder.build(); - assertThat(newHashSet(md.convertFromWildcards(new String[]{"testYY*", "alias*"}, IndicesOptions.lenientExpandOpen())), equalTo(newHashSet("alias1", "alias2", "alias3", "testYYY"))); - assertThat(newHashSet(md.convertFromWildcards(new String[]{"-kuku"}, IndicesOptions.lenientExpandOpen())), equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); - assertThat(newHashSet(md.convertFromWildcards(new String[]{"+test*", "-testYYY"}, IndicesOptions.lenientExpandOpen())), equalTo(newHashSet("testXXX", "testXYY"))); - assertThat(newHashSet(md.convertFromWildcards(new String[]{"+testX*", "+testYYY"}, IndicesOptions.lenientExpandOpen())), equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); - assertThat(newHashSet(md.convertFromWildcards(new String[]{"+testYYY", "+testX*"}, IndicesOptions.lenientExpandOpen())), equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); - } - - @Test - public void testConvertWildcardsOpenClosedIndicesTests() { - MetaData.Builder mdBuilder = MetaData.builder() - .put(indexBuilder("testXXX").state(State.OPEN)) - .put(indexBuilder("testXXY").state(State.OPEN)) - .put(indexBuilder("testXYY").state(State.CLOSE)) - .put(indexBuilder("testYYY").state(State.OPEN)) - .put(indexBuilder("testYYX").state(State.CLOSE)) - .put(indexBuilder("kuku").state(State.OPEN)); - MetaData md = mdBuilder.build(); - // Can't test when wildcard expansion is turned off here as convertFromWildcards shouldn't be called in this case. Tests for this are covered in the concreteIndices() tests - assertThat(newHashSet(md.convertFromWildcards(new String[]{"testX*"}, IndicesOptions.fromOptions(true, true, true, true))), equalTo(newHashSet("testXXX", "testXXY", "testXYY"))); - assertThat(newHashSet(md.convertFromWildcards(new String[]{"testX*"}, IndicesOptions.fromOptions(true, true, false, true))), equalTo(newHashSet("testXYY"))); - assertThat(newHashSet(md.convertFromWildcards(new String[]{"testX*"}, IndicesOptions.fromOptions(true, true, true, false))), equalTo(newHashSet("testXXX", "testXXY"))); - } - - private IndexMetaData.Builder indexBuilder(String index) { - return IndexMetaData.builder(index).settings(settings(Version.CURRENT).put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); - } - - @Test(expected = IndexMissingException.class) - public void testConcreteIndicesIgnoreIndicesOneMissingIndex() { - MetaData.Builder mdBuilder = MetaData.builder() - .put(indexBuilder("testXXX")) - .put(indexBuilder("kuku")); - MetaData md = mdBuilder.build(); - md.concreteIndices(IndicesOptions.strictExpandOpen(), "testZZZ"); - } - - @Test - public void testConcreteIndicesIgnoreIndicesOneMissingIndexOtherFound() { - MetaData.Builder mdBuilder = MetaData.builder() - .put(indexBuilder("testXXX")) - .put(indexBuilder("kuku")); - MetaData md = mdBuilder.build(); - assertThat(newHashSet(md.concreteIndices(IndicesOptions.lenientExpandOpen(), "testXXX", "testZZZ")), equalTo(newHashSet("testXXX"))); - } - - @Test(expected = IndexMissingException.class) - public void testConcreteIndicesIgnoreIndicesAllMissing() { - MetaData.Builder mdBuilder = MetaData.builder() - .put(indexBuilder("testXXX")) - .put(indexBuilder("kuku")); - MetaData md = mdBuilder.build(); - 
assertThat(newHashSet(md.concreteIndices(IndicesOptions.strictExpandOpen(), "testMo", "testMahdy")), equalTo(newHashSet("testXXX"))); - } - - @Test - public void testConcreteIndicesIgnoreIndicesEmptyRequest() { - MetaData.Builder mdBuilder = MetaData.builder() - .put(indexBuilder("testXXX")) - .put(indexBuilder("kuku")); - MetaData md = mdBuilder.build(); - assertThat(newHashSet(md.concreteIndices(IndicesOptions.lenientExpandOpen(), new String[]{})), equalTo(Sets.newHashSet("kuku", "testXXX"))); - } - - @Test - public void testConcreteIndicesWildcardExpansion() { - MetaData.Builder mdBuilder = MetaData.builder() - .put(indexBuilder("testXXX").state(State.OPEN)) - .put(indexBuilder("testXXY").state(State.OPEN)) - .put(indexBuilder("testXYY").state(State.CLOSE)) - .put(indexBuilder("testYYY").state(State.OPEN)) - .put(indexBuilder("testYYX").state(State.OPEN)); - MetaData md = mdBuilder.build(); - assertThat(newHashSet(md.concreteIndices(IndicesOptions.fromOptions(true, true, false, false), "testX*")), equalTo(new HashSet())); - assertThat(newHashSet(md.concreteIndices(IndicesOptions.fromOptions(true, true, true, false), "testX*")), equalTo(newHashSet("testXXX", "testXXY"))); - assertThat(newHashSet(md.concreteIndices(IndicesOptions.fromOptions(true, true, false, true), "testX*")), equalTo(newHashSet("testXYY"))); - assertThat(newHashSet(md.concreteIndices(IndicesOptions.fromOptions(true, true, true, true), "testX*")), equalTo(newHashSet("testXXX", "testXXY", "testXYY"))); - } - - /** - * test resolving _all pattern (null, empty array or "_all") for random IndicesOptions - */ - @Test - public void testConcreteIndicesAllPatternRandom() { - for (int i = 0; i < 10; i++) { - String[] allIndices = null; - switch (randomIntBetween(0, 2)) { - case 0: - break; - case 1: - allIndices = new String[0]; - break; - case 2: - allIndices = new String[] { MetaData.ALL }; - break; - } - - IndicesOptions indicesOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()); - MetaData metadata = MetaData.builder().build(); - - // with no indices, asking for all indices should return empty list or exception, depending on indices options - if (indicesOptions.allowNoIndices()) { - String[] concreteIndices = metadata.concreteIndices(indicesOptions, allIndices); - assertThat(concreteIndices, notNullValue()); - assertThat(concreteIndices.length, equalTo(0)); - } else { - checkCorrectException(metadata, indicesOptions, allIndices); - } - - // with existing indices, asking for all indices should return all open/closed indices depending on options - metadata = MetaData.builder() - .put(indexBuilder("aaa").state(State.OPEN).putAlias(AliasMetaData.builder("aaa_alias1"))) - .put(indexBuilder("bbb").state(State.OPEN).putAlias(AliasMetaData.builder("bbb_alias1"))) - .put(indexBuilder("ccc").state(State.CLOSE).putAlias(AliasMetaData.builder("ccc_alias1"))) - .build(); - if (indicesOptions.expandWildcardsOpen() || indicesOptions.expandWildcardsClosed() || indicesOptions.allowNoIndices()) { - String[] concreteIndices = metadata.concreteIndices(indicesOptions, allIndices); - assertThat(concreteIndices, notNullValue()); - int expectedNumberOfIndices = 0; - if (indicesOptions.expandWildcardsOpen()) { - expectedNumberOfIndices += 2; - } - if (indicesOptions.expandWildcardsClosed()) { - expectedNumberOfIndices += 1; - } - assertThat(concreteIndices.length, equalTo(expectedNumberOfIndices)); - } else { - checkCorrectException(metadata, indicesOptions, allIndices); - } - } - } - - /** - * 
check for correct exception type depending on indicesOptions and provided index name list - */ - private void checkCorrectException(MetaData metadata, IndicesOptions indicesOptions, String[] allIndices) { - // two different exception types possible - if (!(indicesOptions.expandWildcardsOpen() || indicesOptions.expandWildcardsClosed()) - && (allIndices == null || allIndices.length == 0)) { - try { - metadata.concreteIndices(indicesOptions, allIndices); - fail("no wildcard expansion and null or empty list argument should trigger ElasticsearchIllegalArgumentException"); - } catch (IllegalArgumentException e) { - // expected - } - } else { - try { - metadata.concreteIndices(indicesOptions, allIndices); - fail("wildcard expansion on should trigger IndexMissingException"); - } catch (IndexMissingException e) { - // expected - } - } - } - - /** - * test resolving wildcard pattern that matches no index of alias for random IndicesOptions - */ - @Test - public void testConcreteIndicesWildcardNoMatch() { - for (int i = 0; i < 10; i++) { - IndicesOptions indicesOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()); - MetaData metadata = MetaData.builder().build(); - - metadata = MetaData.builder() - .put(indexBuilder("aaa").state(State.OPEN).putAlias(AliasMetaData.builder("aaa_alias1"))) - .put(indexBuilder("bbb").state(State.OPEN).putAlias(AliasMetaData.builder("bbb_alias1"))) - .put(indexBuilder("ccc").state(State.CLOSE).putAlias(AliasMetaData.builder("ccc_alias1"))) - .build(); - - // asking for non existing wildcard pattern should return empty list or exception - if (indicesOptions.allowNoIndices()) { - String[] concreteIndices = metadata.concreteIndices(indicesOptions, "Foo*"); - assertThat(concreteIndices, notNullValue()); - assertThat(concreteIndices.length, equalTo(0)); - } else { - try { - metadata.concreteIndices(indicesOptions, "Foo*"); - fail("expecting exeption when result empty and allowNoIndicec=false"); - } catch (IndexMissingException e) { - // expected exception - } - } - } - } - - @Test - public void testIsAllIndices_null() throws Exception { - assertThat(MetaData.isAllIndices(null), equalTo(true)); - } - - @Test - public void testIsAllIndices_empty() throws Exception { - assertThat(MetaData.isAllIndices(new String[0]), equalTo(true)); - } - - @Test - public void testIsAllIndices_explicitAll() throws Exception { - assertThat(MetaData.isAllIndices(new String[]{"_all"}), equalTo(true)); - } - - @Test - public void testIsAllIndices_explicitAllPlusOther() throws Exception { - assertThat(MetaData.isAllIndices(new String[]{"_all", "other"}), equalTo(false)); - } - - @Test - public void testIsAllIndices_normalIndexes() throws Exception { - assertThat(MetaData.isAllIndices(new String[]{"index1", "index2", "index3"}), equalTo(false)); - } - - @Test - public void testIsAllIndices_wildcard() throws Exception { - assertThat(MetaData.isAllIndices(new String[]{"*"}), equalTo(false)); - } - - @Test - public void testIsExplicitAllIndices_null() throws Exception { - assertThat(MetaData.isExplicitAllPattern(null), equalTo(false)); - } - - @Test - public void testIsExplicitAllIndices_empty() throws Exception { - assertThat(MetaData.isExplicitAllPattern(new String[0]), equalTo(false)); - } - - @Test - public void testIsExplicitAllIndices_explicitAll() throws Exception { - assertThat(MetaData.isExplicitAllPattern(new String[]{"_all"}), equalTo(true)); - } - - @Test - public void testIsExplicitAllIndices_explicitAllPlusOther() throws Exception { - 
assertThat(MetaData.isExplicitAllPattern(new String[]{"_all", "other"}), equalTo(false)); - } - - @Test - public void testIsExplicitAllIndices_normalIndexes() throws Exception { - assertThat(MetaData.isExplicitAllPattern(new String[]{"index1", "index2", "index3"}), equalTo(false)); - } - - @Test - public void testIsExplicitAllIndices_wildcard() throws Exception { - assertThat(MetaData.isExplicitAllPattern(new String[]{"*"}), equalTo(false)); - } - - @Test - public void testIsPatternMatchingAllIndices_explicitList() throws Exception { - //even though it does identify all indices, it's not a pattern but just an explicit list of them - String[] concreteIndices = new String[]{"index1", "index2", "index3"}; - MetaData metaData = metaDataBuilder(concreteIndices); - assertThat(metaData.isPatternMatchingAllIndices(concreteIndices, concreteIndices), equalTo(false)); - } - - @Test - public void testIsPatternMatchingAllIndices_onlyWildcard() throws Exception { - String[] indicesOrAliases = new String[]{"*"}; - String[] concreteIndices = new String[]{"index1", "index2", "index3"}; - MetaData metaData = metaDataBuilder(concreteIndices); - assertThat(metaData.isPatternMatchingAllIndices(indicesOrAliases, concreteIndices), equalTo(true)); - } - - @Test - public void testIsPatternMatchingAllIndices_matchingTrailingWildcard() throws Exception { - String[] indicesOrAliases = new String[]{"index*"}; - String[] concreteIndices = new String[]{"index1", "index2", "index3"}; - MetaData metaData = metaDataBuilder(concreteIndices); - assertThat(metaData.isPatternMatchingAllIndices(indicesOrAliases, concreteIndices), equalTo(true)); - } - - @Test - public void testIsPatternMatchingAllIndices_nonMatchingTrailingWildcard() throws Exception { - String[] indicesOrAliases = new String[]{"index*"}; - String[] concreteIndices = new String[]{"index1", "index2", "index3"}; - String[] allConcreteIndices = new String[]{"index1", "index2", "index3", "a", "b"}; - MetaData metaData = metaDataBuilder(allConcreteIndices); - assertThat(metaData.isPatternMatchingAllIndices(indicesOrAliases, concreteIndices), equalTo(false)); - } - - @Test - public void testIsPatternMatchingAllIndices_matchingSingleExclusion() throws Exception { - String[] indicesOrAliases = new String[]{"-index1", "+index1"}; - String[] concreteIndices = new String[]{"index1", "index2", "index3"}; - MetaData metaData = metaDataBuilder(concreteIndices); - assertThat(metaData.isPatternMatchingAllIndices(indicesOrAliases, concreteIndices), equalTo(true)); - } - - @Test - public void testIsPatternMatchingAllIndices_nonMatchingSingleExclusion() throws Exception { - String[] indicesOrAliases = new String[]{"-index1"}; - String[] concreteIndices = new String[]{"index2", "index3"}; - String[] allConcreteIndices = new String[]{"index1", "index2", "index3"}; - MetaData metaData = metaDataBuilder(allConcreteIndices); - assertThat(metaData.isPatternMatchingAllIndices(indicesOrAliases, concreteIndices), equalTo(false)); - } - - @Test - public void testIsPatternMatchingAllIndices_matchingTrailingWildcardAndExclusion() throws Exception { - String[] indicesOrAliases = new String[]{"index*", "-index1", "+index1"}; - String[] concreteIndices = new String[]{"index1", "index2", "index3"}; - MetaData metaData = metaDataBuilder(concreteIndices); - assertThat(metaData.isPatternMatchingAllIndices(indicesOrAliases, concreteIndices), equalTo(true)); - } - - @Test - public void testIsPatternMatchingAllIndices_nonMatchingTrailingWildcardAndExclusion() throws Exception { - String[] 
indicesOrAliases = new String[]{"index*", "-index1"}; - String[] concreteIndices = new String[]{"index2", "index3"}; - String[] allConcreteIndices = new String[]{"index1", "index2", "index3"}; - MetaData metaData = metaDataBuilder(allConcreteIndices); - assertThat(metaData.isPatternMatchingAllIndices(indicesOrAliases, concreteIndices), equalTo(false)); - } - - @Test - public void testIndexOptions_failClosedIndicesAndAliases() { - MetaData.Builder mdBuilder = MetaData.builder() - .put(indexBuilder("foo1-closed").state(IndexMetaData.State.CLOSE).putAlias(AliasMetaData.builder("foobar1-closed")).putAlias(AliasMetaData.builder("foobar2-closed"))) - .put(indexBuilder("foo2-closed").state(IndexMetaData.State.CLOSE).putAlias(AliasMetaData.builder("foobar2-closed"))) - .put(indexBuilder("foo3").putAlias(AliasMetaData.builder("foobar2-closed"))); - MetaData md = mdBuilder.build(); - - IndicesOptions options = IndicesOptions.strictExpandOpenAndForbidClosed(); - try { - md.concreteIndices(options, "foo1-closed"); - fail("foo1-closed should be closed, but it is open"); - } catch (IndexClosedException e) { - // expected - } - - try { - md.concreteIndices(options, "foobar1-closed"); - fail("foo1-closed should be closed, but it is open"); - } catch (IndexClosedException e) { - // expected - } - - options = IndicesOptions.fromOptions(true, options.allowNoIndices(), options.expandWildcardsOpen(), options.expandWildcardsClosed(), options); - String[] results = md.concreteIndices(options, "foo1-closed"); - assertThat(results, emptyArray()); - - results = md.concreteIndices(options, "foobar1-closed"); - assertThat(results, emptyArray()); - - options = IndicesOptions.lenientExpandOpen(); - results = md.concreteIndices(options, "foo1-closed"); - assertThat(results, arrayWithSize(1)); - assertThat(results, arrayContaining("foo1-closed")); - - results = md.concreteIndices(options, "foobar1-closed"); - assertThat(results, arrayWithSize(1)); - assertThat(results, arrayContaining("foo1-closed")); - - // testing an alias pointing to three indices: - options = IndicesOptions.strictExpandOpenAndForbidClosed(); - try { - md.concreteIndices(options, "foobar2-closed"); - fail("foo2-closed should be closed, but it is open"); - } catch (IndexClosedException e) { - // expected - } - - options = IndicesOptions.fromOptions(true, options.allowNoIndices(), options.expandWildcardsOpen(), options.expandWildcardsClosed(), options); - results = md.concreteIndices(options, "foobar2-closed"); - assertThat(results, arrayWithSize(1)); - assertThat(results, arrayContaining("foo3")); - - options = IndicesOptions.lenientExpandOpen(); - results = md.concreteIndices(options, "foobar2-closed"); - assertThat(results, arrayWithSize(3)); - assertThat(results, arrayContainingInAnyOrder("foo1-closed", "foo2-closed", "foo3")); - } - - private MetaData metaDataBuilder(String... indices) { - MetaData.Builder mdBuilder = MetaData.builder(); - for (String concreteIndex : indices) { - mdBuilder.put(indexBuilder(concreteIndex)); - } - return mdBuilder.build(); - } -} diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java new file mode 100644 index 00000000000..6c4af1e1873 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java @@ -0,0 +1,97 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.Version; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.test.ElasticsearchTestCase; +import org.junit.Test; + +import java.util.Arrays; + +import static com.google.common.collect.Sets.newHashSet; +import static org.hamcrest.Matchers.equalTo; + +public class WildcardExpressionResolverTests extends ElasticsearchTestCase { + + @Test + public void testConvertWildcardsJustIndicesTests() { + MetaData.Builder mdBuilder = MetaData.builder() + .put(indexBuilder("testXXX")) + .put(indexBuilder("testXYY")) + .put(indexBuilder("testYYY")) + .put(indexBuilder("kuku")); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); + IndexNameExpressionResolver.WildcardExpressionResolver resolver = new IndexNameExpressionResolver.WildcardExpressionResolver(); + + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen()); + assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX"))), equalTo(newHashSet("testXXX"))); + assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "testYYY"))), equalTo(newHashSet("testXXX", "testYYY"))); + assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "ku*"))), equalTo(newHashSet("testXXX", "kuku"))); + assertThat(newHashSet(resolver.resolve(context, Arrays.asList("test*"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); + assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testX*"))), equalTo(newHashSet("testXXX", "testXYY"))); + assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testX*", "kuku"))), equalTo(newHashSet("testXXX", "testXYY", "kuku"))); + } + + @Test + public void testConvertWildcardsTests() { + MetaData.Builder mdBuilder = MetaData.builder() + .put(indexBuilder("testXXX").putAlias(AliasMetaData.builder("alias1")).putAlias(AliasMetaData.builder("alias2"))) + .put(indexBuilder("testXYY").putAlias(AliasMetaData.builder("alias2"))) + .put(indexBuilder("testYYY").putAlias(AliasMetaData.builder("alias3"))) + .put(indexBuilder("kuku")); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); + IndexNameExpressionResolver.WildcardExpressionResolver resolver = new IndexNameExpressionResolver.WildcardExpressionResolver(); + + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen()); + assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testYY*", "alias*"))), equalTo(newHashSet("alias1", "alias2", "alias3", "testYYY"))); + 
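+ // Editorial note, not part of the original patch: expressions are resolved left to right. A leading '-' starts from all known indices and aliases and subtracts its matches, a later '-name' removes names collected so far, and '+name' adds matches back; this is why the two '+' orderings asserted below are expected to resolve to the same set.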
assertThat(newHashSet(resolver.resolve(context, Arrays.asList("-kuku"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); + assertThat(newHashSet(resolver.resolve(context, Arrays.asList("+test*", "-testYYY"))), equalTo(newHashSet("testXXX", "testXYY"))); + assertThat(newHashSet(resolver.resolve(context, Arrays.asList("+testX*", "+testYYY"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); + assertThat(newHashSet(resolver.resolve(context, Arrays.asList("+testYYY", "+testX*"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); + } + + @Test + public void testConvertWildcardsOpenClosedIndicesTests() { + MetaData.Builder mdBuilder = MetaData.builder() + .put(indexBuilder("testXXX").state(IndexMetaData.State.OPEN)) + .put(indexBuilder("testXXY").state(IndexMetaData.State.OPEN)) + .put(indexBuilder("testXYY").state(IndexMetaData.State.CLOSE)) + .put(indexBuilder("testYYY").state(IndexMetaData.State.OPEN)) + .put(indexBuilder("testYYX").state(IndexMetaData.State.CLOSE)) + .put(indexBuilder("kuku").state(IndexMetaData.State.OPEN)); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); + IndexNameExpressionResolver.WildcardExpressionResolver resolver = new IndexNameExpressionResolver.WildcardExpressionResolver(); + + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, true, true, true)); + assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testX*"))), equalTo(newHashSet("testXXX", "testXXY", "testXYY"))); + context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, true, false, true)); + assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testX*"))), equalTo(newHashSet("testXYY"))); + context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, true, true, false)); + assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testX*"))), equalTo(newHashSet("testXXX", "testXXY"))); + } + + private IndexMetaData.Builder indexBuilder(String index) { + return IndexMetaData.builder(index).settings(settings(Version.CURRENT).put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); + } + +} diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java new file mode 100644 index 00000000000..2f64f39f059 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java @@ -0,0 +1,123 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cluster.routing; + +import org.elasticsearch.test.ElasticsearchTestCase; +import org.junit.Test; + +import static org.hamcrest.Matchers.*; + +/** + */ +public class AllocationIdTests extends ElasticsearchTestCase { + + @Test + public void testShardToStarted() { + logger.info("-- create unassigned shard"); + ShardRouting shard = ShardRouting.newUnassigned("test", 0, null, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); + assertThat(shard.allocationId(), nullValue()); + + logger.info("-- initialize the shard"); + shard.initialize("node1"); + AllocationId allocationId = shard.allocationId(); + assertThat(allocationId, notNullValue()); + assertThat(allocationId.getId(), notNullValue()); + assertThat(allocationId.getRelocationId(), nullValue()); + + logger.info("-- start the shard"); + shard.moveToStarted(); + assertThat(shard.allocationId().getId(), equalTo(allocationId.getId())); + allocationId = shard.allocationId(); + assertThat(allocationId.getId(), notNullValue()); + assertThat(allocationId.getRelocationId(), nullValue()); + } + + @Test + public void testSuccessfulRelocation() { + logger.info("-- build started shard"); + ShardRouting shard = ShardRouting.newUnassigned("test", 0, null, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); + shard.initialize("node1"); + shard.moveToStarted(); + + AllocationId allocationId = shard.allocationId(); + logger.info("-- relocate the shard"); + shard.relocate("node2"); + assertThat(shard.allocationId(), not(equalTo(allocationId))); + assertThat(shard.allocationId().getId(), equalTo(allocationId.getId())); + assertThat(shard.allocationId().getRelocationId(), notNullValue()); + + ShardRouting target = shard.buildTargetRelocatingShard(); + assertThat(target.allocationId().getId(), equalTo(shard.allocationId().getRelocationId())); + assertThat(target.allocationId().getRelocationId(), equalTo(shard.allocationId().getId())); + + logger.info("-- finalize the relocation"); + target.moveToStarted(); + assertThat(target.allocationId().getId(), equalTo(shard.allocationId().getRelocationId())); + assertThat(target.allocationId().getRelocationId(), nullValue()); + } + + @Test + public void testCancelRelocation() { + logger.info("-- build started shard"); + ShardRouting shard = ShardRouting.newUnassigned("test", 0, null, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); + shard.initialize("node1"); + shard.moveToStarted(); + + AllocationId allocationId = shard.allocationId(); + logger.info("-- relocate the shard"); + shard.relocate("node2"); + assertThat(shard.allocationId(), not(equalTo(allocationId))); + assertThat(shard.allocationId().getId(), equalTo(allocationId.getId())); + assertThat(shard.allocationId().getRelocationId(), notNullValue()); + allocationId = shard.allocationId(); + + logger.info("-- cancel relocation"); + shard.cancelRelocation(); + assertThat(shard.allocationId().getId(), equalTo(allocationId.getId())); + assertThat(shard.allocationId().getRelocationId(), nullValue()); + } + + @Test + public void testMoveToUnassigned() { + logger.info("-- build started shard"); + ShardRouting shard = ShardRouting.newUnassigned("test", 0, null, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); + shard.initialize("node1"); + shard.moveToStarted(); + + logger.info("-- move to unassigned"); + shard.moveToUnassigned(new UnassignedInfo(UnassignedInfo.Reason.NODE_LEFT, null)); + assertThat(shard.allocationId(), nullValue()); + } + + @Test + public void 
testReinitializing() { + logger.info("-- build started shard"); + ShardRouting shard = ShardRouting.newUnassigned("test", 0, null, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); + shard.initialize("node1"); + shard.moveToStarted(); + AllocationId allocationId = shard.allocationId(); + + logger.info("-- reinitializing shard"); + shard.reinitializeShard(); + assertThat(shard.allocationId().getId(), notNullValue()); + assertThat(shard.allocationId().getRelocationId(), nullValue()); + assertThat(shard.allocationId().getId(), not(equalTo(allocationId.getId()))); + } +} diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingBackwardCompatibilityTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingBackwardCompatibilityTests.java index f2957d75d08..3892c3a2c4b 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingBackwardCompatibilityTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingBackwardCompatibilityTests.java @@ -64,7 +64,7 @@ public class RoutingBackwardCompatibilityTests extends ElasticsearchTestCase { RoutingTable routingTable = RoutingTable.builder().addAsNew(indexMetaData).build(); ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build(); final int shardId = operationRouting.indexShards(clusterState, index, type, id, routing).shardId().getId(); - if (version.before(Version.V_2_0_0)) { + if (version.before(Version.V_2_0_0_beta1)) { assertEquals(pre20ExpectedShardId, shardId); } else { assertEquals(currentExpectedShard, shardId); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingServiceTests.java index 9b04917aa77..97a1e3d790d 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingServiceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingServiceTests.java @@ -142,7 +142,7 @@ public class RoutingServiceTests extends ElasticsearchAllocationTestCase { } @Override - void performReroute(String reason) { + protected void performReroute(String reason) { rerouted.set(true); } } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTest.java b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTest.java index 23fad2de2ec..28e93ad693b 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTest.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTest.java @@ -28,7 +28,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes.Builder; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.indices.IndexMissingException; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.test.ElasticsearchAllocationTestCase; import org.junit.Before; import org.junit.Test; @@ -116,7 +116,7 @@ public class RoutingTableTest extends ElasticsearchAllocationTestCase { try { assertThat(this.testRoutingTable.allShards("not_existing").size(), is(0)); fail("Exception expected when calling allShards() with non existing index name"); - } catch (IndexMissingException e) { + } catch (IndexNotFoundException e) { // expected } } @@ -191,7 +191,7 @@ public class RoutingTableTest extends ElasticsearchAllocationTestCase { try { 
this.testRoutingTable.activePrimaryShardsGrouped(new String[]{TEST_INDEX_1, "not_exists"}, true); - fail("Calling with non-existing index name should raise IndexMissingException"); + fail("Calling with non-existing index name should raise IndexNotFoundException"); - } catch (IndexMissingException e) { + } catch (IndexNotFoundException e) { // expected } } @@ -220,7 +220,7 @@ public class RoutingTableTest extends ElasticsearchAllocationTestCase { try { this.testRoutingTable.allActiveShardsGrouped(new String[]{TEST_INDEX_1, "not_exists"}, true); - } catch (IndexMissingException e) { + } catch (IndexNotFoundException e) { fail("Calling with non-existing index should be ignored at the moment"); } } @@ -239,7 +239,7 @@ public class RoutingTableTest extends ElasticsearchAllocationTestCase { try { this.testRoutingTable.allAssignedShardsGrouped(new String[]{TEST_INDEX_1, "not_exists"}, false); - } catch (IndexMissingException e) { + } catch (IndexNotFoundException e) { fail("Calling with non-existing index should be ignored at the moment"); } } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java index dcbd06fe74a..dd43b28ea05 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java @@ -82,7 +82,7 @@ public class ShardRoutingTests extends ElasticsearchTestCase { } try { - routing.assignToNode("boom"); + routing.initialize("boom"); fail("must be frozen"); } catch (IllegalStateException ex) { // expected diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/TestShardRouting.java b/core/src/test/java/org/elasticsearch/cluster/routing/TestShardRouting.java index a0b1ff63969..82d3afc6e91 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/TestShardRouting.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/TestShardRouting.java @@ -26,20 +26,39 @@ package org.elasticsearch.cluster.routing; public class TestShardRouting { public static ShardRouting newShardRouting(String index, int shardId, String currentNodeId, boolean primary, ShardRoutingState state, long version) { - return new ShardRouting(index, shardId, currentNodeId, null, null, primary, state, version, null, true); + return new ShardRouting(index, shardId, currentNodeId, null, null, primary, state, version, null, buildAllocationId(state), true); } public static ShardRouting newShardRouting(String index, int shardId, String currentNodeId, String relocatingNodeId, boolean primary, ShardRoutingState state, long version) { - return new ShardRouting(index, shardId, currentNodeId, relocatingNodeId, null, primary, state, version, null, true); + return new ShardRouting(index, shardId, currentNodeId, relocatingNodeId, null, primary, state, version, null, buildAllocationId(state), true); + } + + public static ShardRouting newShardRouting(String index, int shardId, String currentNodeId, String relocatingNodeId, boolean primary, ShardRoutingState state, AllocationId allocationId, long version) { + return new ShardRouting(index, shardId, currentNodeId, relocatingNodeId, null, primary, state, version, null, allocationId, true); } public static ShardRouting newShardRouting(String index, int shardId, String currentNodeId, String relocatingNodeId, RestoreSource restoreSource, boolean primary, ShardRoutingState state, long version) { - return new ShardRouting(index, shardId, currentNodeId, relocatingNodeId, restoreSource, primary, state, version, null, true); + return new
ShardRouting(index, shardId, currentNodeId, relocatingNodeId, restoreSource, primary, state, version, null, buildAllocationId(state), true); } public static ShardRouting newShardRouting(String index, int shardId, String currentNodeId, String relocatingNodeId, RestoreSource restoreSource, boolean primary, ShardRoutingState state, long version, UnassignedInfo unassignedInfo) { - return new ShardRouting(index, shardId, currentNodeId, relocatingNodeId, restoreSource, primary, state, version, unassignedInfo, true); + return new ShardRouting(index, shardId, currentNodeId, relocatingNodeId, restoreSource, primary, state, version, unassignedInfo, buildAllocationId(state), true); + } + + private static AllocationId buildAllocationId(ShardRoutingState state) { + switch (state) { + case UNASSIGNED: + return null; + case INITIALIZING: + case STARTED: + return AllocationId.newInitializing(); + case RELOCATING: + AllocationId allocationId = AllocationId.newInitializing(); + return AllocationId.newRelocation(allocationId); + default: + throw new IllegalStateException("illegal state"); + } } } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java index 44e61c9bc84..0530477d3c7 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java @@ -76,6 +76,7 @@ public class UnassignedInfoTests extends ElasticsearchAllocationTestCase { UnassignedInfo read = new UnassignedInfo(StreamInput.wrap(out.bytes())); assertThat(read.getReason(), equalTo(meta.getReason())); assertThat(read.getTimestampInMillis(), equalTo(meta.getTimestampInMillis())); + assertThat(read.getMessage(), equalTo(meta.getMessage())); assertThat(read.getDetails(), equalTo(meta.getDetails())); } @@ -125,7 +126,7 @@ public class UnassignedInfoTests extends ElasticsearchAllocationTestCase { .build(); ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) .metaData(metaData) - .routingTable(RoutingTable.builder().addAsNewRestore(metaData.index("test"), new RestoreSource(new SnapshotId("rep1", "snp1"), "test"), new IntHashSet())).build(); + .routingTable(RoutingTable.builder().addAsNewRestore(metaData.index("test"), new RestoreSource(new SnapshotId("rep1", "snp1"), Version.CURRENT, "test"), new IntHashSet())).build(); for (ShardRouting shard : clusterState.routingNodes().shardsWithState(UNASSIGNED)) { assertThat(shard.unassignedInfo().getReason(), equalTo(UnassignedInfo.Reason.NEW_INDEX_RESTORED)); } @@ -138,7 +139,7 @@ public class UnassignedInfoTests extends ElasticsearchAllocationTestCase { .build(); ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) .metaData(metaData) - .routingTable(RoutingTable.builder().addAsRestore(metaData.index("test"), new RestoreSource(new SnapshotId("rep1", "snp1"), "test"))).build(); + .routingTable(RoutingTable.builder().addAsRestore(metaData.index("test"), new RestoreSource(new SnapshotId("rep1", "snp1"), Version.CURRENT, "test"))).build(); for (ShardRouting shard : clusterState.routingNodes().shardsWithState(UNASSIGNED)) { assertThat(shard.unassignedInfo().getReason(), equalTo(UnassignedInfo.Reason.EXISTING_INDEX_RESTORED)); } @@ -189,7 +190,7 @@ public class UnassignedInfoTests extends ElasticsearchAllocationTestCase { ShardRouting shard = TestShardRouting.newShardRouting("test", 1, null, null, null, true, ShardRoutingState.UNASSIGNED, 1, new 
UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); ShardRouting mutable = new ShardRouting(shard); assertThat(mutable.unassignedInfo(), notNullValue()); - mutable.assignToNode("test_node"); + mutable.initialize("test_node"); assertThat(mutable.state(), equalTo(ShardRoutingState.INITIALIZING)); assertThat(mutable.unassignedInfo(), notNullValue()); mutable.moveToStarted(); @@ -248,12 +249,13 @@ public class UnassignedInfoTests extends ElasticsearchAllocationTestCase { assertThat(clusterState.routingNodes().hasUnassigned(), equalTo(false)); // fail shard ShardRouting shardToFail = clusterState.routingNodes().shardsWithState(STARTED).get(0); - clusterState = ClusterState.builder(clusterState).routingResult(allocation.applyFailedShards(clusterState, ImmutableList.of(new FailedRerouteAllocation.FailedShard(shardToFail, "test fail")))).build(); + clusterState = ClusterState.builder(clusterState).routingResult(allocation.applyFailedShards(clusterState, ImmutableList.of(new FailedRerouteAllocation.FailedShard(shardToFail, "test fail", null)))).build(); // verify the reason and details assertThat(clusterState.routingNodes().hasUnassigned(), equalTo(true)); assertThat(clusterState.routingNodes().shardsWithState(UNASSIGNED).size(), equalTo(1)); assertThat(clusterState.routingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo(), notNullValue()); assertThat(clusterState.routingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getReason(), equalTo(UnassignedInfo.Reason.ALLOCATION_FAILED)); + assertThat(clusterState.routingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getMessage(), equalTo("test fail")); assertThat(clusterState.routingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getDetails(), equalTo("test fail")); assertThat(clusterState.routingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getTimestampInMillis(), greaterThan(0l)); } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java index be651c9221a..cfc7d5e1689 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java @@ -369,37 +369,37 @@ public class BalanceConfigurationTests extends ElasticsearchAllocationTestCase { switch (sr.id()) { case 0: if (sr.primary()) { - allocation.routingNodes().assign(sr, "node1"); + allocation.routingNodes().initialize(sr, "node1"); } else { - allocation.routingNodes().assign(sr, "node0"); + allocation.routingNodes().initialize(sr, "node0"); } break; case 1: if (sr.primary()) { - allocation.routingNodes().assign(sr, "node1"); + allocation.routingNodes().initialize(sr, "node1"); } else { - allocation.routingNodes().assign(sr, "node2"); + allocation.routingNodes().initialize(sr, "node2"); } break; case 2: if (sr.primary()) { - allocation.routingNodes().assign(sr, "node3"); + allocation.routingNodes().initialize(sr, "node3"); } else { - allocation.routingNodes().assign(sr, "node2"); + allocation.routingNodes().initialize(sr, "node2"); } break; case 3: if (sr.primary()) { - allocation.routingNodes().assign(sr, "node3"); + allocation.routingNodes().initialize(sr, "node3"); } else { - allocation.routingNodes().assign(sr, "node1"); + allocation.routingNodes().initialize(sr, "node1"); } break; case 4: if (sr.primary()) { - allocation.routingNodes().assign(sr, 
"node2"); + allocation.routingNodes().initialize(sr, "node2"); } else { - allocation.routingNodes().assign(sr, "node0"); + allocation.routingNodes().initialize(sr, "node0"); } break; } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java index fc2179936cd..569018c431d 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java @@ -256,9 +256,9 @@ public class FailedShardsRoutingTests extends ElasticsearchAllocationTestCase { logger.info("fail the first shard, will have no place to be rerouted to (single node), so stays unassigned"); prevRoutingTable = routingTable; - routingTable = strategy.applyFailedShard(clusterState, TestShardRouting.newShardRouting("test", 0, "node1", true, INITIALIZING, 0)).routingTable(); + ShardRouting firstShard = clusterState.routingNodes().node("node1").get(0); + routingTable = strategy.applyFailedShard(clusterState, firstShard).routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); - RoutingNodes routingNodes = clusterState.routingNodes(); assertThat(prevRoutingTable != routingTable, equalTo(true)); assertThat(routingTable.index("test").shards().size(), equalTo(1)); @@ -272,7 +272,7 @@ public class FailedShardsRoutingTests extends ElasticsearchAllocationTestCase { } logger.info("fail the shard again, see that nothing happens"); - assertThat(strategy.applyFailedShard(clusterState, TestShardRouting.newShardRouting("test", 0, "node1", true, INITIALIZING, 0)).changed(), equalTo(false)); + assertThat(strategy.applyFailedShard(clusterState, firstShard).changed(), equalTo(false)); } @Test @@ -318,7 +318,7 @@ public class FailedShardsRoutingTests extends ElasticsearchAllocationTestCase { String n = "node" + Integer.toString(randomInt(numberOfReplicas)); logger.info("failing shard on node [{}]", n); ShardRouting shardToFail = routingNodes.node(n).get(0); - failedShards.add(new FailedRerouteAllocation.FailedShard(new ShardRouting(shardToFail), null)); + failedShards.add(new FailedRerouteAllocation.FailedShard(new ShardRouting(shardToFail), null, null)); } routingTable = strategy.applyFailedShards(clusterState, failedShards).routingTable(); @@ -371,9 +371,9 @@ public class FailedShardsRoutingTests extends ElasticsearchAllocationTestCase { logger.info("fail the first shard, will start INITIALIZING on the second node"); prevRoutingTable = routingTable; - routingTable = strategy.applyFailedShard(clusterState, TestShardRouting.newShardRouting("test", 0, nodeHoldingPrimary, true, INITIALIZING, 0)).routingTable(); + final ShardRouting firstShard = clusterState.routingNodes().node("node1").get(0); + routingTable = strategy.applyFailedShard(clusterState, firstShard).routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); - RoutingNodes routingNodes = clusterState.routingNodes(); assertThat(prevRoutingTable != routingTable, equalTo(true)); assertThat(routingTable.index("test").shards().size(), equalTo(1)); @@ -387,7 +387,7 @@ public class FailedShardsRoutingTests extends ElasticsearchAllocationTestCase { } logger.info("fail the shard again, see that nothing happens"); - assertThat(strategy.applyFailedShard(clusterState, TestShardRouting.newShardRouting("test", 0, nodeHoldingPrimary, true, 
INITIALIZING, 0)).changed(), equalTo(false)); + assertThat(strategy.applyFailedShard(clusterState, firstShard).changed(), equalTo(false)); } @Test diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java new file mode 100644 index 00000000000..506d3d29e0f --- /dev/null +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java @@ -0,0 +1,120 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.cluster.routing.allocation; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.*; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.ElasticsearchAllocationTestCase; +import org.junit.Test; + +import java.util.Arrays; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.isEmptyOrNullString; +import static org.hamcrest.Matchers.nullValue; + +public class StartedShardsRoutingTests extends ElasticsearchAllocationTestCase { + + @Test + public void testStartedShardsMatching() { + AllocationService allocation = createAllocationService(); + + logger.info("--> building initial cluster state"); + final IndexMetaData indexMetaData = IndexMetaData.builder("test") + .settings(settings(Version.CURRENT)) + .numberOfShards(3).numberOfReplicas(0) + .build(); + ClusterState.Builder stateBuilder = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))) + .metaData(MetaData.builder().put(indexMetaData, false)); + + final ShardRouting initShard = TestShardRouting.newShardRouting("test", 0, "node1", randomBoolean(), ShardRoutingState.INITIALIZING, 1); + final ShardRouting startedShard = TestShardRouting.newShardRouting("test", 1, "node2", randomBoolean(), ShardRoutingState.STARTED, 1); + final ShardRouting relocatingShard = TestShardRouting.newShardRouting("test", 2, "node1", "node2", randomBoolean(), ShardRoutingState.RELOCATING, 1); + stateBuilder.routingTable(RoutingTable.builder().add(IndexRoutingTable.builder("test") + .addIndexShard(new IndexShardRoutingTable.Builder(initShard.shardId(), true).addShard(initShard).build()) + .addIndexShard(new IndexShardRoutingTable.Builder(startedShard.shardId(), true).addShard(startedShard).build()) + .addIndexShard(new IndexShardRoutingTable.Builder(relocatingShard.shardId(), true).addShard(relocatingShard).build()))); + + 
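+ // A note on the ids in play (an illustrative summary, not part of the test logic): per the
+ // buildAllocationId helper added to TestShardRouting in this patch, initShard and startedShard
+ // carry AllocationId.newInitializing(), while relocatingShard carries a relocation pair built
+ // via AllocationId.newRelocation(AllocationId.newInitializing()); the id that a started-shard
+ // event from the relocation target must present is
+ // AllocationId.newTargetRelocation(relocatingShard.allocationId()), which is exactly what the
+ // relocation step below constructs.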
ClusterState state = stateBuilder.build(); + + logger.info("--> test starting of shard"); + + RoutingAllocation.Result result = allocation.applyStartedShards(state, Arrays.asList( + TestShardRouting.newShardRouting(initShard.index(), initShard.id(), initShard.currentNodeId(), initShard.relocatingNodeId(), initShard.primary(), + ShardRoutingState.INITIALIZING, initShard.allocationId(), randomInt())), false); + assertTrue("failed to start " + initShard + "\ncurrent routing table:" + result.routingTable().prettyPrint(), result.changed()); + assertTrue(initShard + " isn't started \ncurrent routing table:" + result.routingTable().prettyPrint(), + result.routingTable().index("test").shard(initShard.id()).allShardsStarted()); + + + logger.info("--> testing shard variants that shouldn't match the initializing shard"); + + result = allocation.applyStartedShards(state, Arrays.asList( + TestShardRouting.newShardRouting(initShard.index(), initShard.id(), initShard.currentNodeId(), initShard.relocatingNodeId(), initShard.primary(), + ShardRoutingState.INITIALIZING, 1)), false); + assertFalse("wrong allocation id flag shouldn't start shard " + initShard + "\ncurrent routing table:" + result.routingTable().prettyPrint(), result.changed()); + + result = allocation.applyStartedShards(state, Arrays.asList( + TestShardRouting.newShardRouting(initShard.index(), initShard.id(), "some_node", initShard.currentNodeId(), initShard.primary(), + ShardRoutingState.INITIALIZING, AllocationId.newTargetRelocation(AllocationId.newRelocation(initShard.allocationId())) + , 1)), false); + assertFalse("relocating shard from node shouldn't start shard " + initShard + "\ncurrent routing table:" + result.routingTable().prettyPrint(), result.changed()); + + + + logger.info("--> testing double starting"); + + result = allocation.applyStartedShards(state, Arrays.asList( + TestShardRouting.newShardRouting(startedShard.index(), startedShard.id(), startedShard.currentNodeId(), startedShard.relocatingNodeId(), startedShard.primary(), + ShardRoutingState.INITIALIZING, startedShard.allocationId(), 1)), false); + assertFalse("duplicate starting of the same shard should be ignored \ncurrent routing table:" + result.routingTable().prettyPrint(), result.changed()); + + logger.info("--> testing starting of relocating shards"); + final AllocationId targetAllocationId = AllocationId.newTargetRelocation(relocatingShard.allocationId()); + result = allocation.applyStartedShards(state, Arrays.asList( + TestShardRouting.newShardRouting(relocatingShard.index(), relocatingShard.id(), relocatingShard.relocatingNodeId(), relocatingShard.currentNodeId(), relocatingShard.primary(), + ShardRoutingState.INITIALIZING, targetAllocationId, randomInt())), false); + + assertTrue("failed to start " + relocatingShard + "\ncurrent routing table:" + result.routingTable().prettyPrint(), result.changed()); + ShardRouting shardRouting = result.routingTable().index("test").shard(relocatingShard.id()).getShards().get(0); + assertThat(shardRouting.state(), equalTo(ShardRoutingState.STARTED)); + assertThat(shardRouting.currentNodeId(), equalTo("node2")); + assertThat(shardRouting.relocatingNodeId(), nullValue()); + + logger.info("--> testing shard variants that shouldn't match the initializing relocating shard"); + + result = allocation.applyStartedShards(state, Arrays.asList( + TestShardRouting.newShardRouting(relocatingShard.index(), relocatingShard.id(), relocatingShard.relocatingNodeId(), relocatingShard.currentNodeId(), relocatingShard.primary(), + 
ShardRoutingState.INITIALIZING, relocatingShard.version()))); + assertFalse("wrong allocation id shouldn't start shard " + relocatingShard + "\ncurrent routing table:" + result.routingTable().prettyPrint(), result.changed()); + + result = allocation.applyStartedShards(state, Arrays.asList( + TestShardRouting.newShardRouting(relocatingShard.index(), relocatingShard.id(), relocatingShard.relocatingNodeId(), relocatingShard.currentNodeId(), relocatingShard.primary(), + ShardRoutingState.INITIALIZING, relocatingShard.allocationId(), randomInt())), false); + assertFalse("wrong allocation id shouldn't start shard even if relocatingId==shard.id " + relocatingShard + "\ncurrent routing table:" + result.routingTable().prettyPrint(), result.changed()); + + } +} diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesTests.java index 1db310c4770..f675be66b4d 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesTests.java @@ -146,7 +146,7 @@ public class MockDiskUsagesTests extends ElasticsearchIntegrationTest { usage.getTotalBytes(), usage.getFreeBytes(), usage.getFreeBytes()); paths[0] = path; FsInfo fsInfo = new FsInfo(System.currentTimeMillis(), paths); - return new NodeStats(new DiscoveryNode(nodeName, null, Version.V_2_0_0), + return new NodeStats(new DiscoveryNode(nodeName, null, Version.V_2_0_0_beta1), System.currentTimeMillis(), null, null, null, null, null, fsInfo, diff --git a/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java b/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java index 72ae1ffad18..d54f931e8d8 100644 --- a/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java @@ -352,33 +352,119 @@ public class RoutingIteratorTests extends ElasticsearchAllocationTestCase { OperationRouting operationRouting = new OperationRouting(Settings.Builder.EMPTY_SETTINGS, new AwarenessAllocationDecider()); - GroupShardsIterator shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, new String[]{"test"}, null, "_shards:0"); + GroupShardsIterator shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, null, "_shards:0"); assertThat(shardIterators.size(), equalTo(1)); assertThat(shardIterators.iterator().next().shardId().id(), equalTo(0)); - shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, new String[]{"test"}, null, "_shards:1"); + shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, null, "_shards:1"); assertThat(shardIterators.size(), equalTo(1)); assertThat(shardIterators.iterator().next().shardId().id(), equalTo(1)); //check node preference, first without preference to see they switch - shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, new String[]{"test"}, null, "_shards:0;"); + shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, null, "_shards:0;"); assertThat(shardIterators.size(), equalTo(1)); assertThat(shardIterators.iterator().next().shardId().id(), equalTo(0)); String firstRoundNodeId = shardIterators.iterator().next().nextOrNull().currentNodeId(); -
shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, new String[]{"test"}, null, "_shards:0"); + shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, null, "_shards:0"); assertThat(shardIterators.size(), equalTo(1)); assertThat(shardIterators.iterator().next().shardId().id(), equalTo(0)); assertThat(shardIterators.iterator().next().nextOrNull().currentNodeId(), not(equalTo(firstRoundNodeId))); - shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, new String[]{"test"}, null, "_shards:0;_prefer_node:node1"); + shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, null, "_shards:0;_prefer_node:node1"); assertThat(shardIterators.size(), equalTo(1)); assertThat(shardIterators.iterator().next().shardId().id(), equalTo(0)); assertThat(shardIterators.iterator().next().nextOrNull().currentNodeId(), equalTo("node1")); - shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, new String[]{"test"}, null, "_shards:0;_prefer_node:node1"); + shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, null, "_shards:0;_prefer_node:node1"); assertThat(shardIterators.size(), equalTo(1)); assertThat(shardIterators.iterator().next().shardId().id(), equalTo(0)); assertThat(shardIterators.iterator().next().nextOrNull().currentNodeId(), equalTo("node1")); } + + @Test + public void testReplicaShardPreferenceIters() throws Exception { + AllocationService strategy = createAllocationService(settingsBuilder() + .put("cluster.routing.allocation.concurrent_recoveries", 10) + .build()); + + OperationRouting operationRouting = new OperationRouting(Settings.Builder.EMPTY_SETTINGS, new AwarenessAllocationDecider()); + + MetaData metaData = MetaData.builder() + .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(2)) + .build(); + + RoutingTable routingTable = RoutingTable.builder() + .addAsNew(metaData.index("test")) + .build(); + + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build(); + + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() + .put(newNode("node1")) + .put(newNode("node2")) + .put(newNode("node3")) + .localNodeId("node1") + ).build(); + routingTable = strategy.reroute(clusterState).routingTable(); + clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); + + routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable(); + clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); + + // When replicas haven't initialized, it comes back with the primary first, then initializing replicas + GroupShardsIterator shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, null, "_replica_first"); + assertThat(shardIterators.size(), equalTo(2)); // two potential shards + ShardIterator iter = shardIterators.iterator().next(); + assertThat(iter.size(), equalTo(3)); // three potential candidates for the shard + ShardRouting routing = iter.nextOrNull(); + assertNotNull(routing); + assertThat(routing.shardId().id(), anyOf(equalTo(0), equalTo(1))); + assertTrue(routing.primary()); // replicas haven't initialized yet, so primary is first + assertTrue(routing.started()); + routing = iter.nextOrNull(); + assertThat(routing.shardId().id(), anyOf(equalTo(0), 
equalTo(1))); + assertFalse(routing.primary()); + assertTrue(routing.initializing()); + routing = iter.nextOrNull(); + assertThat(routing.shardId().id(), anyOf(equalTo(0), equalTo(1))); + assertFalse(routing.primary()); + assertTrue(routing.initializing()); + + routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable(); + clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); + + routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable(); + clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); + + + shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, null, "_replica"); + assertThat(shardIterators.size(), equalTo(2)); // two potential shards + iter = shardIterators.iterator().next(); + assertThat(iter.size(), equalTo(2)); // two potential replicas for the shard + routing = iter.nextOrNull(); + assertNotNull(routing); + assertThat(routing.shardId().id(), anyOf(equalTo(0), equalTo(1))); + assertFalse(routing.primary()); + routing = iter.nextOrNull(); + assertThat(routing.shardId().id(), anyOf(equalTo(0), equalTo(1))); + assertFalse(routing.primary()); + + shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, null, "_replica_first"); + assertThat(shardIterators.size(), equalTo(2)); // two potential shards + iter = shardIterators.iterator().next(); + assertThat(iter.size(), equalTo(3)); // three potential candidates for the shard + routing = iter.nextOrNull(); + assertNotNull(routing); + assertThat(routing.shardId().id(), anyOf(equalTo(0), equalTo(1))); + assertFalse(routing.primary()); + routing = iter.nextOrNull(); + assertThat(routing.shardId().id(), anyOf(equalTo(0), equalTo(1))); + assertFalse(routing.primary()); + // finally the primary + routing = iter.nextOrNull(); + assertThat(routing.shardId().id(), anyOf(equalTo(0), equalTo(1))); + assertTrue(routing.primary()); + } + } \ No newline at end of file diff --git a/core/src/test/java/org/elasticsearch/codecs/CodecTests.java b/core/src/test/java/org/elasticsearch/codecs/CodecTests.java index 3dfbf8acc82..6347dc5da72 100644 --- a/core/src/test/java/org/elasticsearch/codecs/CodecTests.java +++ b/core/src/test/java/org/elasticsearch/codecs/CodecTests.java @@ -51,11 +51,11 @@ public class CodecTests extends ElasticsearchSingleNodeTest { DocumentMapperParser parser = indexService.mapperService().documentMapperParser(); try { parser.parse(mapping); - if (v.onOrAfter(Version.V_2_0_0)) { + if (v.onOrAfter(Version.V_2_0_0_beta1)) { fail("Elasticsearch 2.0 should not support custom postings formats"); } } catch (MapperParsingException e) { - if (v.before(Version.V_2_0_0)) { + if (v.before(Version.V_2_0_0_beta1)) { // Elasticsearch 1.x should ignore custom postings formats throw e; } @@ -74,11 +74,11 @@ public class CodecTests extends ElasticsearchSingleNodeTest { DocumentMapperParser parser = indexService.mapperService().documentMapperParser(); try { parser.parse(mapping); - if (v.onOrAfter(Version.V_2_0_0)) { + if (v.onOrAfter(Version.V_2_0_0_beta1)) { fail("Elasticsearch 2.0 should not support custom postings formats"); } } catch (MapperParsingException e) { - if (v.before(Version.V_2_0_0)) { + if (v.before(Version.V_2_0_0_beta1)) { // Elasticsearch 1.x should ignore custom postings formats throw e; } diff --git 
a/core/src/test/java/org/elasticsearch/common/lucene/store/ByteArrayIndexInputTests.java b/core/src/test/java/org/elasticsearch/common/lucene/store/ByteArrayIndexInputTests.java new file mode 100644 index 00000000000..0a8492fb7b3 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/lucene/store/ByteArrayIndexInputTests.java @@ -0,0 +1,138 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.lucene.store; + +import com.google.common.base.Charsets; +import org.apache.lucene.store.IndexInput; +import org.elasticsearch.test.ElasticsearchTestCase; +import org.junit.Test; + +import java.io.IOException; + +import static org.hamcrest.Matchers.containsString; + +public class ByteArrayIndexInputTests extends ElasticsearchTestCase { + + @Test + public void testRandomReads() throws IOException { + for (int i = 0; i < 100; i++) { + byte[] input = randomUnicodeOfLength(randomIntBetween(1, 1000)).getBytes(Charsets.UTF_8); + ByteArrayIndexInput indexInput = new ByteArrayIndexInput("test", input); + assertEquals(input.length, indexInput.length()); + assertEquals(0, indexInput.getFilePointer()); + byte[] output = randomReadAndSlice(indexInput, input.length); + assertArrayEquals(input, output); + } + } + + @Test + public void testRandomOverflow() throws IOException { + for (int i = 0; i < 100; i++) { + byte[] input = randomUnicodeOfLength(randomIntBetween(1, 1000)).getBytes(Charsets.UTF_8); + ByteArrayIndexInput indexInput = new ByteArrayIndexInput("test", input); + int firstReadLen = randomIntBetween(0, input.length - 1); + randomReadAndSlice(indexInput, firstReadLen); + int bytesLeft = input.length - firstReadLen; + try { + // read using int size + int secondReadLen = bytesLeft + randomIntBetween(1, 100); + indexInput.readBytes(new byte[secondReadLen], 0, secondReadLen); + fail(); + } catch (IOException ex) { + assertThat(ex.getMessage(), containsString("EOF")); + } + } + } + + @Test + public void testSeekOverflow() throws IOException { + for (int i = 0; i < 100; i++) { + byte[] input = randomUnicodeOfLength(randomIntBetween(1, 1000)).getBytes(Charsets.UTF_8); + ByteArrayIndexInput indexInput = new ByteArrayIndexInput("test", input); + int firstReadLen = randomIntBetween(0, input.length - 1); + randomReadAndSlice(indexInput, firstReadLen); + try { + switch (randomIntBetween(0, 2)) { + case 0: + indexInput.seek(Integer.MAX_VALUE + 4L); + break; + case 1: + indexInput.seek(-randomIntBetween(1, 10)); + break; + case 2: + int seek = input.length + randomIntBetween(1, 100); + indexInput.seek(seek); + break; + default: + fail(); + } + fail(); + } catch (IOException ex) { + assertThat(ex.getMessage(), containsString("EOF")); + } catch (IllegalArgumentException ex) { + assertThat(ex.getMessage(), containsString("negative 
position")); + } + } + } + + private byte[] randomReadAndSlice(IndexInput indexInput, int length) throws IOException { + int readPos = (int) indexInput.getFilePointer(); + byte[] output = new byte[length]; + while (readPos < length) { + switch (randomIntBetween(0, 3)) { + case 0: + // Read by one byte at a time + output[readPos++] = indexInput.readByte(); + break; + case 1: + // Read several bytes into target + int len = randomIntBetween(1, length - readPos); + indexInput.readBytes(output, readPos, len); + readPos += len; + break; + case 2: + // Read several bytes into 0-offset target + len = randomIntBetween(1, length - readPos); + byte[] temp = new byte[len]; + indexInput.readBytes(temp, 0, len); + System.arraycopy(temp, 0, output, readPos, len); + readPos += len; + break; + case 3: + // Read using slice + len = randomIntBetween(1, length - readPos); + IndexInput slice = indexInput.slice("slice (" + readPos + ", " + len + ") of " + indexInput.toString(), readPos, len); + temp = randomReadAndSlice(slice, len); + // assert that position in the original input didn't change + assertEquals(readPos, indexInput.getFilePointer()); + System.arraycopy(temp, 0, output, readPos, len); + readPos += len; + indexInput.seek(readPos); + assertEquals(readPos, indexInput.getFilePointer()); + break; + default: + fail(); + } + assertEquals((long) readPos, indexInput.getFilePointer()); + } + return output; + } +} + diff --git a/core/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java b/core/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java index 74ed24a5ec4..dc3c66b4e83 100644 --- a/core/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java +++ b/core/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.common.unit; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; @@ -143,16 +142,6 @@ public class FuzzinessTests extends ElasticsearchTestCase { public void testAuto() { final int codePoints = randomIntBetween(0, 10); String string = randomRealisticUnicodeOfCodepointLength(codePoints); - if (codePoints <= 2) { - assertThat(Fuzziness.AUTO.asDistance(string), equalTo(0)); - assertThat(Fuzziness.fromSimilarity(Fuzziness.AUTO.asSimilarity(string)).asDistance(string), equalTo(0)); - } else if (codePoints > 5) { - assertThat(Fuzziness.AUTO.asDistance(string), equalTo(2)); - assertThat(Fuzziness.fromSimilarity(Fuzziness.AUTO.asSimilarity(string)).asDistance(string), equalTo(2)); - } else { - assertThat(Fuzziness.AUTO.asDistance(string), equalTo(1)); - assertThat(Fuzziness.fromSimilarity(Fuzziness.AUTO.asSimilarity(string)).asDistance(string), equalTo(1)); - } assertThat(Fuzziness.AUTO.asByte(), equalTo((byte) 1)); assertThat(Fuzziness.AUTO.asInt(), equalTo(1)); assertThat(Fuzziness.AUTO.asFloat(), equalTo(1f)); @@ -173,28 +162,4 @@ public class FuzzinessTests extends ElasticsearchTestCase { } } - @Test - @LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/10638") - public void testSimilarityToDistance() { - assertThat(Fuzziness.fromSimilarity(0.5f).asDistance("ab"), equalTo(1)); - assertThat(Fuzziness.fromSimilarity(0.66f).asDistance("abcefg"), equalTo(2)); - assertThat(Fuzziness.fromSimilarity(0.8f).asDistance("ab"), equalTo(0)); - assertThat(Fuzziness.fromSimilarity(0.8f).asDistance("abcefg"), equalTo(1)); - assertThat((double) 
Fuzziness.ONE.asSimilarity("abcefg"), closeTo(0.8f, 0.05)); - assertThat((double) Fuzziness.TWO.asSimilarity("abcefg"), closeTo(0.66f, 0.05)); - assertThat((double) Fuzziness.ONE.asSimilarity("ab"), closeTo(0.5f, 0.05)); - - int iters = randomIntBetween(100, 1000); - for (int i = 0; i < iters; i++) { - Fuzziness fuzziness = Fuzziness.fromEdits(between(1, 2)); - String string = rarely() ? randomRealisticUnicodeOfLengthBetween(2, 4) : - randomRealisticUnicodeOfLengthBetween(4, 10); - float similarity = fuzziness.asSimilarity(string); - if (similarity != 0.0f) { - Fuzziness similarityBased = Fuzziness.build(similarity); - assertThat((double) similarityBased.asSimilarity(string), closeTo(similarity, 0.05)); - assertThat(similarityBased.asDistance(string), equalTo(Math.min(2, fuzziness.asDistance(string)))); - } - } - } } diff --git a/core/src/test/java/org/elasticsearch/common/util/URIPatternTests.java b/core/src/test/java/org/elasticsearch/common/util/URIPatternTests.java new file mode 100644 index 00000000000..5c4b2a7c701 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/util/URIPatternTests.java @@ -0,0 +1,52 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.common.util; + +import org.elasticsearch.test.ElasticsearchTestCase; +import org.junit.Test; + +import java.net.URI; + +public class URIPatternTests extends ElasticsearchTestCase { + + @Test + public void testURIPattern() throws Exception { + assertTrue(new URIPattern("http://test.local/").match(new URI("http://test.local/"))); + assertFalse(new URIPattern("http://test.local/somepath").match(new URI("http://test.local/"))); + assertTrue(new URIPattern("http://test.local/somepath").match(new URI("http://test.local/somepath"))); + assertFalse(new URIPattern("http://test.local/somepath").match(new URI("http://test.local/somepath/more"))); + assertTrue(new URIPattern("http://test.local/somepath/*").match(new URI("http://test.local/somepath/more"))); + assertTrue(new URIPattern("http://test.local/somepath/*").match(new URI("http://test.local/somepath/more/andmore"))); + assertTrue(new URIPattern("http://test.local/somepath/*").match(new URI("http://test.local/somepath/more/andmore/../bitmore"))); + assertFalse(new URIPattern("http://test.local/somepath/*").match(new URI("http://test.local/somepath/../more"))); + assertFalse(new URIPattern("http://test.local/somepath/*").match(new URI("http://test.local/"))); + assertFalse(new URIPattern("http://test.local/somepath/*").match(new URI("https://test.local/somepath/more"))); + assertFalse(new URIPattern("http://test.local:1234/somepath/*").match(new URI("http://test.local/somepath/more"))); + assertFalse(new URIPattern("http://test.local:1234/somepath/*").match(new URI("http://test.local/somepath/more"))); + assertTrue(new URIPattern("http://test.local:1234/somepath/*").match(new URI("http://test.local:1234/somepath/more"))); + assertTrue(new URIPattern("http://*.local:1234/somepath/*").match(new URI("http://foobar.local:1234/somepath/more"))); + assertFalse(new URIPattern("http://*.local:1234/somepath/*").match(new URI("http://foobar.local:2345/somepath/more"))); + assertTrue(new URIPattern("http://*.local:*/somepath/*").match(new URI("http://foobar.local:2345/somepath/more"))); + assertFalse(new URIPattern("http://*.local:*/somepath/*").match(new URI("http://foobar.local:2345/somepath/more?par=val"))); + assertTrue(new URIPattern("http://*.local:*/somepath/*?*").match(new URI("http://foobar.local:2345/somepath/more?par=val"))); + assertFalse(new URIPattern("http://*.local:*/somepath/*?*").match(new URI("http://foobar.local:2345/somepath/more?par=val#frag"))); + assertTrue(new URIPattern("http://*.local:*/somepath/*?*#*").match(new URI("http://foobar.local:2345/somepath/more?par=val#frag"))); + assertTrue(new URIPattern("http://*.local/somepath/*?*#*").match(new URI("http://foobar.local/somepath/more"))); + } +} diff --git a/core/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java b/core/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java index 38477eda670..b74a016460f 100644 --- a/core/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java @@ -253,7 +253,7 @@ public class PrioritizedExecutorsTests extends ElasticsearchTestCase { public void run() { invoked.countDown(); } - }, timer, TimeValue.timeValueMillis(1000), new Runnable() { + }, timer, TimeValue.timeValueHours(1), new Runnable() { @Override public void run() { // We should never get here diff --git 
a/core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java index fc00f931d7a..ec76100c9b4 100644 --- a/core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java @@ -20,14 +20,17 @@ package org.elasticsearch.common.xcontent.builder; import com.google.common.collect.Lists; + import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.FastCharArrayWriter; +import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.xcontent.*; import org.elasticsearch.test.ElasticsearchTestCase; import org.junit.Test; import java.io.IOException; +import java.nio.file.Path; import java.util.*; import static org.elasticsearch.common.xcontent.XContentBuilder.FieldCaseConversion.CAMELCASE; @@ -260,4 +263,60 @@ public class XContentBuilderTests extends ElasticsearchTestCase { assertThat(i, equalTo(terms.size())); } + + @Test + public void testHandlingOfPath() throws IOException { + Path path = PathUtils.get("path"); + checkPathSerialization(path); + } + + @Test + public void testHandlingOfPath_relative() throws IOException { + Path path = PathUtils.get("..", "..", "path"); + checkPathSerialization(path); + } + + @Test + public void testHandlingOfPath_absolute() throws IOException { + Path path = createTempDir().toAbsolutePath(); + checkPathSerialization(path); + } + + private void checkPathSerialization(Path path) throws IOException { + XContentBuilder pathBuilder = XContentFactory.contentBuilder(XContentType.JSON); + pathBuilder.startObject().field("file", path).endObject(); + + XContentBuilder stringBuilder = XContentFactory.contentBuilder(XContentType.JSON); + stringBuilder.startObject().field("file", path.toString()).endObject(); + + assertThat(pathBuilder.string(), equalTo(stringBuilder.string())); + } + + @Test + public void testHandlingOfPath_XContentBuilderStringName() throws IOException { + Path path = PathUtils.get("path"); + XContentBuilderString name = new XContentBuilderString("file"); + + XContentBuilder pathBuilder = XContentFactory.contentBuilder(XContentType.JSON); + pathBuilder.startObject().field(name, path).endObject(); + + XContentBuilder stringBuilder = XContentFactory.contentBuilder(XContentType.JSON); + stringBuilder.startObject().field(name, path.toString()).endObject(); + + assertThat(pathBuilder.string(), equalTo(stringBuilder.string())); + } + + @Test + public void testHandlingOfCollectionOfPaths() throws IOException { + Path path = PathUtils.get("path"); + + XContentBuilder pathBuilder = XContentFactory.contentBuilder(XContentType.JSON); + pathBuilder.startObject().field("file", Arrays.asList(path)).endObject(); + + XContentBuilder stringBuilder = XContentFactory.contentBuilder(XContentType.JSON); + stringBuilder.startObject().field("file", Arrays.asList(path.toString())).endObject(); + + assertThat(pathBuilder.string(), equalTo(stringBuilder.string())); + } + } diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsTests.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsTests.java index c12f57dfb0f..0755b9ab6fd 100644 --- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsTests.java +++ 
b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsTests.java @@ -240,12 +240,16 @@ public class DiscoveryWithServiceDisruptionsTests extends ElasticsearchIntegrati /** Verify that nodes fault detection works after master (re) election */ @Test public void testNodesFDAfterMasterReelection() throws Exception { - startCluster(3); + startCluster(4); - logger.info("stopping current master"); + logger.info("--> stopping current master"); internalCluster().stopCurrentMasterNode(); - ensureStableCluster(2); + ensureStableCluster(3); + + logger.info("--> reducing min master nodes to 2"); + assertAcked(client().admin().cluster().prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, 2)).get()); String master = internalCluster().getMasterName(); String nonMaster = null; @@ -259,7 +263,7 @@ public class DiscoveryWithServiceDisruptionsTests extends ElasticsearchIntegrati addRandomIsolation(nonMaster).startDisrupting(); logger.info("--> waiting for master to remove it"); - ensureStableCluster(1, master); + ensureStableCluster(2, master); } /** @@ -703,12 +707,13 @@ public class DiscoveryWithServiceDisruptionsTests extends ElasticsearchIntegrati } /** - * Test that a document which is indexed on the majority side of a partition, is available from the minory side, + * Test that a document which is indexed on the majority side of a partition, is available from the minority side, * once the partition is healed * * @throws Exception */ @Test + @TestLogging(value = "cluster.service:TRACE") public void testRejoinDocumentExistsInAllShardCopies() throws Exception { List<String> nodes = startCluster(3); diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java new file mode 100644 index 00000000000..2eeef507bce --- /dev/null +++ b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java @@ -0,0 +1,568 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.elasticsearch.discovery.zen; + +import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.RoutingService; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation; +import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.common.transport.LocalTransportAddress; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.BaseFuture; +import org.elasticsearch.discovery.DiscoverySettings; +import org.elasticsearch.discovery.zen.membership.MembershipAction; +import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.test.ElasticsearchTestCase; +import org.elasticsearch.test.cluster.TestClusterService; +import org.elasticsearch.test.junit.annotations.TestLogging; +import org.junit.Before; + +import java.util.*; +import java.util.concurrent.*; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; + +@TestLogging("discovery.zen:TRACE") +public class NodeJoinControllerTests extends ElasticsearchTestCase { + + private TestClusterService clusterService; + private NodeJoinController nodeJoinController; + + @Before + public void setUp() throws Exception { + super.setUp(); + clusterService = new TestClusterService(); + final DiscoveryNodes initialNodes = clusterService.state().nodes(); + final DiscoveryNode localNode = initialNodes.localNode(); + // make sure we have a master + clusterService.setState(ClusterState.builder(clusterService.state()).nodes(DiscoveryNodes.builder(initialNodes).masterNodeId(localNode.id()))); + nodeJoinController = new NodeJoinController(clusterService, new NoopRoutingService(Settings.EMPTY), + new DiscoverySettings(Settings.EMPTY, new NodeSettingsService(Settings.EMPTY)), Settings.EMPTY); + } + + public void testSimpleJoinAccumulation() throws InterruptedException, ExecutionException { + List<DiscoveryNode> nodes = new ArrayList<>(); + nodes.add(clusterService.localNode()); + + int nodeId = 0; + for (int i = randomInt(5); i > 0; i--) { + DiscoveryNode node = newNode(nodeId++); + nodes.add(node); + joinNode(node); + } + nodeJoinController.startAccumulatingJoins(); + ArrayList<Future<Void>> pendingJoins = new ArrayList<>(); + for (int i = randomInt(5); i > 0; i--) { + DiscoveryNode node = newNode(nodeId++); + nodes.add(node); + pendingJoins.add(joinNodeAsync(node)); + } + nodeJoinController.stopAccumulatingJoins(); + for (int i = randomInt(5); i > 0; i--) { + DiscoveryNode node = newNode(nodeId++); + nodes.add(node); + joinNode(node); + } + assertNodesInCurrentState(nodes); + for (Future<Void> joinFuture : pendingJoins) { + assertThat(joinFuture.isDone(), equalTo(true)); + } + }
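+ // A sketch of the accumulation contract exercised above (illustrative, not part of the
+ // original test): joins that arrive while the controller is accumulating are parked and only
+ // processed once accumulation stops, whereas joins outside an accumulation window are
+ // processed immediately, e.g.:
+ //
+ //   nodeJoinController.startAccumulatingJoins();
+ //   Future<Void> join = joinNodeAsync(node);     // parked; join.isDone() is false
+ //   nodeJoinController.stopAccumulatingJoins();  // parked joins are now processed
+ //   join.get();                                  // returns once the node is in the cluster state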
+ + public void testFailingJoinsWhenNotMaster() throws ExecutionException, InterruptedException { + // remove current master flag + DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterService.state().nodes()).masterNodeId(null); + clusterService.setState(ClusterState.builder(clusterService.state()).nodes(nodes)); + int nodeId = 0; + try { + joinNode(newNode(nodeId++)); + fail("failed to fail node join when not a master"); + } catch (ExecutionException e) { + assertThat(e.getCause(), instanceOf(NotMasterException.class)); + } + + logger.debug("--> testing joins fail post accumulation"); + ArrayList<Future<Void>> pendingJoins = new ArrayList<>(); + nodeJoinController.startAccumulatingJoins(); + for (int i = 1 + randomInt(5); i > 0; i--) { + DiscoveryNode node = newNode(nodeId++); + final Future<Void> future = joinNodeAsync(node); + pendingJoins.add(future); + assertThat(future.isDone(), equalTo(false)); + } + nodeJoinController.stopAccumulatingJoins(); + for (Future<Void> future : pendingJoins) { + try { + future.get(); + fail("failed to fail accumulated node join when not a master"); + } catch (ExecutionException e) { + assertThat(e.getCause(), instanceOf(NotMasterException.class)); + } + } + } + + public void testSimpleMasterElectionWithoutRequiredJoins() throws InterruptedException, ExecutionException { + DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterService.state().nodes()).masterNodeId(null); + clusterService.setState(ClusterState.builder(clusterService.state()).nodes(nodes)); + int nodeId = 0; + final int requiredJoins = 0; + logger.debug("--> using requiredJoins [{}]", requiredJoins); + // initial (failing) joins shouldn't count + for (int i = randomInt(5); i > 0; i--) { + try { + joinNode(newNode(nodeId++)); + fail("failed to fail node join when not a master"); + } catch (ExecutionException e) { + assertThat(e.getCause(), instanceOf(NotMasterException.class)); + } + } + + nodeJoinController.startAccumulatingJoins(); + final SimpleFuture electionFuture = new SimpleFuture("master election"); + final Thread masterElection = new Thread(new AbstractRunnable() { + @Override + public void onFailure(Throwable t) { + logger.error("unexpected error from waitToBeElectedAsMaster", t); + electionFuture.markAsFailed(t); + } + + @Override + protected void doRun() throws Exception { + nodeJoinController.waitToBeElectedAsMaster(requiredJoins, TimeValue.timeValueHours(30), new NodeJoinController.Callback() { + @Override + public void onElectedAsMaster(ClusterState state) { + assertThat("callback called with elected as master, but state disagrees", state.nodes().localNodeMaster(), equalTo(true)); + electionFuture.markAsDone(); + } + + @Override + public void onFailure(Throwable t) { + logger.error("unexpected error while waiting to be elected as master", t); + electionFuture.markAsFailed(t); + } + }); + } + }); + masterElection.start(); + + logger.debug("--> requiredJoins is set to 0. verifying election finished");
verifying election finished"); + electionFuture.get(); + } + + public void testSimpleMasterElection() throws InterruptedException, ExecutionException { + DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterService.state().nodes()).masterNodeId(null); + clusterService.setState(ClusterState.builder(clusterService.state()).nodes(nodes)); + int nodeId = 0; + final int requiredJoins = 1 + randomInt(5); + logger.debug("--> using requiredJoins [{}]", requiredJoins); + // initial (failing) joins shouldn't count + for (int i = randomInt(5); i > 0; i--) { + try { + joinNode(newNode(nodeId++)); + fail("failed to fail node join when not a master"); + } catch (ExecutionException e) { + assertThat(e.getCause(), instanceOf(NotMasterException.class)); + } + } + + nodeJoinController.startAccumulatingJoins(); + final SimpleFuture electionFuture = new SimpleFuture("master election"); + final Thread masterElection = new Thread(new AbstractRunnable() { + @Override + public void onFailure(Throwable t) { + logger.error("unexpected error from waitToBeElectedAsMaster", t); + electionFuture.markAsFailed(t); + } + + @Override + protected void doRun() throws Exception { + nodeJoinController.waitToBeElectedAsMaster(requiredJoins, TimeValue.timeValueHours(30), new NodeJoinController.Callback() { + @Override + public void onElectedAsMaster(ClusterState state) { + assertThat("callback called with elected as master, but state disagrees", state.nodes().localNodeMaster(), equalTo(true)); + electionFuture.markAsDone(); + } + + @Override + public void onFailure(Throwable t) { + logger.error("unexpected error while waiting to be elected as master", t); + electionFuture.markAsFailed(t); + } + }); + } + }); + masterElection.start(); + assertThat("election finished immediately but required joins is [" + requiredJoins + "]", electionFuture.isDone(), equalTo(false)); + + final int initialJoins = randomIntBetween(0, requiredJoins - 1); + final ArrayList pendingJoins = new ArrayList<>(); + ArrayList nodesToJoin = new ArrayList<>(); + for (int i = 0; i < initialJoins; i++) { + DiscoveryNode node = newNode(nodeId++, true); + for (int j = 1 + randomInt(3); j > 0; j--) { + nodesToJoin.add(node); + } + } + + // data nodes shouldn't count + for (int i = 0; i < requiredJoins; i++) { + DiscoveryNode node = newNode(nodeId++, false); + for (int j = 1 + randomInt(3); j > 0; j--) { + nodesToJoin.add(node); + } + } + + // add + + Collections.shuffle(nodesToJoin); + logger.debug("--> joining [{}] unique master nodes. 
Total of [{}] join requests", initialJoins, nodesToJoin.size()); + for (DiscoveryNode node : nodesToJoin) { + pendingJoins.add(joinNodeAsync(node)); + } + + logger.debug("--> asserting master election didn't finish yet"); + assertThat("election finished after [" + initialJoins + "] master nodes but required joins is [" + requiredJoins + "]", electionFuture.isDone(), equalTo(false)); + + final int finalJoins = requiredJoins - initialJoins + randomInt(5); + nodesToJoin.clear(); + for (int i = 0; i < finalJoins; i++) { + DiscoveryNode node = newNode(nodeId++, true); + for (int j = 1 + randomInt(3); j > 0; j--) { + nodesToJoin.add(node); + } + } + + for (int i = 0; i < requiredJoins; i++) { + DiscoveryNode node = newNode(nodeId++, false); + for (int j = 1 + randomInt(3); j > 0; j--) { + nodesToJoin.add(node); + } + } + + Collections.shuffle(nodesToJoin); + logger.debug("--> joining [{}] nodes, with repetition a total of [{}]", finalJoins, nodesToJoin.size()); + for (DiscoveryNode node : nodesToJoin) { + pendingJoins.add(joinNodeAsync(node)); + } + logger.debug("--> waiting for master election to with no exception"); + electionFuture.get(); + + logger.debug("--> waiting on all joins to be processed"); + for (SimpleFuture future : pendingJoins) { + logger.debug("waiting on {}", future); + future.get(); // throw any exception + } + + logger.debug("--> testing accumulation stopped"); + nodeJoinController.startAccumulatingJoins(); + nodeJoinController.stopAccumulatingJoins(); + + } + + + public void testMasterElectionTimeout() throws InterruptedException { + DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterService.state().nodes()).masterNodeId(null); + clusterService.setState(ClusterState.builder(clusterService.state()).nodes(nodes)); + int nodeId = 0; + final int requiredJoins = 1 + randomInt(5); + logger.debug("--> using requiredJoins [{}]", requiredJoins); + // initial (failing) joins shouldn't count + for (int i = randomInt(5); i > 0; i--) { + try { + joinNode(newNode(nodeId++)); + fail("failed to fail node join when not a master"); + } catch (ExecutionException e) { + assertThat(e.getCause(), instanceOf(NotMasterException.class)); + } + } + + nodeJoinController.startAccumulatingJoins(); + final int initialJoins = randomIntBetween(0, requiredJoins - 1); + final ArrayList pendingJoins = new ArrayList<>(); + ArrayList nodesToJoin = new ArrayList<>(); + for (int i = 0; i < initialJoins; i++) { + DiscoveryNode node = newNode(nodeId++); + for (int j = 1 + randomInt(3); j > 0; j--) { + nodesToJoin.add(node); + } + } + Collections.shuffle(nodesToJoin); + logger.debug("--> joining [{}] nodes, with repetition a total of [{}]", initialJoins, nodesToJoin.size()); + for (DiscoveryNode node : nodesToJoin) { + pendingJoins.add(joinNodeAsync(node)); + } + + final AtomicReference failure = new AtomicReference<>(); + final CountDownLatch latch = new CountDownLatch(1); + nodeJoinController.waitToBeElectedAsMaster(requiredJoins, TimeValue.timeValueMillis(1), new NodeJoinController.Callback() { + @Override + public void onElectedAsMaster(ClusterState state) { + assertThat("callback called with elected as master, but state disagrees", state.nodes().localNodeMaster(), equalTo(true)); + latch.countDown(); + } + + @Override + public void onFailure(Throwable t) { + failure.set(t); + latch.countDown(); + } + }); + latch.await(); + logger.debug("--> verifying election timed out"); + assertThat(failure.get(), instanceOf(ElasticsearchTimeoutException.class)); + + logger.debug("--> verifying all joins are 
failed"); + for (SimpleFuture future : pendingJoins) { + logger.debug("waiting on {}", future); + try { + future.get(); // throw any exception + fail("failed to fail node join [" + future + "]"); + } catch (ExecutionException e) { + assertThat(e.getCause(), instanceOf(NotMasterException.class)); + } + } + } + + public void testNewClusterStateOnExistingNodeJoin() throws InterruptedException, ExecutionException { + ClusterState state = clusterService.state(); + final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(state.nodes()); + final DiscoveryNode other_node = new DiscoveryNode("other_node", DummyTransportAddress.INSTANCE, Version.CURRENT); + nodesBuilder.put(other_node); + clusterService.setState(ClusterState.builder(state).nodes(nodesBuilder)); + + state = clusterService.state(); + joinNode(other_node); + assertTrue("failed to publish a new state upon existing join", clusterService.state() != state); + } + + public void testNormalConcurrentJoins() throws InterruptedException { + Thread[] threads = new Thread[3 + randomInt(5)]; + ArrayList nodes = new ArrayList<>(); + nodes.add(clusterService.localNode()); + final CyclicBarrier barrier = new CyclicBarrier(threads.length); + final List backgroundExceptions = new CopyOnWriteArrayList<>(); + for (int i = 0; i < threads.length; i++) { + final DiscoveryNode node = newNode(i); + final int iterations = rarely() ? randomIntBetween(1, 4) : 1; + nodes.add(node); + threads[i] = new Thread(new AbstractRunnable() { + @Override + public void onFailure(Throwable t) { + logger.error("unexpected error in join thread", t); + backgroundExceptions.add(t); + } + + @Override + protected void doRun() throws Exception { + barrier.await(); + for (int i = 0; i < iterations; i++) { + logger.debug("{} joining", node); + joinNode(node); + } + } + }, "t_" + i); + threads[i].start(); + } + + logger.info("--> waiting for joins to complete"); + for (Thread thread : threads) { + thread.join(); + } + + assertNodesInCurrentState(nodes); + } + + public void testElectionWithConcurrentJoins() throws InterruptedException, BrokenBarrierException { + DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(clusterService.state().nodes()).masterNodeId(null); + clusterService.setState(ClusterState.builder(clusterService.state()).nodes(nodesBuilder)); + + nodeJoinController.startAccumulatingJoins(); + + Thread[] threads = new Thread[3 + randomInt(5)]; + final int requiredJoins = randomInt(threads.length); + ArrayList nodes = new ArrayList<>(); + nodes.add(clusterService.localNode()); + final CyclicBarrier barrier = new CyclicBarrier(threads.length + 1); + final List backgroundExceptions = new CopyOnWriteArrayList<>(); + for (int i = 0; i < threads.length; i++) { + final DiscoveryNode node = newNode(i, true); + final int iterations = rarely() ? 
randomIntBetween(1, 4) : 1; + nodes.add(node); + threads[i] = new Thread(new AbstractRunnable() { + @Override + public void onFailure(Throwable t) { + logger.error("unexpected error in join thread", t); + backgroundExceptions.add(t); + } + + @Override + protected void doRun() throws Exception { + barrier.await(); + for (int i = 0; i < iterations; i++) { + logger.debug("{} joining", node); + joinNode(node); + } + } + }, "t_" + i); + threads[i].start(); + } + + barrier.await(); + logger.info("--> waiting to be elected as master (required joins [{}])", requiredJoins); + final AtomicReference<Throwable> failure = new AtomicReference<>(); + final CountDownLatch latch = new CountDownLatch(1); + nodeJoinController.waitToBeElectedAsMaster(requiredJoins, TimeValue.timeValueHours(30), new NodeJoinController.Callback() { + @Override + public void onElectedAsMaster(ClusterState state) { + assertThat("callback called with elected as master, but state disagrees", state.nodes().localNodeMaster(), equalTo(true)); + latch.countDown(); + } + + @Override + public void onFailure(Throwable t) { + logger.error("unexpected error while waiting to be elected as master", t); + failure.set(t); + latch.countDown(); + } + }); + latch.await(); + ExceptionsHelper.reThrowIfNotNull(failure.get()); + + + logger.info("--> waiting for joins to complete"); + for (Thread thread : threads) { + thread.join(); + } + + assertNodesInCurrentState(nodes); + } + + + static class NoopRoutingService extends RoutingService { + + public NoopRoutingService(Settings settings) { + super(settings, null, null, new NoopAllocationService(settings)); + } + + @Override + protected void performReroute(String reason) { + + } + } + + static class NoopAllocationService extends AllocationService { + + public NoopAllocationService(Settings settings) { + super(settings, null, null, null); + } + + @Override + public RoutingAllocation.Result applyStartedShards(ClusterState clusterState, List startedShards, boolean withReroute) { + return new RoutingAllocation.Result(false, clusterState.routingTable()); + } + + @Override + public RoutingAllocation.Result applyFailedShards(ClusterState clusterState, List failedShards) { + return new RoutingAllocation.Result(false, clusterState.routingTable()); + } + + @Override + public RoutingAllocation.Result reroute(ClusterState clusterState, boolean debug) { + return new RoutingAllocation.Result(false, clusterState.routingTable()); + } + } + + protected void assertNodesInCurrentState(List<DiscoveryNode> expectedNodes) { + DiscoveryNodes discoveryNodes = clusterService.state().nodes(); + assertThat(discoveryNodes.prettyPrint() + "\nexpected: " + expectedNodes.toString(), discoveryNodes.size(), equalTo(expectedNodes.size())); + for (DiscoveryNode node : expectedNodes) { + assertThat("missing " + node + "\n" + discoveryNodes.prettyPrint(), discoveryNodes.get(node.id()), equalTo(node)); + } + } + + static class SimpleFuture extends BaseFuture<Void> { + final String description; + + SimpleFuture(String description) { + this.description = description; + } + + public void markAsDone() { + set(null); + } + + public void markAsFailed(Throwable t) { + setException(t); + } + + @Override + public String toString() { + return "future [" + description + "]"; + } + } + + final static AtomicInteger joinId = new AtomicInteger(); + + private SimpleFuture joinNodeAsync(final DiscoveryNode node) throws InterruptedException { + final SimpleFuture future = new SimpleFuture("join of " + node + " (id [" + joinId.incrementAndGet() + "])"); + logger.debug("starting {}", future);
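+ // handleJoinRequest hands the join off to the cluster-state update thread and reports the outcome through the callback below; SimpleFuture adapts that callback into a blocking future, so the tests above can either block on a single join via joinNode(node) or collect many pending joins and later call get() on each, which rethrows any join failure wrapped in an ExecutionException.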
+ nodeJoinController.handleJoinRequest(node, new MembershipAction.JoinCallback() { + @Override + public void onSuccess() { + logger.debug("{} completed", future); + future.markAsDone(); + } + + @Override + public void onFailure(Throwable t) { + logger.error("unexpected error for {}", t, future); + future.markAsFailed(t); + } + }); + return future; + } + + private void joinNode(final DiscoveryNode node) throws InterruptedException, ExecutionException { + joinNodeAsync(node).get(); + } + + protected DiscoveryNode newNode(int i) { + return newNode(i, randomBoolean()); + } + + protected DiscoveryNode newNode(int i, boolean master) { + Map<String, String> attributes = new HashMap<>(); + attributes.put("master", Boolean.toString(master)); + final String prefix = master ? "master_" : "data_"; + return new DiscoveryNode(prefix + i, i + "", new LocalTransportAddress("test_" + i), attributes, Version.CURRENT); + } +} diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryTests.java index f4c96e6eda7..851580f733a 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.discovery.zen; -import com.google.common.collect.Iterables; import org.apache.lucene.util.LuceneTestCase.Slow; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; @@ -48,9 +47,7 @@ import org.hamcrest.Matchers; import org.junit.Test; import java.io.IOException; -import java.lang.ref.Reference; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.concurrent.CountDownLatch; @@ -219,7 +216,7 @@ public class ZenDiscoveryTests extends ElasticsearchIntegrationTest { }); latch.await(); assertThat(reference.get(), notNullValue()); - assertThat(ExceptionsHelper.detailedMessage(reference.get()), containsString("cluster state from a different master then the current one, rejecting ")); + assertThat(ExceptionsHelper.detailedMessage(reference.get()), containsString("cluster state from a different master than the current one, rejecting")); } @Test @@ -228,7 +225,7 @@ public class ZenDiscoveryTests extends ElasticsearchIntegrationTest { .put("discovery.type", "zen") // <-- To override the local setting if set externally .put("node.mode", "local") // <-- force local transport so we can fake a network address .build(); - String nodeName = internalCluster().startNode(nodeSettings, Version.V_2_0_0); + String nodeName = internalCluster().startNode(nodeSettings, Version.V_2_0_0_beta1); ZenDiscovery zenDiscovery = (ZenDiscovery) internalCluster().getInstance(Discovery.class, nodeName); DiscoveryNode node = new DiscoveryNode("_node_id", new LocalTransportAddress("_id"), Version.V_1_6_0); @@ -245,14 +242,14 @@ }); assertThat(holder.get(), notNullValue()); - assertThat(holder.get().getMessage(), equalTo("Can't handle join request from a node with a version [1.6.0] that is lower than the minimum compatible version [2.0.0-SNAPSHOT]")); + assertThat(holder.get().getMessage(), equalTo("Can't handle join request from a node with a version [1.6.0] that is lower than the minimum compatible version [" + Version.V_2_0_0_beta1.minimumCompatibilityVersion() + "]")); } @Test public void testJoinElectedMaster_incompatibleMinVersion() { - ElectMasterService electMasterService =
new ElectMasterService(Settings.EMPTY, Version.V_2_0_0); + ElectMasterService electMasterService = new ElectMasterService(Settings.EMPTY, Version.V_2_0_0_beta1); - DiscoveryNode node = new DiscoveryNode("_node_id", new LocalTransportAddress("_id"), Version.V_2_0_0); + DiscoveryNode node = new DiscoveryNode("_node_id", new LocalTransportAddress("_id"), Version.V_2_0_0_beta1); assertThat(electMasterService.electMaster(Collections.singletonList(node)), sameInstance(node)); node = new DiscoveryNode("_node_id", new LocalTransportAddress("_id"), Version.V_1_6_0); assertThat("Can't join master because version 1.6.0 is lower than the minimum compatible version 2.0.0 can support", electMasterService.electMaster(Collections.singletonList(node)), nullValue()); diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTest.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTest.java index 169dbdbe4a2..bbd970c8b4a 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTest.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTest.java @@ -75,7 +75,7 @@ public class ZenDiscoveryUnitTest extends ElasticsearchTestCase { shouldIgnoreOrRejectNewClusterState(logger, currentState.build(), newState.build()); fail("should ignore, because current state's master is not equal to new state's master"); } catch (IllegalStateException e) { - assertThat(e.getMessage(), containsString("cluster state from a different master then the current one, rejecting")); + assertThat(e.getMessage(), containsString("cluster state from a different master than the current one, rejecting")); } currentNodes = DiscoveryNodes.builder(); diff --git a/core/src/test/java/org/elasticsearch/env/EnvironmentTests.java b/core/src/test/java/org/elasticsearch/env/EnvironmentTests.java index 3eba6c46c5a..a2ba2d39886 100644 --- a/core/src/test/java/org/elasticsearch/env/EnvironmentTests.java +++ b/core/src/test/java/org/elasticsearch/env/EnvironmentTests.java @@ -30,6 +30,7 @@ import java.io.IOException; import java.net.URL; import static org.elasticsearch.common.settings.Settings.settingsBuilder; +import static org.hamcrest.CoreMatchers.endsWith; import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.CoreMatchers.nullValue; @@ -85,6 +86,19 @@ public class EnvironmentTests extends ElasticsearchTestCase { assertThat(environment.resolveRepoFile("/test/repos/../repos/repo1"), notNullValue()); assertThat(environment.resolveRepoFile("/somethingeles/repos/repo1"), nullValue()); assertThat(environment.resolveRepoFile("/test/other/repo"), notNullValue()); + + + assertThat(environment.resolveRepoURL(new URL("file:///test/repos/repo1")), notNullValue()); + assertThat(environment.resolveRepoURL(new URL("file:/test/repos/repo1")), notNullValue()); + assertThat(environment.resolveRepoURL(new URL("file://test/repos/repo1")), nullValue()); + assertThat(environment.resolveRepoURL(new URL("file:///test/repos/../repo1")), nullValue()); + assertThat(environment.resolveRepoURL(new URL("http://localhost/test/")), nullValue()); + + assertThat(environment.resolveRepoURL(new URL("jar:file:///test/repos/repo1!/repo/")), notNullValue()); + assertThat(environment.resolveRepoURL(new URL("jar:file:/test/repos/repo1!/repo/")), notNullValue()); + assertThat(environment.resolveRepoURL(new URL("jar:file:///test/repos/repo1!/repo/")).toString(), endsWith("repo1!/repo/")); + assertThat(environment.resolveRepoURL(new URL("jar:file:///test/repos/../repo1!/repo/")),
nullValue()); + assertThat(environment.resolveRepoURL(new URL("jar:http://localhost/test/../repo1?blah!/repo/")), nullValue()); } } diff --git a/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTest.java b/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTest.java index ed160d92b32..036a89c7300 100644 --- a/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTest.java +++ b/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTest.java @@ -94,7 +94,7 @@ public class MetaDataStateFormatTest extends ElasticsearchTestCase { Files.copy(resource, dst); MetaData read = format.read(dst); assertThat(read, notNullValue()); - assertThat(read.uuid(), equalTo("3O1tDF1IRB6fSJ-GrTMUtg")); + assertThat(read.clusterUUID(), equalTo("3O1tDF1IRB6fSJ-GrTMUtg")); // indices are empty since they are serialized separately } @@ -274,7 +274,7 @@ public class MetaDataStateFormatTest extends ElasticsearchTestCase { final MetaData meta = randomMeta(); format.write(meta, v1, dirs); final MetaData metaData = format.loadLatestState(logger, dirs); - assertEquals(meta.uuid(), metaData.uuid()); + assertEquals(meta.clusterUUID(), metaData.clusterUUID()); final Path path = randomFrom(dirs); final Path[] files = FileSystemUtils.files(path.resolve("_state")); assertEquals(1, files.length); @@ -295,12 +295,12 @@ public class MetaDataStateFormatTest extends ElasticsearchTestCase { final long v = randomInt(10); MetaData meta = randomMeta(); - String uuid = meta.uuid(); + String uuid = meta.clusterUUID(); // write a first state file in the old format final Path dir2 = randomFrom(dirs); MetaData meta2 = randomMeta(); - assertFalse(meta2.uuid().equals(uuid)); + assertFalse(meta2.clusterUUID().equals(uuid)); try (XContentBuilder xcontentBuilder = XContentFactory.contentBuilder(format.format(), Files.newOutputStream(dir2.resolve(MetaDataStateFormat.STATE_DIR_NAME).resolve(MetaStateService.GLOBAL_STATE_FILE_PREFIX + v)))) { xcontentBuilder.startObject(); MetaData.Builder.toXContent(randomMeta(), xcontentBuilder, params); @@ -313,7 +313,7 @@ public class MetaDataStateFormatTest extends ElasticsearchTestCase { MetaData state = format.loadLatestState(logger, dirs); final Path path = randomFrom(dirs); assertTrue(Files.exists(path.resolve(MetaDataStateFormat.STATE_DIR_NAME).resolve("global-" + (v+1) + ".st"))); - assertEquals(state.uuid(), uuid); + assertEquals(state.clusterUUID(), uuid); } @Test @@ -358,8 +358,8 @@ public class MetaDataStateFormatTest extends ElasticsearchTestCase { Collections.shuffle(dirList, getRandom()); MetaData loadedMetaData = format.loadLatestState(logger, dirList.toArray(new Path[0])); MetaData latestMetaData = meta.get(numStates-1); - assertThat(loadedMetaData.uuid(), not(equalTo("_na_"))); - assertThat(loadedMetaData.uuid(), equalTo(latestMetaData.uuid())); + assertThat(loadedMetaData.clusterUUID(), not(equalTo("_na_"))); + assertThat(loadedMetaData.clusterUUID(), equalTo(latestMetaData.clusterUUID())); ImmutableOpenMap indices = loadedMetaData.indices(); assertThat(indices.size(), equalTo(latestMetaData.indices().size())); for (IndexMetaData original : latestMetaData) { @@ -392,7 +392,7 @@ public class MetaDataStateFormatTest extends ElasticsearchTestCase { private MetaData randomMeta() throws IOException { int numIndices = randomIntBetween(1, 10); MetaData.Builder mdBuilder = MetaData.builder(); - mdBuilder.generateUuidIfNeeded(); + mdBuilder.generateClusterUuidIfNeeded(); for (int i = 0; i < numIndices; i++) { 
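// each iteration adds one randomly named index; the cluster UUID generated by generateClusterUuidIfNeeded() above is the value the surrounding tests assert survives a write/load round trip of the global state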
mdBuilder.put(indexBuilder(randomAsciiOfLength(10) + "idx-"+i)); } diff --git a/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayTests.java b/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayTests.java index 23361c83120..be048f1b9a3 100644 --- a/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayTests.java @@ -282,7 +282,7 @@ public class RecoveryFromGatewayTests extends ElasticsearchIntegrationTest { assertHitCount(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 2); } - String metaDataUuid = client().admin().cluster().prepareState().execute().get().getState().getMetaData().uuid(); + String metaDataUuid = client().admin().cluster().prepareState().execute().get().getState().getMetaData().clusterUUID(); assertThat(metaDataUuid, not(equalTo("_na_"))); logger.info("--> closing first node, and indexing more data to the second node"); @@ -325,7 +325,7 @@ public class RecoveryFromGatewayTests extends ElasticsearchIntegrationTest { logger.info("--> running cluster_health (wait for the shards to startup)"); ensureGreen(); - assertThat(client().admin().cluster().prepareState().execute().get().getState().getMetaData().uuid(), equalTo(metaDataUuid)); + assertThat(client().admin().cluster().prepareState().execute().get().getState().getMetaData().clusterUUID(), equalTo(metaDataUuid)); for (int i = 0; i < 10; i++) { assertHitCount(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 3); diff --git a/core/src/test/java/org/elasticsearch/get/GetActionTests.java b/core/src/test/java/org/elasticsearch/get/GetActionTests.java index d57f0b126cb..f04839b55f4 100644 --- a/core/src/test/java/org/elasticsearch/get/GetActionTests.java +++ b/core/src/test/java/org/elasticsearch/get/GetActionTests.java @@ -54,13 +54,7 @@ import java.util.Set; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasKey; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; -import static org.hamcrest.Matchers.startsWith; +import static org.hamcrest.Matchers.*; public class GetActionTests extends ElasticsearchIntegrationTest { @@ -600,7 +594,8 @@ public class GetActionTests extends ElasticsearchIntegrationTest { assertThat(response.getResponses()[1].getResponse().getSourceAsMap().get("field").toString(), equalTo("value1")); assertThat(response.getResponses()[2].getFailure(), notNullValue()); assertThat(response.getResponses()[2].getFailure().getId(), equalTo("1")); - assertThat(response.getResponses()[2].getFailure().getMessage(), startsWith("VersionConflictEngineException")); + assertThat(response.getResponses()[2].getFailure().getMessage(), startsWith("[type1][1]: version conflict, current [1], provided [2]")); + assertThat(response.getResponses()[2].getFailure().getFailure(), instanceOf(VersionConflictEngineException.class)); //Version from Lucene index refresh(); @@ -622,7 +617,9 @@ public class GetActionTests extends ElasticsearchIntegrationTest { assertThat(response.getResponses()[1].getResponse().getSourceAsMap().get("field").toString(), equalTo("value1")); assertThat(response.getResponses()[2].getFailure(), notNullValue()); 
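// the assertions that follow reflect the change above: a multi-get failure now exposes the concrete exception via getFailure().getFailure() and a detailed message such as "[type1][1]: version conflict, current [1], provided [2]" instead of only the exception class name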
assertThat(response.getResponses()[2].getFailure().getId(), equalTo("1")); - assertThat(response.getResponses()[2].getFailure().getMessage(), startsWith("VersionConflictEngineException")); + assertThat(response.getResponses()[2].getFailure().getMessage(), startsWith("[type1][1]: version conflict, current [1], provided [2]")); + assertThat(response.getResponses()[2].getFailure().getFailure(), instanceOf(VersionConflictEngineException.class)); + for (int i = 0; i < 3; i++) { @@ -645,7 +642,7 @@ public class GetActionTests extends ElasticsearchIntegrationTest { assertThat(response.getResponses()[1].getFailure(), notNullValue()); assertThat(response.getResponses()[1].getFailure().getId(), equalTo("2")); assertThat(response.getResponses()[1].getIndex(), equalTo("test")); - assertThat(response.getResponses()[1].getFailure().getMessage(), startsWith("VersionConflictEngineException")); + assertThat(response.getResponses()[1].getFailure().getMessage(), startsWith("[type1][2]: version conflict, current [2], provided [1]")); assertThat(response.getResponses()[2].getId(), equalTo("2")); assertThat(response.getResponses()[2].getIndex(), equalTo("test")); assertThat(response.getResponses()[2].getFailure(), nullValue()); @@ -671,7 +668,7 @@ public class GetActionTests extends ElasticsearchIntegrationTest { assertThat(response.getResponses()[1].getFailure(), notNullValue()); assertThat(response.getResponses()[1].getFailure().getId(), equalTo("2")); assertThat(response.getResponses()[1].getIndex(), equalTo("test")); - assertThat(response.getResponses()[1].getFailure().getMessage(), startsWith("VersionConflictEngineException")); + assertThat(response.getResponses()[1].getFailure().getMessage(), startsWith("[type1][2]: version conflict, current [2], provided [1]")); assertThat(response.getResponses()[2].getId(), equalTo("2")); assertThat(response.getResponses()[2].getIndex(), equalTo("test")); assertThat(response.getResponses()[2].getFailure(), nullValue()); diff --git a/core/src/test/java/org/elasticsearch/http/netty/NettyHttpChannelTests.java b/core/src/test/java/org/elasticsearch/http/netty/NettyHttpChannelTests.java new file mode 100644 index 00000000000..c40f31f9acb --- /dev/null +++ b/core/src/test/java/org/elasticsearch/http/netty/NettyHttpChannelTests.java @@ -0,0 +1,350 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.http.netty; + +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ElasticsearchTestCase; +import org.elasticsearch.test.cache.recycler.MockBigArrays; +import org.elasticsearch.test.cache.recycler.MockPageCacheRecycler; +import org.elasticsearch.threadpool.ThreadPool; +import org.jboss.netty.buffer.ChannelBuffer; +import org.jboss.netty.buffer.ChannelBuffers; +import org.jboss.netty.channel.*; +import org.jboss.netty.handler.codec.http.*; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.net.SocketAddress; +import java.util.ArrayList; +import java.util.List; + +import static org.hamcrest.Matchers.*; + +public class NettyHttpChannelTests extends ElasticsearchTestCase { + + private NetworkService networkService; + private ThreadPool threadPool; + private MockBigArrays bigArrays; + private NettyHttpServerTransport httpServerTransport; + + @Before + public void setup() throws Exception { + networkService = new NetworkService(Settings.EMPTY); + threadPool = new ThreadPool("test"); + MockPageCacheRecycler mockPageCacheRecycler = new MockPageCacheRecycler(Settings.EMPTY, threadPool); + bigArrays = new MockBigArrays(mockPageCacheRecycler, new NoneCircuitBreakerService()); + } + + @After + public void shutdown() throws Exception { + if (threadPool != null) { + threadPool.shutdownNow(); + } + if (httpServerTransport != null) { + httpServerTransport.close(); + } + } + + @Test + public void testCorsEnabledWithoutAllowOrigins() { + // Set up a HTTP transport with only the CORS enabled setting + Settings settings = Settings.builder() + .put(NettyHttpServerTransport.SETTING_CORS_ENABLED, true) + .build(); + httpServerTransport = new NettyHttpServerTransport(settings, networkService, bigArrays); + HttpRequest httpRequest = new TestHttpRequest(); + httpRequest.headers().add(HttpHeaders.Names.ORIGIN, "remote"); + httpRequest.headers().add(HttpHeaders.Names.USER_AGENT, "Mozilla fake"); + WriteCapturingChannel writeCapturingChannel = new WriteCapturingChannel(); + NettyHttpRequest request = new NettyHttpRequest(httpRequest, writeCapturingChannel); + + // send a response + NettyHttpChannel channel = new NettyHttpChannel(httpServerTransport, request, null, randomBoolean()); + channel.sendResponse(new TestReponse()); + + // inspect what was written + List writtenObjects = writeCapturingChannel.getWrittenObjects(); + assertThat(writtenObjects.size(), is(1)); + HttpResponse response = (HttpResponse) writtenObjects.get(0); + assertThat(response.headers().get(HttpHeaders.Names.ACCESS_CONTROL_ALLOW_ORIGIN), nullValue()); + } + + @Test + public void testCorsEnabledWithAllowOrigins() { + // create a http transport with CORS enabled and allow origin configured + Settings settings = Settings.builder() + .put(NettyHttpServerTransport.SETTING_CORS_ENABLED, true) + .put(NettyHttpServerTransport.SETTING_CORS_ALLOW_ORIGIN, "remote-host") + .build(); + httpServerTransport = new NettyHttpServerTransport(settings, networkService, bigArrays); + HttpRequest httpRequest = new TestHttpRequest(); + httpRequest.headers().add(HttpHeaders.Names.ORIGIN, "remote"); + httpRequest.headers().add(HttpHeaders.Names.USER_AGENT, 
"Mozilla fake"); + WriteCapturingChannel writeCapturingChannel = new WriteCapturingChannel(); + NettyHttpRequest request = new NettyHttpRequest(httpRequest, writeCapturingChannel); + + NettyHttpChannel channel = new NettyHttpChannel(httpServerTransport, request, null, randomBoolean()); + channel.sendResponse(new TestReponse()); + + // inspect what was written + List writtenObjects = writeCapturingChannel.getWrittenObjects(); + assertThat(writtenObjects.size(), is(1)); + HttpResponse response = (HttpResponse) writtenObjects.get(0); + assertThat(response.headers().get(HttpHeaders.Names.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); + String allowedOrigins = response.headers().get(HttpHeaders.Names.ACCESS_CONTROL_ALLOW_ORIGIN); + assertThat(allowedOrigins, is("remote-host")); + } + + private static class WriteCapturingChannel implements Channel { + + private List writtenObjects = new ArrayList<>(); + + @Override + public Integer getId() { + return null; + } + + @Override + public ChannelFactory getFactory() { + return null; + } + + @Override + public Channel getParent() { + return null; + } + + @Override + public ChannelConfig getConfig() { + return null; + } + + @Override + public ChannelPipeline getPipeline() { + return null; + } + + @Override + public boolean isOpen() { + return false; + } + + @Override + public boolean isBound() { + return false; + } + + @Override + public boolean isConnected() { + return false; + } + + @Override + public SocketAddress getLocalAddress() { + return null; + } + + @Override + public SocketAddress getRemoteAddress() { + return null; + } + + @Override + public ChannelFuture write(Object message) { + writtenObjects.add(message); + return null; + } + + @Override + public ChannelFuture write(Object message, SocketAddress remoteAddress) { + writtenObjects.add(message); + return null; + } + + @Override + public ChannelFuture bind(SocketAddress localAddress) { + return null; + } + + @Override + public ChannelFuture connect(SocketAddress remoteAddress) { + return null; + } + + @Override + public ChannelFuture disconnect() { + return null; + } + + @Override + public ChannelFuture unbind() { + return null; + } + + @Override + public ChannelFuture close() { + return null; + } + + @Override + public ChannelFuture getCloseFuture() { + return null; + } + + @Override + public int getInterestOps() { + return 0; + } + + @Override + public boolean isReadable() { + return false; + } + + @Override + public boolean isWritable() { + return false; + } + + @Override + public ChannelFuture setInterestOps(int interestOps) { + return null; + } + + @Override + public ChannelFuture setReadable(boolean readable) { + return null; + } + + @Override + public boolean getUserDefinedWritability(int index) { + return false; + } + + @Override + public void setUserDefinedWritability(int index, boolean isWritable) { + + } + + @Override + public Object getAttachment() { + return null; + } + + @Override + public void setAttachment(Object attachment) { + + } + + @Override + public int compareTo(Channel o) { + return 0; + } + + public List getWrittenObjects() { + return writtenObjects; + } + } + + private static class TestHttpRequest implements HttpRequest { + + private HttpHeaders headers = new DefaultHttpHeaders(); + + @Override + public HttpMethod getMethod() { + return null; + } + + @Override + public void setMethod(HttpMethod method) { + + } + + @Override + public String getUri() { + return ""; + } + + @Override + public void setUri(String uri) { + + } + + @Override + public HttpVersion 
getProtocolVersion() { + return HttpVersion.HTTP_1_1; + } + + @Override + public void setProtocolVersion(HttpVersion version) { + + } + + @Override + public HttpHeaders headers() { + return headers; + } + + @Override + public ChannelBuffer getContent() { + return ChannelBuffers.EMPTY_BUFFER; + } + + @Override + public void setContent(ChannelBuffer content) { + + } + + @Override + public boolean isChunked() { + return false; + } + + @Override + public void setChunked(boolean chunked) { + + } + } + + private static class TestReponse extends RestResponse { + + @Override + public String contentType() { + return "text"; + } + + @Override + public BytesReference content() { + return BytesArray.EMPTY; + } + + @Override + public RestStatus status() { + return RestStatus.OK; + } + } +} diff --git a/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasTests.java b/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasTests.java index 73ea27ae84a..a68a1654fe3 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasTests.java +++ b/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; @@ -49,6 +50,7 @@ import org.junit.Test; import java.io.IOException; import java.nio.file.Path; import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; @@ -317,7 +319,7 @@ public class IndexWithShadowReplicasTests extends ElasticsearchIntegrationTest { } @Test - public void testPrimaryRelocationWithConcurrentIndexing() throws Exception { + public void testPrimaryRelocationWithConcurrentIndexing() throws Throwable { Settings nodeSettings = nodeSettings(); String node1 = internalCluster().startNode(nodeSettings); @@ -346,15 +348,19 @@ public class IndexWithShadowReplicasTests extends ElasticsearchIntegrationTest { final int numPhase2Docs = scaledRandomIntBetween(25, 200); final CountDownLatch phase1finished = new CountDownLatch(1); final CountDownLatch phase2finished = new CountDownLatch(1); - + final CopyOnWriteArrayList exceptions = new CopyOnWriteArrayList<>(); Thread thread = new Thread() { @Override public void run() { started.countDown(); while (counter.get() < (numPhase1Docs + numPhase2Docs)) { - final IndexResponse indexResponse = client().prepareIndex(IDX, "doc", - Integer.toString(counter.incrementAndGet())).setSource("foo", "bar").get(); - assertTrue(indexResponse.isCreated()); + try { + final IndexResponse indexResponse = client().prepareIndex(IDX, "doc", + Integer.toString(counter.incrementAndGet())).setSource("foo", "bar").get(); + assertTrue(indexResponse.isCreated()); + } catch (Throwable t) { + exceptions.add(t); + } final int docCount = counter.get(); if (docCount == numPhase1Docs) { phase1finished.countDown(); @@ -374,6 +380,7 @@ public class IndexWithShadowReplicasTests extends ElasticsearchIntegrationTest { // wait for more documents to be indexed post-recovery, also waits for // indexing thread to stop phase2finished.await(); + ExceptionsHelper.rethrowAndSuppress(exceptions); ensureGreen(IDX); 
thread.join(); logger.info("--> performing query"); diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index fef1c7c993f..6efe7868858 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -1733,7 +1733,7 @@ public class InternalEngineTests extends ElasticsearchTestCase { for (Path indexFile : indexes.subList(0, scaledRandomIntBetween(1, indexes.size() / 2))) { final String indexName = indexFile.getFileName().toString().replace(".zip", "").toLowerCase(Locale.ROOT); Version version = Version.fromString(indexName.replace("index-", "")); - if (version.onOrAfter(Version.V_2_0_0)) { + if (version.onOrAfter(Version.V_2_0_0_beta1)) { continue; } Path unzipDir = createTempDir(); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java index e9a54feadd2..24e9be49b67 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java @@ -102,6 +102,41 @@ public class GeoShapeFieldMapperTests extends ElasticsearchSingleNodeTest { assertThat(orientation, equalTo(ShapeBuilder.Orientation.CCW)); } + /** + * Test that the coerce parameter parses correctly + * @throws IOException + */ + public void testCoerceParsing() throws IOException { + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1") + .startObject("properties").startObject("location") + .field("type", "geo_shape") + .field("coerce", "true") + .endObject().endObject() + .endObject().endObject().string(); + + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping); + FieldMapper fieldMapper = defaultMapper.mappers().getMapper("location"); + assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class)); + + boolean coerce = ((GeoShapeFieldMapper)fieldMapper).coerce().value(); + assertThat(coerce, equalTo(true)); + + // explicit false coerce test + mapping = XContentFactory.jsonBuilder().startObject().startObject("type1") + .startObject("properties").startObject("location") + .field("type", "geo_shape") + .field("coerce", "false") + .endObject().endObject() + .endObject().endObject().string(); + + defaultMapper = createIndex("test2").mapperService().documentMapperParser().parse(mapping); + fieldMapper = defaultMapper.mappers().getMapper("location"); + assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class)); + + coerce = ((GeoShapeFieldMapper)fieldMapper).coerce().value(); + assertThat(coerce, equalTo(false)); + } + @Test public void testGeohashConfiguration() throws IOException { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1") diff --git a/core/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java index e1341cecf02..b6f53210c69 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java @@ -50,7 +50,7 @@ import java.util.List; import java.util.Map; import static org.elasticsearch.Version.V_1_5_0; -import static
org.elasticsearch.Version.V_2_0_0; +import static org.elasticsearch.Version.V_2_0_0_beta1; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.test.VersionUtils.randomVersion; import static org.elasticsearch.test.VersionUtils.randomVersionBetween; @@ -96,17 +96,17 @@ public class TimestampMappingTests extends ElasticsearchSingleNodeTest { @Test public void testDefaultValues() throws Exception { - for (Version version : Arrays.asList(V_1_5_0, V_2_0_0, randomVersion(random()))) { + for (Version version : Arrays.asList(V_1_5_0, V_2_0_0_beta1, randomVersion(random()))) { for (String mapping : Arrays.asList( XContentFactory.jsonBuilder().startObject().startObject("type").endObject().string(), XContentFactory.jsonBuilder().startObject().startObject("type").startObject("_timestamp").endObject().endObject().string())) { DocumentMapper docMapper = createIndex("test", Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build()).mapperService().documentMapperParser().parse(mapping); assertThat(docMapper.timestampFieldMapper().enabled(), equalTo(TimestampFieldMapper.Defaults.ENABLED.enabled)); - assertThat(docMapper.timestampFieldMapper().fieldType().stored(), equalTo(version.onOrAfter(Version.V_2_0_0))); + assertThat(docMapper.timestampFieldMapper().fieldType().stored(), equalTo(version.onOrAfter(Version.V_2_0_0_beta1))); assertThat(docMapper.timestampFieldMapper().fieldType().indexOptions(), equalTo(TimestampFieldMapper.Defaults.FIELD_TYPE.indexOptions())); assertThat(docMapper.timestampFieldMapper().path(), equalTo(TimestampFieldMapper.Defaults.PATH)); - assertThat(docMapper.timestampFieldMapper().fieldType().hasDocValues(), equalTo(version.onOrAfter(Version.V_2_0_0))); - String expectedFormat = version.onOrAfter(Version.V_2_0_0) ? TimestampFieldMapper.DEFAULT_DATE_TIME_FORMAT : + assertThat(docMapper.timestampFieldMapper().fieldType().hasDocValues(), equalTo(version.onOrAfter(Version.V_2_0_0_beta1))); + String expectedFormat = version.onOrAfter(Version.V_2_0_0_beta1) ? 
TimestampFieldMapper.DEFAULT_DATE_TIME_FORMAT : TimestampFieldMapper.Defaults.DATE_TIME_FORMATTER_BEFORE_2_0.format(); assertThat(docMapper.timestampFieldMapper().fieldType().dateTimeFormatter().format(), equalTo(expectedFormat)); assertAcked(client().admin().indices().prepareDelete("test").execute().get()); diff --git a/core/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java b/core/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java index 3f69980adaf..c794b517802 100644 --- a/core/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java @@ -437,7 +437,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { @Test public void testFuzzyQueryWithFieldsBuilder() throws IOException { IndexQueryParserService queryParser = queryParser(); - Query parsedQuery = queryParser.parse(fuzzyQuery("name.first", "sh").fuzziness(Fuzziness.fromSimilarity(0.1f)).prefixLength(1).boost(2.0f).buildAsBytes()).query(); + Query parsedQuery = queryParser.parse(fuzzyQuery("name.first", "sh").fuzziness(Fuzziness.ONE).prefixLength(1).boost(2.0f).buildAsBytes()).query(); assertThat(parsedQuery, instanceOf(FuzzyQuery.class)); FuzzyQuery fuzzyQuery = (FuzzyQuery) parsedQuery; assertThat(fuzzyQuery.getTerm(), equalTo(new Term("name.first", "sh"))); @@ -454,7 +454,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { assertThat(parsedQuery, instanceOf(FuzzyQuery.class)); FuzzyQuery fuzzyQuery = (FuzzyQuery) parsedQuery; assertThat(fuzzyQuery.getTerm(), equalTo(new Term("name.first", "sh"))); - assertThat(fuzzyQuery.getMaxEdits(), equalTo(FuzzyQuery.floatToEdits(0.1f, "sh".length()))); + assertThat(fuzzyQuery.getMaxEdits(), equalTo(Fuzziness.AUTO.asDistance("sh"))); assertThat(fuzzyQuery.getPrefixLength(), equalTo(1)); assertThat(fuzzyQuery.getBoost(), equalTo(2.0f)); } @@ -1204,6 +1204,28 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { assertEquals(expected, parsedQuery.query()); } + @Test + public void testTermQueryParserShouldOnlyAllowSingleTerm() throws Exception { + String query = copyToStringFromClasspath("/org/elasticsearch/index/query/term-filter-broken-multi-terms.json"); + assertQueryParsingFailureDueToMultipleTermsInTermFilter(query); + } + + @Test + public void testTermQueryParserShouldOnlyAllowSingleTermInAlternateFormat() throws Exception { + String query = copyToStringFromClasspath("/org/elasticsearch/index/query/term-filter-broken-multi-terms-2.json"); + assertQueryParsingFailureDueToMultipleTermsInTermFilter(query); + } + + private void assertQueryParsingFailureDueToMultipleTermsInTermFilter(String query) throws IOException { + IndexQueryParserService queryParser = queryParser(); + try { + queryParser.parse(query); + fail("Expected Query Parsing Exception but did not happen"); + } catch (QueryParsingException e) { + assertThat(e.getMessage(), containsString("[term] query does not support different field names, use [bool] query instead")); + } + } + @Test public void testTermsFilterQueryBuilder() throws Exception { IndexQueryParserService queryParser = queryParser(); diff --git a/core/src/test/java/org/elasticsearch/index/query/fuzzy-with-fields.json b/core/src/test/java/org/elasticsearch/index/query/fuzzy-with-fields.json index 3e3d30ffdc0..7636496adc4 100644 --- a/core/src/test/java/org/elasticsearch/index/query/fuzzy-with-fields.json +++ 
b/core/src/test/java/org/elasticsearch/index/query/fuzzy-with-fields.json @@ -2,7 +2,7 @@ "fuzzy":{ "name.first":{ "value":"sh", - "fuzziness":0.1, + "fuzziness": "AUTO", "prefix_length":1, "boost":2.0 } diff --git a/core/src/test/java/org/elasticsearch/index/query/term-filter-broken-multi-terms-2.json b/core/src/test/java/org/elasticsearch/index/query/term-filter-broken-multi-terms-2.json new file mode 100644 index 00000000000..b71de530533 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/query/term-filter-broken-multi-terms-2.json @@ -0,0 +1,10 @@ +{ + "filtered": { + "filter": { + "term": { + "name.first": { "value": "shay" }, + "name.last": { "value": "banon" } + } + } + } +} \ No newline at end of file diff --git a/core/src/test/java/org/elasticsearch/index/query/term-filter-broken-multi-terms.json b/core/src/test/java/org/elasticsearch/index/query/term-filter-broken-multi-terms.json new file mode 100644 index 00000000000..aabd6e48376 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/query/term-filter-broken-multi-terms.json @@ -0,0 +1,10 @@ +{ + "filtered":{ + "query":{ + "term":{ + "name.first": "shay", + "name.last" : "banon" + } + } + } +} \ No newline at end of file diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 9a43ec1faae..0e1502aa5e9 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -19,14 +19,17 @@ package org.elasticsearch.index.shard; import org.apache.lucene.index.CorruptIndexException; +import org.apache.lucene.store.LockObtainFailedException; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.stats.IndexStats; +import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.NodeEnvironment; @@ -45,7 +48,9 @@ import org.elasticsearch.test.VersionUtils; import org.junit.Test; import java.io.IOException; +import java.nio.file.Files; import java.nio.file.Path; +import java.nio.file.StandardCopyOption; import java.util.HashSet; import java.util.Set; import java.util.concurrent.ExecutionException; @@ -53,6 +58,7 @@ import java.util.concurrent.ExecutionException; import static org.elasticsearch.cluster.metadata.IndexMetaData.*; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.equalTo; @@ -110,7 +116,6 @@ public class IndexShardTests extends ElasticsearchSingleNodeTest { public void testLockTryingToDelete() throws Exception { createIndex("test"); ensureGreen(); - //IndicesService indicesService = getInstanceFromNode(IndicesService.class); NodeEnvironment env = 
getInstanceFromNode(NodeEnvironment.class); Path[] shardPaths = env.availableShardPaths(new ShardId("test", 0)); logger.info("--> paths: [{}]", shardPaths); @@ -118,7 +123,7 @@ public class IndexShardTests extends ElasticsearchSingleNodeTest { try { NodeEnvironment.acquireFSLockForPaths(Settings.EMPTY, shardPaths); fail("should not have been able to acquire the lock"); - } catch (ElasticsearchException e) { + } catch (LockObtainFailedException e) { assertTrue("msg: " + e.getMessage(), e.getMessage().contains("unable to acquire write.lock")); } // Test without the regular shard lock to assume we can acquire it @@ -128,7 +133,7 @@ public class IndexShardTests extends ElasticsearchSingleNodeTest { try { env.deleteShardDirectoryUnderLock(sLock, Settings.builder().build()); fail("should not have been able to delete the directory"); - } catch (ElasticsearchException e) { + } catch (LockObtainFailedException e) { assertTrue("msg: " + e.getMessage(), e.getMessage().contains("unable to acquire write.lock")); } } @@ -147,39 +152,39 @@ public class IndexShardTests extends ElasticsearchSingleNodeTest { shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId)); assertEquals(shardStateMetaData, getShardStateMetadata(shard)); - assertEquals(shardStateMetaData, new ShardStateMetaData(routing.version(), routing.primary(), shard.indexSettings.get(IndexMetaData.SETTING_UUID))); + assertEquals(shardStateMetaData, new ShardStateMetaData(routing.version(), routing.primary(), shard.indexSettings.get(IndexMetaData.SETTING_INDEX_UUID))); routing = new ShardRouting(shard.shardRouting, shard.shardRouting.version() + 1); shard.updateRoutingEntry(routing, true); shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId)); assertEquals(shardStateMetaData, getShardStateMetadata(shard)); - assertEquals(shardStateMetaData, new ShardStateMetaData(routing.version(), routing.primary(), shard.indexSettings.get(IndexMetaData.SETTING_UUID))); + assertEquals(shardStateMetaData, new ShardStateMetaData(routing.version(), routing.primary(), shard.indexSettings.get(IndexMetaData.SETTING_INDEX_UUID))); routing = new ShardRouting(shard.shardRouting, shard.shardRouting.version() + 1); shard.updateRoutingEntry(routing, true); shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId)); assertEquals(shardStateMetaData, getShardStateMetadata(shard)); - assertEquals(shardStateMetaData, new ShardStateMetaData(routing.version(), routing.primary(), shard.indexSettings.get(IndexMetaData.SETTING_UUID))); + assertEquals(shardStateMetaData, new ShardStateMetaData(routing.version(), routing.primary(), shard.indexSettings.get(IndexMetaData.SETTING_INDEX_UUID))); // test if we still write it even if the shard is not active ShardRouting inactiveRouting = TestShardRouting.newShardRouting(shard.shardRouting.index(), shard.shardRouting.shardId().id(), shard.shardRouting.currentNodeId(), null, null, true, ShardRoutingState.INITIALIZING, shard.shardRouting.version() + 1); shard.persistMetadata(inactiveRouting, shard.shardRouting); shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId)); assertEquals("inactive shard state shouldn't be persisted", shardStateMetaData, getShardStateMetadata(shard)); - assertEquals("inactive shard state shouldn't be persisted", shardStateMetaData, new ShardStateMetaData(routing.version(), routing.primary(), shard.indexSettings.get(IndexMetaData.SETTING_UUID))); + assertEquals("inactive shard state shouldn't be persisted", shardStateMetaData, new 
ShardStateMetaData(routing.version(), routing.primary(), shard.indexSettings.get(IndexMetaData.SETTING_INDEX_UUID))); shard.updateRoutingEntry(new ShardRouting(shard.shardRouting, shard.shardRouting.version() + 1), false); shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId)); assertFalse("shard state persisted despite of persist=false", shardStateMetaData.equals(getShardStateMetadata(shard))); - assertEquals("shard state persisted despite of persist=false", shardStateMetaData, new ShardStateMetaData(routing.version(), routing.primary(), shard.indexSettings.get(IndexMetaData.SETTING_UUID))); + assertEquals("shard state persisted despite of persist=false", shardStateMetaData, new ShardStateMetaData(routing.version(), routing.primary(), shard.indexSettings.get(IndexMetaData.SETTING_INDEX_UUID))); routing = new ShardRouting(shard.shardRouting, shard.shardRouting.version() + 1); shard.updateRoutingEntry(routing, true); shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId)); assertEquals(shardStateMetaData, getShardStateMetadata(shard)); - assertEquals(shardStateMetaData, new ShardStateMetaData(routing.version(), routing.primary(), shard.indexSettings.get(IndexMetaData.SETTING_UUID))); + assertEquals(shardStateMetaData, new ShardStateMetaData(routing.version(), routing.primary(), shard.indexSettings.get(IndexMetaData.SETTING_INDEX_UUID))); } public void testDeleteShardState() throws IOException { @@ -232,7 +237,7 @@ public class IndexShardTests extends ElasticsearchSingleNodeTest { if (shardRouting == null) { return null; } else { - return new ShardStateMetaData(shardRouting.version(), shardRouting.primary(), shard.indexSettings.get(IndexMetaData.SETTING_UUID)); + return new ShardStateMetaData(shardRouting.version(), shardRouting.primary(), shard.indexSettings.get(IndexMetaData.SETTING_INDEX_UUID)); } } @@ -406,4 +411,28 @@ public class IndexShardTests extends ElasticsearchSingleNodeTest { client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_PRIORITY, 400).build()).get(); assertEquals(400, indexSettingsService.getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue()); } + + public void testRecoverIntoLeftover() throws IOException { + createIndex("test"); + ensureGreen("test"); + client().prepareIndex("test", "bar", "1").setSource("{}").setRefresh(true).get(); + client().admin().indices().prepareFlush("test").get(); + SearchResponse response = client().prepareSearch("test").get(); + assertHitCount(response, 1l); + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexService test = indicesService.indexService("test"); + IndexShard shard = test.shard(0); + ShardPath shardPath = shard.shardPath(); + Path dataPath = shardPath.getDataPath(); + client().admin().indices().prepareClose("test").get(); + Path tempDir = createTempDir(); + Files.move(dataPath, tempDir.resolve("test")); + client().admin().indices().prepareDelete("test").get(); + Files.createDirectories(dataPath.getParent()); + Files.move(tempDir.resolve("test"), dataPath); + createIndex("test"); + ensureGreen("test"); + response = client().prepareSearch("test").get(); + assertHitCount(response, 0l); + } } diff --git a/core/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java b/core/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java index 6186d8930bd..075bad28324 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java +++ 
b/core/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java @@ -35,7 +35,7 @@ public class ShardPathTests extends ElasticsearchTestCase { public void testLoadShardPath() throws IOException { try (final NodeEnvironment env = newNodeEnvironment(settingsBuilder().build())) { - Settings.Builder builder = settingsBuilder().put(IndexMetaData.SETTING_UUID, "0xDEADBEEF"); + Settings.Builder builder = settingsBuilder().put(IndexMetaData.SETTING_INDEX_UUID, "0xDEADBEEF"); Settings settings = builder.build(); ShardId shardId = new ShardId("foo", 0); Path[] paths = env.availableShardPaths(shardId); @@ -53,7 +53,7 @@ public class ShardPathTests extends ElasticsearchTestCase { @Test(expected = IllegalStateException.class) public void testFailLoadShardPathOnMultiState() throws IOException { try (final NodeEnvironment env = newNodeEnvironment(settingsBuilder().build())) { - Settings.Builder builder = settingsBuilder().put(IndexMetaData.SETTING_UUID, "0xDEADBEEF"); + Settings.Builder builder = settingsBuilder().put(IndexMetaData.SETTING_INDEX_UUID, "0xDEADBEEF"); Settings settings = builder.build(); ShardId shardId = new ShardId("foo", 0); Path[] paths = env.availableShardPaths(shardId); @@ -67,7 +67,7 @@ public class ShardPathTests extends ElasticsearchTestCase { @Test(expected = IllegalStateException.class) public void testFailLoadShardPathIndexUUIDMissmatch() throws IOException { try (final NodeEnvironment env = newNodeEnvironment(settingsBuilder().build())) { - Settings.Builder builder = settingsBuilder().put(IndexMetaData.SETTING_UUID, "foobar"); + Settings.Builder builder = settingsBuilder().put(IndexMetaData.SETTING_INDEX_UUID, "foobar"); Settings settings = builder.build(); ShardId shardId = new ShardId("foo", 0); Path[] paths = env.availableShardPaths(shardId); diff --git a/core/src/test/java/org/elasticsearch/index/store/CorruptedFileTest.java b/core/src/test/java/org/elasticsearch/index/store/CorruptedFileTest.java index baa1f087cb7..93722be9a73 100644 --- a/core/src/test/java/org/elasticsearch/index/store/CorruptedFileTest.java +++ b/core/src/test/java/org/elasticsearch/index/store/CorruptedFileTest.java @@ -191,7 +191,7 @@ public class CorruptedFileTest extends ElasticsearchIntegrationTest { CheckIndex.Status status = checkIndex.checkIndex(); if (!status.clean) { logger.warn("check index [failure]\n{}", new String(os.bytes().toBytes(), Charsets.UTF_8)); - throw new IndexShardException(sid, "index check failure"); + throw new IOException("index check failure"); } } } catch (Throwable t) { @@ -729,16 +729,4 @@ public class CorruptedFileTest extends ElasticsearchIntegrationTest { } return files; } - - private void disableAllocation(String index) { - client().admin().indices().prepareUpdateSettings(index).setSettings(Settings.builder().put( - "index.routing.allocation.enable", "none" - )).get(); - } - - private void enableAllocation(String index) { - client().admin().indices().prepareUpdateSettings(index).setSettings(Settings.builder().put( - "index.routing.allocation.enable", "all" - )).get(); - } } diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 80affc1d1b2..416a2ea760b 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -1168,7 +1168,7 @@ public class TranslogTests extends ElasticsearchTestCase { for (Path indexFile : indexes) { final String indexName = 
indexFile.getFileName().toString().replace(".zip", "").toLowerCase(Locale.ROOT); Version version = Version.fromString(indexName.replace("index-", "")); - if (version.onOrAfter(Version.V_2_0_0)) { + if (version.onOrAfter(Version.V_2_0_0_beta1)) { continue; } Path unzipDir = createTempDir(); @@ -1195,7 +1195,7 @@ public class TranslogTests extends ElasticsearchTestCase { final long generation = parseLegacyTranslogFile(tlogFiles[0]); assertTrue(generation >= 1); - logger.debug("upgrading index {} file: {} size: {}", indexName, tlogFiles[0].getFileName(), size); + logger.info("upgrading index {} file: {} size: {}", indexName, tlogFiles[0].getFileName(), size); TranslogConfig upgradeConfig = new TranslogConfig(config.getShardId(), translog, config.getIndexSettings(), config.getDurabilty(), config.getBigArrays(), config.getThreadPool()); upgradeConfig.setTranslogGeneration(new Translog.TranslogGeneration(null, generation)); Translog.upgradeLegacyTranslog(logger, upgradeConfig); @@ -1203,7 +1203,7 @@ public class TranslogTests extends ElasticsearchTestCase { assertEquals(generation + 1, upgraded.getGeneration().translogFileGeneration); assertEquals(upgraded.getRecoveredReaders().size(), 1); final long headerSize; - if (version.before(Version.V_1_4_0)) { + if (version.before(Version.V_1_4_0_Beta1)) { assertTrue(upgraded.getRecoveredReaders().get(0).getClass().toString(), upgraded.getRecoveredReaders().get(0).getClass() == LegacyTranslogReader.class); headerSize = 0; } else { diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationTests.java b/core/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationTests.java index baec5760b7b..ff0110b39f4 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationTests.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationTests.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.indices; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequestBuilder; @@ -50,6 +51,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.suggest.SuggestBuilders; import org.elasticsearch.search.warmer.IndexWarmersMetaData; @@ -525,7 +527,7 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest .setQuery(matchAllQuery()) .execute().actionGet(); fail("Exception should have been thrown."); - } catch (IndexMissingException e) { + } catch (IndexNotFoundException e) { } try { @@ -533,7 +535,7 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest .setQuery(matchAllQuery()) .execute().actionGet(); fail("Exception should have been thrown."); - } catch (IndexMissingException e) { + } catch (IndexNotFoundException e) { } //you should still be able to run empty searches without things blowing up @@ -892,8 +894,8 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest } else { try { requestBuilder.get(); - fail("IndexMissingException or IndexClosedException was expected"); - } catch (IndexMissingException | 
IndexClosedException e) {} + fail("IndexNotFoundException or IndexClosedException was expected"); + } catch (IndexNotFoundException | IndexClosedException e) {} } } else { if (requestBuilder instanceof SearchRequestBuilder) { diff --git a/core/src/test/java/org/elasticsearch/indices/exists/types/TypesExistsTests.java b/core/src/test/java/org/elasticsearch/indices/exists/types/TypesExistsTests.java index f72609298e4..0191c56831c 100644 --- a/core/src/test/java/org/elasticsearch/indices/exists/types/TypesExistsTests.java +++ b/core/src/test/java/org/elasticsearch/indices/exists/types/TypesExistsTests.java @@ -18,11 +18,12 @@ */ package org.elasticsearch.indices.exists.types; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.indices.exists.types.TypesExistsResponse; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.indices.IndexMissingException; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; @@ -61,11 +62,11 @@ public class TypesExistsTests extends ElasticsearchIntegrationTest { try { client.admin().indices().prepareTypesExists("notExist").setTypes("type1").execute().actionGet(); fail("Exception should have been thrown"); - } catch (IndexMissingException e) {} + } catch (IndexNotFoundException e) {} try { client.admin().indices().prepareTypesExists("notExist").setTypes("type0").execute().actionGet(); fail("Exception should have been thrown"); - } catch (IndexMissingException e) {} + } catch (IndexNotFoundException e) {} response = client.admin().indices().prepareTypesExists("alias1").setTypes("type1").execute().actionGet(); assertThat(response.isExists(), equalTo(true)); response = client.admin().indices().prepareTypesExists("*").setTypes("type1").execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTest.java b/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTest.java index 5d65c1acb7d..8cd791b5c06 100644 --- a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTest.java +++ b/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTest.java @@ -23,10 +23,12 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Strings; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.shard.ShardNotFoundException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.test.ElasticsearchSingleNodeTest; @@ -136,7 +138,8 @@ public class SyncedFlushSingleNodeTest extends ElasticsearchSingleNodeTest { listener.latch.await(); assertNotNull(listener.error); assertNull(listener.result); - assertEquals("missing", listener.error.getMessage()); + assertEquals(ShardNotFoundException.class, listener.error.getClass()); + assertEquals("no such shard", listener.error.getMessage()); final ShardId shardId = shard.shardId(); @@ -149,7 +152,7 @@ public class SyncedFlushSingleNodeTest extends ElasticsearchSingleNodeTest { assertEquals("closed", 
listener.error.getMessage()); listener = new SyncedFlushUtil.LatchedListener(); - flushService.attemptSyncedFlush(new ShardId("nosuchindex", 0), listener); + flushService.attemptSyncedFlush(new ShardId("index not found", 0), listener); listener.latch.await(); assertNotNull(listener.error); assertNull(listener.result); diff --git a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceTests.java b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceTests.java index 901f9d74bbb..878a9aaf4c5 100644 --- a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceTests.java +++ b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceTests.java @@ -23,7 +23,9 @@ import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.Client; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreakingException; @@ -88,10 +90,6 @@ public class CircuitBreakerServiceTests extends ElasticsearchIntegrationTest { reset(); } - private String randomRidiculouslySmallLimit() { - return randomFrom(Arrays.asList("100b", "100")); - } - /** Returns true if any of the nodes used a noop breaker */ private boolean noopBreakerUsed() { NodesStatsResponse stats = client().admin().cluster().prepareNodesStats().setBreaker(true).get(); @@ -107,7 +105,6 @@ public class CircuitBreakerServiceTests extends ElasticsearchIntegrationTest { } @Test - @LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/8710") public void testMemoryBreaker() throws Exception { if (noopBreakerUsed()) { logger.info("--> noop breakers used, skipping test"); @@ -124,22 +121,19 @@ public class CircuitBreakerServiceTests extends ElasticsearchIntegrationTest { } indexRandom(true, false, true, reqs); - // execute a search that loads field data (sorting on the "test" field) - SearchRequestBuilder searchRequest = client.prepareSearch("cb-test").setQuery(matchAllQuery()).addSort("test", SortOrder.DESC); - searchRequest.get(); - // clear field data cache (thus setting the loaded field data back to 0) clearFieldData(); // Update circuit breaker settings Settings settings = settingsBuilder() - .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, randomRidiculouslySmallLimit()) + .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, "100b") .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, 1.05) .build(); assertAcked(client.admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); // execute a search that loads field data (sorting on the "test" field) // again, this time it should trip the breaker + SearchRequestBuilder searchRequest = client.prepareSearch("cb-test").setQuery(matchAllQuery()).addSort("test", SortOrder.DESC); assertFailures(searchRequest, RestStatus.INTERNAL_SERVER_ERROR, containsString("Data too large, data for [test] would be larger than limit of [100/100b]")); @@ -153,7 +147,6 @@ public class CircuitBreakerServiceTests extends ElasticsearchIntegrationTest { } @Test - 
@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/9270") public void testRamAccountingTermsEnum() throws Exception { if (noopBreakerUsed()) { logger.info("--> noop breakers used, skipping test"); @@ -165,7 +158,7 @@ public class CircuitBreakerServiceTests extends ElasticsearchIntegrationTest { assertAcked(prepareCreate("ramtest").setSource("{\"mappings\": {\"type\": {\"properties\": {\"test\": " + "{\"type\": \"string\",\"fielddata\": {\"filter\": {\"regex\": {\"pattern\": \"^value.*\"}}}}}}}}")); - ensureGreen(TimeValue.timeValueSeconds(10), "ramtest"); + ensureGreen("ramtest"); // index some different terms so we have some field data for loading int docCount = scaledRandomIntBetween(300, 1000); @@ -173,7 +166,7 @@ public class CircuitBreakerServiceTests extends ElasticsearchIntegrationTest { for (long id = 0; id < docCount; id++) { reqs.add(client.prepareIndex("ramtest", "type", Long.toString(id)).setSource("test", "value" + id)); } - indexRandom(true, reqs); + indexRandom(true, false, true, reqs); // execute a search that loads field data (sorting on the "test" field) client.prepareSearch("ramtest").setQuery(matchAllQuery()).addSort("test", SortOrder.DESC).get(); @@ -183,7 +176,7 @@ public class CircuitBreakerServiceTests extends ElasticsearchIntegrationTest { // Update circuit breaker settings Settings settings = settingsBuilder() - .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, randomRidiculouslySmallLimit()) + .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, "100b") .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, 1.05) .build(); assertAcked(client.admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); diff --git a/core/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexTests.java b/core/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexTests.java index 4ba97227750..d1b461ef207 100644 --- a/core/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexTests.java +++ b/core/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.indices.state; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; @@ -31,8 +32,8 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.indices.IndexMissingException; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; @@ -63,19 +64,19 @@ public class OpenCloseIndexTests extends ElasticsearchIntegrationTest { assertIndexIsOpened("test1"); } - @Test(expected = IndexMissingException.class) + @Test(expected = IndexNotFoundException.class) public void testSimpleCloseMissingIndex() { Client client = client(); client.admin().indices().prepareClose("test1").execute().actionGet(); } - @Test(expected = IndexMissingException.class) + @Test(expected = IndexNotFoundException.class) public void testSimpleOpenMissingIndex() { Client client = client(); 
client.admin().indices().prepareOpen("test1").execute().actionGet(); } - @Test(expected = IndexMissingException.class) + @Test(expected = IndexNotFoundException.class) public void testCloseOneMissingIndex() { Client client = client(); createIndex("test1"); @@ -96,7 +97,7 @@ public class OpenCloseIndexTests extends ElasticsearchIntegrationTest { assertIndexIsClosed("test1"); } - @Test(expected = IndexMissingException.class) + @Test(expected = IndexNotFoundException.class) public void testOpenOneMissingIndex() { Client client = client(); createIndex("test1"); diff --git a/core/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateTests.java b/core/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateTests.java index 9c1a78f2f09..0d162b7c52c 100644 --- a/core/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateTests.java +++ b/core/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateTests.java @@ -31,8 +31,8 @@ import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.indices.IndexClosedException; -import org.elasticsearch.indices.IndexMissingException; import org.elasticsearch.indices.IndexPrimaryShardNotAllocatedException; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; @@ -141,7 +141,7 @@ public class SimpleIndexStateTests extends ElasticsearchIntegrationTest { logger.info("--> deleting test index...."); try { client().admin().indices().prepareDelete("test").get(); - } catch (IndexMissingException ex) { + } catch (IndexNotFoundException ex) { // Ignore } diff --git a/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java b/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java index 131e8ad73df..366a4cb96fd 100644 --- a/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java +++ b/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java @@ -20,9 +20,7 @@ package org.elasticsearch.indices.template; import com.google.common.collect.Lists; import com.google.common.collect.Sets; - import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; @@ -33,7 +31,6 @@ import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.metadata.AliasMetaData; -import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.query.QueryBuilders; @@ -44,12 +41,10 @@ import org.elasticsearch.search.SearchHit; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; -import java.io.IOException; import java.util.Arrays; import java.util.List; import java.util.Set; -import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*; import static 
org.hamcrest.Matchers.*; @@ -659,17 +654,21 @@ public class SimpleIndexTemplateTests extends ElasticsearchIntegrationTest { assertThat(response.getItems()[0].getId(), equalTo("test")); assertThat(response.getItems()[0].getVersion(), equalTo(1l)); - try { - client().prepareIndex("d1", "test", "test").setSource("{}").get(); - fail(); - } catch (Exception e) { - assertThat(ExceptionsHelper.unwrapCause(e), instanceOf(IllegalArgumentException.class)); - assertThat(e.getMessage(), containsString("failed to parse filter for alias [alias4]")); - } + // Before 2.0 alias filters were parsed at alias creation time; in order + // for filters to work correctly, ES required that fields mentioned in those + // filters exist in the mapping. + // From 2.0 and higher alias filters are parsed at request time and therefore + // fields mentioned in filters don't need to exist in the mapping. + // So the aliases defined in the index template for this index will not fail + // even though the fields in the alias filters don't exist yet, and indexing into + // an index that doesn't exist yet will succeed + client().prepareIndex("d1", "test", "test").setSource("{}").get(); + response = client().prepareBulk().add(new IndexRequest("d2", "test", "test").source("{}")).get(); - assertThat(response.hasFailures(), is(true)); - assertThat(response.getItems()[0].isFailed(), equalTo(true)); - assertThat(response.getItems()[0].getFailureMessage(), containsString("failed to parse filter for alias [alias4]")); + assertThat(response.hasFailures(), is(false)); + assertThat(response.getItems()[0].isFailed(), equalTo(false)); + assertThat(response.getItems()[0].getId(), equalTo("test")); + assertThat(response.getItems()[0].getVersion(), equalTo(1l)); } } diff --git a/core/src/test/java/org/elasticsearch/mget/SimpleMgetTests.java b/core/src/test/java/org/elasticsearch/mget/SimpleMgetTests.java index 3ad69b71bd4..20038763892 100644 --- a/core/src/test/java/org/elasticsearch/mget/SimpleMgetTests.java +++ b/core/src/test/java/org/elasticsearch/mget/SimpleMgetTests.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.mget; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.get.MultiGetItemResponse; import org.elasticsearch.action.get.MultiGetRequest; @@ -57,7 +58,8 @@ public class SimpleMgetTests extends ElasticsearchIntegrationTest { assertThat(mgetResponse.getResponses()[1].getIndex(), is("nonExistingIndex")); assertThat(mgetResponse.getResponses()[1].isFailed(), is(true)); - assertThat(mgetResponse.getResponses()[1].getFailure().getMessage(), is("[nonExistingIndex] missing")); + assertThat(mgetResponse.getResponses()[1].getFailure().getMessage(), is("no such index")); + assertThat(((ElasticsearchException)mgetResponse.getResponses()[1].getFailure().getFailure()).getIndex(), is("nonExistingIndex")); mgetResponse = client().prepareMultiGet() @@ -66,7 +68,9 @@ assertThat(mgetResponse.getResponses().length, is(1)); assertThat(mgetResponse.getResponses()[0].getIndex(), is("nonExistingIndex")); assertThat(mgetResponse.getResponses()[0].isFailed(), is(true)); - assertThat(mgetResponse.getResponses()[0].getFailure().getMessage(), is("[nonExistingIndex] missing")); + assertThat(mgetResponse.getResponses()[0].getFailure().getMessage(), is("no such index")); + assertThat(((ElasticsearchException)mgetResponse.getResponses()[0].getFailure().getFailure()).getIndex(), is("nonExistingIndex")); + }
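For readers tracking the SimpleIndexTemplateTests change above, a minimal sketch of the request-time alias-filter behaviour, assuming the same integration-test client() helpers used throughout these tests; the template name "demo_template", pattern "demo-*", alias "demo_alias", and field "user" are illustrative and not taken from the patch:

    // Since 2.0 alias filters are parsed at request time, so a filtered alias
    // defined in a template may reference a field that has no mapping yet.
    // Before 2.0 the calls below failed with "failed to parse filter for alias [...]".
    client().admin().indices().preparePutTemplate("demo_template")
            .setTemplate("demo-*")
            .addAlias(new Alias("demo_alias").filter("{\"term\": {\"user\": \"kimchy\"}}"))
            .get();
    // Index creation via the template and the write itself now both succeed;
    // "user" only enters the mapping once a document containing it is indexed.
    client().prepareIndex("demo-1", "type1", "1").setSource("{}").get();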
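The SimpleMgetTests hunk directly above also shows the new structured failure contract: the message is now a fixed "no such index", and the offending index name travels on the exception rather than being embedded in the message. A caller-side sketch under the same assumptions, where mgetResponse is a MultiGetResponse as in the test:

    MultiGetItemResponse item = mgetResponse.getResponses()[0];
    if (item.isFailed()) {
        // getFailure().getFailure() is the underlying IndexNotFoundException,
        // an ElasticsearchException carrying the index name
        ElasticsearchException cause = (ElasticsearchException) item.getFailure().getFailure();
        String missingIndex = cause.getIndex(); // e.g. "nonExistingIndex"
    }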
diff --git a/core/src/test/java/org/elasticsearch/percolator/MultiPercolatorTests.java b/core/src/test/java/org/elasticsearch/percolator/MultiPercolatorTests.java index cfded5fe345..5d71d3034e0 100644 --- a/core/src/test/java/org/elasticsearch/percolator/MultiPercolatorTests.java +++ b/core/src/test/java/org/elasticsearch/percolator/MultiPercolatorTests.java @@ -86,33 +86,33 @@ public class MultiPercolatorTests extends ElasticsearchIntegrationTest { .execute().actionGet(); MultiPercolateResponse.Item item = response.getItems()[0]; - assertMatchCount(item.response(), 2l); + assertMatchCount(item.getResponse(), 2l); assertThat(item.getResponse().getMatches(), arrayWithSize(2)); - assertThat(item.errorMessage(), nullValue()); + assertThat(item.getErrorMessage(), nullValue()); assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContainingInAnyOrder("1", "4")); item = response.getItems()[1]; - assertThat(item.errorMessage(), nullValue()); + assertThat(item.getErrorMessage(), nullValue()); - assertMatchCount(item.response(), 2l); + assertMatchCount(item.getResponse(), 2l); assertThat(item.getResponse().getMatches(), arrayWithSize(2)); assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContainingInAnyOrder("2", "4")); item = response.getItems()[2]; - assertThat(item.errorMessage(), nullValue()); - assertMatchCount(item.response(), 4l); + assertThat(item.getErrorMessage(), nullValue()); + assertMatchCount(item.getResponse(), 4l); assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContainingInAnyOrder("1", "2", "3", "4")); item = response.getItems()[3]; - assertThat(item.errorMessage(), nullValue()); - assertMatchCount(item.response(), 1l); + assertThat(item.getErrorMessage(), nullValue()); + assertMatchCount(item.getResponse(), 1l); assertThat(item.getResponse().getMatches(), arrayWithSize(1)); assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContaining("4")); item = response.getItems()[4]; assertThat(item.getResponse(), nullValue()); - assertThat(item.errorMessage(), notNullValue()); - assertThat(item.errorMessage(), containsString("document missing")); + assertThat(item.getErrorMessage(), notNullValue()); + assertThat(item.getErrorMessage(), containsString("document missing")); } @Test @@ -165,33 +165,33 @@ public class MultiPercolatorTests extends ElasticsearchIntegrationTest { .execute().actionGet(); MultiPercolateResponse.Item item = response.getItems()[0]; - assertMatchCount(item.response(), 2l); + assertMatchCount(item.getResponse(), 2l); assertThat(item.getResponse().getMatches(), arrayWithSize(2)); - assertThat(item.errorMessage(), nullValue()); + assertThat(item.getErrorMessage(), nullValue()); assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContainingInAnyOrder("1", "4")); item = response.getItems()[1]; - assertThat(item.errorMessage(), nullValue()); + assertThat(item.getErrorMessage(), nullValue()); - assertMatchCount(item.response(), 2l); + assertMatchCount(item.getResponse(), 2l); assertThat(item.getResponse().getMatches(), arrayWithSize(2)); assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContainingInAnyOrder("2", "4")); item = response.getItems()[2]; - assertThat(item.errorMessage(), nullValue()); - assertMatchCount(item.response(), 4l); + assertThat(item.getErrorMessage(), nullValue()); + assertMatchCount(item.getResponse(), 4l); assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), 
arrayContainingInAnyOrder("1", "2", "3", "4")); item = response.getItems()[3]; - assertThat(item.errorMessage(), nullValue()); - assertMatchCount(item.response(), 1l); + assertThat(item.getErrorMessage(), nullValue()); + assertMatchCount(item.getResponse(), 1l); assertThat(item.getResponse().getMatches(), arrayWithSize(1)); assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContaining("4")); item = response.getItems()[4]; assertThat(item.getResponse(), nullValue()); - assertThat(item.errorMessage(), notNullValue()); - assertThat(item.errorMessage(), containsString("document missing")); + assertThat(item.getErrorMessage(), notNullValue()); + assertThat(item.getErrorMessage(), containsString("document missing")); } @Test @@ -223,7 +223,7 @@ public class MultiPercolatorTests extends ElasticsearchIntegrationTest { assertThat(response.items().length, equalTo(numPercolateRequest)); for (MultiPercolateResponse.Item item : response) { assertThat(item.isFailure(), equalTo(false)); - assertMatchCount(item.response(), numQueries); + assertMatchCount(item.getResponse(), numQueries); assertThat(item.getResponse().getMatches().length, equalTo(numQueries)); } @@ -240,7 +240,7 @@ public class MultiPercolatorTests extends ElasticsearchIntegrationTest { assertThat(response.items().length, equalTo(numPercolateRequest)); for (MultiPercolateResponse.Item item : response) { assertThat(item.isFailure(), equalTo(true)); - assertThat(item.errorMessage(), containsString("document missing")); + assertThat(item.getErrorMessage(), containsString("document missing")); assertThat(item.getResponse(), nullValue()); } @@ -260,7 +260,7 @@ public class MultiPercolatorTests extends ElasticsearchIntegrationTest { response = builder.execute().actionGet(); assertThat(response.items().length, equalTo(numPercolateRequest + 1)); assertThat(response.items()[numPercolateRequest].isFailure(), equalTo(false)); - assertMatchCount(response.items()[numPercolateRequest].response(), numQueries); + assertMatchCount(response.items()[numPercolateRequest].getResponse(), numQueries); assertThat(response.items()[numPercolateRequest].getResponse().getMatches().length, equalTo(numQueries)); } @@ -292,7 +292,7 @@ public class MultiPercolatorTests extends ElasticsearchIntegrationTest { assertThat(response.items().length, equalTo(numPercolateRequest)); for (MultiPercolateResponse.Item item : response) { assertThat(item.isFailure(), equalTo(false)); - assertMatchCount(item.response(), numQueries); + assertMatchCount(item.getResponse(), numQueries); assertThat(item.getResponse().getMatches().length, equalTo(numQueries)); } @@ -333,7 +333,7 @@ public class MultiPercolatorTests extends ElasticsearchIntegrationTest { response = builder.execute().actionGet(); assertThat(response.items().length, equalTo(numPercolateRequest + 1)); assertThat(response.items()[numPercolateRequest].isFailure(), equalTo(false)); - assertMatchCount(response.items()[numPercolateRequest].response(), numQueries); + assertMatchCount(response.items()[numPercolateRequest].getResponse(), numQueries); assertThat(response.items()[numPercolateRequest].getResponse().getMatches().length, equalTo(numQueries)); } diff --git a/core/src/test/java/org/elasticsearch/percolator/PercolatorTests.java b/core/src/test/java/org/elasticsearch/percolator/PercolatorTests.java index 485721b31d8..1dd2878532e 100644 --- a/core/src/test/java/org/elasticsearch/percolator/PercolatorTests.java +++ b/core/src/test/java/org/elasticsearch/percolator/PercolatorTests.java @@ -1701,12 
+1701,11 @@ public class PercolatorTests extends ElasticsearchIntegrationTest { ensureGreen("idx"); try { - client().prepareIndex("idx", PercolatorService.TYPE_NAME, "1") - .setSource(jsonBuilder().startObject().field("query", QueryBuilders.queryStringQuery("color:red")).endObject()) - .get(); + client().prepareIndex("idx", PercolatorService.TYPE_NAME, "1") + .setSource(jsonBuilder().startObject().field("query", QueryBuilders.queryStringQuery("color:red")).endObject()) + .get(); fail(); } catch (PercolatorException e) { - } PercolateResponse percolateResponse = client().preparePercolate().setDocumentType("type") @@ -2056,7 +2055,7 @@ public class PercolatorTests extends ElasticsearchIntegrationTest { @Test public void testParentChild() throws Exception { - // We don't fail p/c queries, but those queries are unsuable because only one document can be provided in + // We don't fail p/c queries, but those queries are unusable because only a single document can be provided in // the percolate api assertAcked(prepareCreate("index").addMapping("child", "_parent", "type=parent").addMapping("parent")); @@ -2065,5 +2064,37 @@ public class PercolatorTests extends ElasticsearchIntegrationTest { .execute().actionGet(); } + @Test + public void testPercolateDocumentWithParentField() throws Exception { + assertAcked(prepareCreate("index").addMapping("child", "_parent", "type=parent").addMapping("parent")); + client().prepareIndex("index", PercolatorService.TYPE_NAME, "1") + .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) + .execute().actionGet(); + + // Just percolating a document that has a _parent field in its mapping should just work: + PercolateResponse response = client().preparePercolate() + .setDocumentType("parent") + .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("field", "value")) + .get(); + assertMatchCount(response, 1); + assertThat(response.getMatches()[0].getId().string(), equalTo("1")); + } + + @Test + public void testFilterByNow() throws Exception { + client().prepareIndex("index", PercolatorService.TYPE_NAME, "1") + .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("created", "2015-07-10T14:41:54+0000").endObject()) + .get(); + refresh(); + + PercolateResponse response = client().preparePercolate() + .setIndices("index") + .setDocumentType("type") + .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("{}")) + .setPercolateQuery(rangeQuery("created").lte("now")) + .get(); + assertMatchCount(response, 1); + } + } diff --git a/core/src/test/java/org/elasticsearch/routing/AliasResolveRoutingTests.java b/core/src/test/java/org/elasticsearch/routing/AliasResolveRoutingTests.java index 9b2b11fddad..0ebbda7d313 100644 --- a/core/src/test/java/org/elasticsearch/routing/AliasResolveRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/routing/AliasResolveRoutingTests.java @@ -19,6 +19,8 @@ package org.elasticsearch.routing; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.common.Priority; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; @@ -90,36 +92,38 @@ public class AliasResolveRoutingTests extends ElasticsearchIntegrationTest { client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test1", "alias0").routing("0")).execute().actionGet(); client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test2", 
"alias0").routing("0")).execute().actionGet(); - assertThat(clusterService().state().metaData().resolveSearchRouting(null, "alias"), nullValue()); - assertThat(clusterService().state().metaData().resolveSearchRouting("0,1", "alias"), equalTo(newMap("test1", newSet("0", "1")))); - assertThat(clusterService().state().metaData().resolveSearchRouting(null, "alias10"), equalTo(newMap("test1", newSet("0")))); - assertThat(clusterService().state().metaData().resolveSearchRouting(null, "alias10"), equalTo(newMap("test1", newSet("0")))); - assertThat(clusterService().state().metaData().resolveSearchRouting("0", "alias10"), equalTo(newMap("test1", newSet("0")))); - assertThat(clusterService().state().metaData().resolveSearchRouting("1", "alias10"), nullValue()); - assertThat(clusterService().state().metaData().resolveSearchRouting(null, "alias0"), equalTo(newMap("test1", newSet("0"), "test2", newSet("0")))); + ClusterState state = clusterService().state(); + IndexNameExpressionResolver indexNameExpressionResolver = internalCluster().getInstance(IndexNameExpressionResolver.class); + assertThat(indexNameExpressionResolver.resolveSearchRouting(state, null, "alias"), nullValue()); + assertThat(indexNameExpressionResolver.resolveSearchRouting(state, "0,1", "alias"), equalTo(newMap("test1", newSet("0", "1")))); + assertThat(indexNameExpressionResolver.resolveSearchRouting(state, null, "alias10"), equalTo(newMap("test1", newSet("0")))); + assertThat(indexNameExpressionResolver.resolveSearchRouting(state, null, "alias10"), equalTo(newMap("test1", newSet("0")))); + assertThat(indexNameExpressionResolver.resolveSearchRouting(state, "0", "alias10"), equalTo(newMap("test1", newSet("0")))); + assertThat(indexNameExpressionResolver.resolveSearchRouting(state, "1", "alias10"), nullValue()); + assertThat(indexNameExpressionResolver.resolveSearchRouting(state, null, "alias0"), equalTo(newMap("test1", newSet("0"), "test2", newSet("0")))); - assertThat(clusterService().state().metaData().resolveSearchRouting(null, new String[]{"alias10", "alias20"}), + assertThat(indexNameExpressionResolver.resolveSearchRouting(state, null, new String[]{"alias10", "alias20"}), equalTo(newMap("test1", newSet("0"), "test2", newSet("0")))); - assertThat(clusterService().state().metaData().resolveSearchRouting(null, new String[]{"alias10", "alias21"}), + assertThat(indexNameExpressionResolver.resolveSearchRouting(state, null, new String[]{"alias10", "alias21"}), equalTo(newMap("test1", newSet("0"), "test2", newSet("1")))); - assertThat(clusterService().state().metaData().resolveSearchRouting(null, new String[]{"alias20", "alias21"}), + assertThat(indexNameExpressionResolver.resolveSearchRouting(state, null, new String[]{"alias20", "alias21"}), equalTo(newMap("test2", newSet("0", "1")))); - assertThat(clusterService().state().metaData().resolveSearchRouting(null, new String[]{"test1", "alias10"}), nullValue()); - assertThat(clusterService().state().metaData().resolveSearchRouting(null, new String[]{"alias10", "test1"}), nullValue()); + assertThat(indexNameExpressionResolver.resolveSearchRouting(state, null, new String[]{"test1", "alias10"}), nullValue()); + assertThat(indexNameExpressionResolver.resolveSearchRouting(state, null, new String[]{"alias10", "test1"}), nullValue()); - assertThat(clusterService().state().metaData().resolveSearchRouting("0", new String[]{"alias10", "alias20"}), + assertThat(indexNameExpressionResolver.resolveSearchRouting(state, "0", new String[]{"alias10", "alias20"}), equalTo(newMap("test1", newSet("0"), 
"test2", newSet("0")))); - assertThat(clusterService().state().metaData().resolveSearchRouting("0,1", new String[]{"alias10", "alias20"}), + assertThat(indexNameExpressionResolver.resolveSearchRouting(state, "0,1", new String[]{"alias10", "alias20"}), equalTo(newMap("test1", newSet("0"), "test2", newSet("0")))); - assertThat(clusterService().state().metaData().resolveSearchRouting("1", new String[]{"alias10", "alias20"}), nullValue()); - assertThat(clusterService().state().metaData().resolveSearchRouting("0", new String[]{"alias10", "alias21"}), + assertThat(indexNameExpressionResolver.resolveSearchRouting(state, "1", new String[]{"alias10", "alias20"}), nullValue()); + assertThat(indexNameExpressionResolver.resolveSearchRouting(state, "0", new String[]{"alias10", "alias21"}), equalTo(newMap("test1", newSet("0")))); - assertThat(clusterService().state().metaData().resolveSearchRouting("1", new String[]{"alias10", "alias21"}), + assertThat(indexNameExpressionResolver.resolveSearchRouting(state, "1", new String[]{"alias10", "alias21"}), equalTo(newMap("test2", newSet("1")))); - assertThat(clusterService().state().metaData().resolveSearchRouting("0,1,2", new String[]{"alias10", "alias21"}), + assertThat(indexNameExpressionResolver.resolveSearchRouting(state, "0,1,2", new String[]{"alias10", "alias21"}), equalTo(newMap("test1", newSet("0"), "test2", newSet("1")))); - assertThat(clusterService().state().metaData().resolveSearchRouting("0,1,2", new String[]{"test1", "alias10", "alias21"}), + assertThat(indexNameExpressionResolver.resolveSearchRouting(state, "0,1,2", new String[]{"test1", "alias10", "alias21"}), equalTo(newMap("test1", newSet("0", "1", "2"), "test2", newSet("1")))); } diff --git a/core/src/test/java/org/elasticsearch/script/ScriptIndexSettingsTest.java b/core/src/test/java/org/elasticsearch/script/ScriptIndexSettingsTest.java index db8770a2207..1a57c5155e2 100644 --- a/core/src/test/java/org/elasticsearch/script/ScriptIndexSettingsTest.java +++ b/core/src/test/java/org/elasticsearch/script/ScriptIndexSettingsTest.java @@ -26,7 +26,7 @@ import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.action.indexedscripts.get.GetIndexedScriptResponse; import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptResponse; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.indices.IndexMissingException; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; @@ -79,7 +79,7 @@ public class ScriptIndexSettingsTest extends ElasticsearchIntegrationTest{ try { GetIndexedScriptResponse response = client().prepareGetIndexedScript("groovy","foobar").get(); assertTrue(false); //This should not happen - } catch (IndexMissingException ime) { + } catch (IndexNotFoundException ime) { assertTrue(true); } } diff --git a/core/src/test/java/org/elasticsearch/script/ScriptModesTests.java b/core/src/test/java/org/elasticsearch/script/ScriptModesTests.java index 1ea2145e03b..188cd8d7650 100644 --- a/core/src/test/java/org/elasticsearch/script/ScriptModesTests.java +++ b/core/src/test/java/org/elasticsearch/script/ScriptModesTests.java @@ -287,17 +287,17 @@ public class ScriptModesTests extends ElasticsearchTestCase { } @Override - public ExecutableScript executable(Object compiledScript, @Nullable Map vars) { + public ExecutableScript executable(CompiledScript compiledScript, @Nullable Map vars) { return null; } @Override - public SearchScript 
search(Object compiledScript, SearchLookup lookup, @Nullable Map vars) { + public SearchScript search(CompiledScript compiledScript, SearchLookup lookup, @Nullable Map vars) { return null; } @Override - public Object execute(Object compiledScript, Map vars) { + public Object execute(CompiledScript compiledScript, Map vars) { return null; } diff --git a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java index 19ada40353a..ca7401e2f3f 100644 --- a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java +++ b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java @@ -148,7 +148,7 @@ public class ScriptServiceTests extends ElasticsearchTestCase { scriptService.compile(new Script("test_script", ScriptType.FILE, "test", null), ScriptContext.Standard.SEARCH); fail("the script test_script should no longer exist"); } catch (IllegalArgumentException ex) { - assertThat(ex.getMessage(), containsString("Unable to find on disk script test_script")); + assertThat(ex.getMessage(), containsString("Unable to find on disk file script [test_script] using lang [test]")); } } @@ -171,7 +171,7 @@ public class ScriptServiceTests extends ElasticsearchTestCase { randomFrom(scriptContexts)); CompiledScript compiledScript2 = scriptService.compile(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts)); - assertThat(compiledScript1, sameInstance(compiledScript2)); + assertThat(compiledScript1.compiled(), sameInstance(compiledScript2.compiled())); } @Test @@ -181,7 +181,7 @@ public class ScriptServiceTests extends ElasticsearchTestCase { randomFrom(scriptContexts)); CompiledScript compiledScript2 = scriptService.compile(new Script("script", ScriptType.INLINE, "test2", null), randomFrom(scriptContexts)); - assertThat(compiledScript1, sameInstance(compiledScript2)); + assertThat(compiledScript1.compiled(), sameInstance(compiledScript2.compiled())); } @Test @@ -192,7 +192,7 @@ public class ScriptServiceTests extends ElasticsearchTestCase { randomFrom(scriptContexts)); CompiledScript compiledScript2 = scriptService.compile(new Script("file_script", ScriptType.FILE, "test2", null), randomFrom(scriptContexts)); - assertThat(compiledScript1, sameInstance(compiledScript2)); + assertThat(compiledScript1.compiled(), sameInstance(compiledScript2.compiled())); } @Test @@ -431,17 +431,17 @@ public class ScriptServiceTests extends ElasticsearchTestCase { } @Override - public ExecutableScript executable(final Object compiledScript, @Nullable Map vars) { + public ExecutableScript executable(final CompiledScript compiledScript, @Nullable Map vars) { return null; } @Override - public SearchScript search(Object compiledScript, SearchLookup lookup, @Nullable Map vars) { + public SearchScript search(CompiledScript compiledScript, SearchLookup lookup, @Nullable Map vars) { return null; } @Override - public Object execute(Object compiledScript, Map vars) { + public Object execute(CompiledScript compiledScript, Map vars) { return null; } diff --git a/core/src/test/java/org/elasticsearch/script/expression/ExpressionScriptTests.java b/core/src/test/java/org/elasticsearch/script/expression/ExpressionScriptTests.java index 0156c727899..098cbb514a5 100644 --- a/core/src/test/java/org/elasticsearch/script/expression/ExpressionScriptTests.java +++ b/core/src/test/java/org/elasticsearch/script/expression/ExpressionScriptTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import 
org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder; import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders; +import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptException; import org.elasticsearch.script.ScriptService.ScriptType; @@ -360,8 +361,12 @@ public class ExpressionScriptTests extends ElasticsearchIntegrationTest { SearchRequestBuilder req = client().prepareSearch().setIndices("test"); req.setQuery(QueryBuilders.matchAllQuery()) - .addAggregation(AggregationBuilders.stats("int_agg").field("x").script("_value * 3").lang(ExpressionScriptEngineService.NAME)) - .addAggregation(AggregationBuilders.stats("double_agg").field("y").script("_value - 1.1").lang(ExpressionScriptEngineService.NAME)); + .addAggregation( + AggregationBuilders.stats("int_agg").field("x") + .script(new Script("_value * 3", ScriptType.INLINE, ExpressionScriptEngineService.NAME, null))) + .addAggregation( + AggregationBuilders.stats("double_agg").field("y") + .script(new Script("_value - 1.1", ScriptType.INLINE, ExpressionScriptEngineService.NAME, null))); SearchResponse rsp = req.get(); assertEquals(3, rsp.getHits().getTotalHits()); @@ -414,8 +419,9 @@ public class ExpressionScriptTests extends ElasticsearchIntegrationTest { vars.put("xyz", -1); Expression expr = JavascriptCompiler.compile("a+b+xyz"); + CompiledScript compiledScript = new CompiledScript(ScriptType.INLINE, "", "expression", expr); - ExpressionExecutableScript ees = new ExpressionExecutableScript(expr, vars); + ExpressionExecutableScript ees = new ExpressionExecutableScript(compiledScript, vars); assertEquals((Double) ees.run(), 4.5, 0.001); ees.setNextVar("b", -2.5); @@ -431,7 +437,7 @@ public class ExpressionScriptTests extends ElasticsearchIntegrationTest { try { vars = new HashMap<>(); vars.put("a", 1); - ees = new ExpressionExecutableScript(expr, vars); + ees = new ExpressionExecutableScript(compiledScript, vars); ees.run(); fail("An incorrect number of variables were allowed to be used in an expression."); } catch (ScriptException se) { @@ -444,7 +450,7 @@ public class ExpressionScriptTests extends ElasticsearchIntegrationTest { vars.put("a", 1); vars.put("b", 3); vars.put("c", -1); - ees = new ExpressionExecutableScript(expr, vars); + ees = new ExpressionExecutableScript(compiledScript, vars); ees.run(); fail("A variable was allowed to be set that does not exist in the expression."); } catch (ScriptException se) { @@ -457,7 +463,7 @@ public class ExpressionScriptTests extends ElasticsearchIntegrationTest { vars.put("a", 1); vars.put("b", 3); vars.put("xyz", "hello"); - ees = new ExpressionExecutableScript(expr, vars); + ees = new ExpressionExecutableScript(compiledScript, vars); ees.run(); fail("A non-number was allowed to be use in the expression."); } catch (ScriptException se) { diff --git a/core/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTest.java b/core/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTest.java index ed7de33cde1..beea87ec603 100644 --- a/core/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTest.java +++ b/core/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTest.java @@ -20,6 +20,8 @@ package org.elasticsearch.script.mustache; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.script.CompiledScript; +import 
org.elasticsearch.script.ScriptService; import org.elasticsearch.test.ElasticsearchTestCase; import org.junit.Before; import org.junit.Test; @@ -52,7 +54,7 @@ public class MustacheScriptEngineTest extends ElasticsearchTestCase { + "\"negative\": {\"term\": {\"body\": {\"value\": \"solr\"}" + "}}, \"negative_boost\": {{boost_val}} } }}"; Map vars = new HashMap<>(); vars.put("boost_val", "0.3"); - BytesReference o = (BytesReference) qe.execute(qe.compile(template), vars); + BytesReference o = (BytesReference) qe.execute(new CompiledScript(ScriptService.ScriptType.INLINE, "", "mustache", qe.compile(template)), vars); assertEquals("GET _search {\"query\": {\"boosting\": {\"positive\": {\"match\": {\"body\": \"gift\"}}," + "\"negative\": {\"term\": {\"body\": {\"value\": \"solr\"}}}, \"negative_boost\": 0.3 } }}", new String(o.toBytes(), Charset.forName("UTF-8"))); @@ -63,7 +65,7 @@ public class MustacheScriptEngineTest extends ElasticsearchTestCase { Map vars = new HashMap<>(); vars.put("boost_val", "0.3"); vars.put("body_val", "\"quick brown\""); - BytesReference o = (BytesReference) qe.execute(qe.compile(template), vars); + BytesReference o = (BytesReference) qe.execute(new CompiledScript(ScriptService.ScriptType.INLINE, "", "mustache", qe.compile(template)), vars); assertEquals("GET _search {\"query\": {\"boosting\": {\"positive\": {\"match\": {\"body\": \"gift\"}}," + "\"negative\": {\"term\": {\"body\": {\"value\": \"\\\"quick brown\\\"\"}}}, \"negative_boost\": 0.3 } }}", new String(o.toBytes(), Charset.forName("UTF-8"))); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsTests.java index daa1914fce9..c7d38ad53ed 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsTests.java @@ -535,7 +535,7 @@ public class DoubleTermsTests extends AbstractTermsTests { .addAggregation(terms("terms") .field(MULTI_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(new Script("(long) _value / 1000 + 1"))) + .script(new Script("(long) (_value / 1000 + 1)"))) .execute().actionGet(); assertSearchResponse(response); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TopHitsTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TopHitsTests.java index 1d62df80c0d..1f0a47522cd 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TopHitsTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TopHitsTests.java @@ -474,21 +474,15 @@ public class TopHitsTests extends ElasticsearchIntegrationTest { @Test public void testFieldCollapsing() throws Exception { - SearchResponse response = client().prepareSearch("idx").setTypes("field-collapsing") + SearchResponse response = client() + .prepareSearch("idx") + .setTypes("field-collapsing") .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) .setQuery(matchQuery("text", "term rare")) - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field("group") - .order(Terms.Order.aggregation("max_score", false)) - .subAggregation( - topHits("hits").setSize(1) - ) - .subAggregation( - max("max_score").script("_score.doubleValue()") - ) - ) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()).field("group") + .order(Terms.Order.aggregation("max_score", 
false)).subAggregation(topHits("hits").setSize(1)) + .subAggregation(max("max_score").script(new Script("_score.doubleValue()")))).get(); assertSearchResponse(response); Terms terms = response.getAggregations().get("terms"); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTests.java index f416b7df046..e8c42fb9747 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTests.java @@ -103,5 +103,4 @@ public abstract class AbstractNumericTests extends ElasticsearchIntegrationTest public abstract void testScript_MultiValued_WithParams() throws Exception; - } \ No newline at end of file diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgTests.java index 27e6830f0af..3f7719a8e62 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgTests.java @@ -19,11 +19,16 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.metrics.avg.Avg; import org.junit.Test; +import java.util.HashMap; +import java.util.Map; + import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.search.aggregations.AggregationBuilders.avg; import static org.elasticsearch.search.aggregations.AggregationBuilders.global; @@ -136,7 +141,7 @@ public class AvgTests extends AbstractNumericTests { public void testSingleValuedField_WithValueScript() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(avg("avg").field("value").script("_value + 1")) + .addAggregation(avg("avg").field("value").script(new Script("_value + 1"))) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -150,9 +155,11 @@ public class AvgTests extends AbstractNumericTests { @Override @Test public void testSingleValuedField_WithValueScript_WithParams() throws Exception { + Map params = new HashMap<>(); + params.put("inc", 1); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(avg("avg").field("value").script("_value + inc").param("inc", 1)) + .addAggregation(avg("avg").field("value").script(new Script("_value + inc", ScriptType.INLINE, null, params))) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -197,7 +204,7 @@ public class AvgTests extends AbstractNumericTests { public void testMultiValuedField_WithValueScript() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(avg("avg").field("values").script("_value + 1")) + .addAggregation(avg("avg").field("values").script(new Script("_value + 1"))) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -211,9 +218,11 @@ public class 
AvgTests extends AbstractNumericTests { @Override @Test public void testMultiValuedField_WithValueScript_WithParams() throws Exception { + Map params = new HashMap<>(); + params.put("inc", 1); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(avg("avg").field("values").script("_value + inc").param("inc", 1)) + .addAggregation(avg("avg").field("values").script(new Script("_value + inc", ScriptType.INLINE, null, params))) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -229,7 +238,7 @@ public class AvgTests extends AbstractNumericTests { public void testScript_SingleValued() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(avg("avg").script("doc['value'].value")) + .addAggregation(avg("avg").script(new Script("doc['value'].value"))) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -243,9 +252,11 @@ public class AvgTests extends AbstractNumericTests { @Override @Test public void testScript_SingleValued_WithParams() throws Exception { + Map params = new HashMap<>(); + params.put("inc", 1); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(avg("avg").script("doc['value'].value + inc").param("inc", 1)) + .addAggregation(avg("avg").script(new Script("doc['value'].value + inc", ScriptType.INLINE, null, params))) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -259,9 +270,11 @@ public class AvgTests extends AbstractNumericTests { @Override @Test public void testScript_ExplicitSingleValued_WithParams() throws Exception { + Map params = new HashMap<>(); + params.put("inc", 1); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(avg("avg").script("doc['value'].value + inc").param("inc", 1)) + .addAggregation(avg("avg").script(new Script("doc['value'].value + inc", ScriptType.INLINE, null, params))) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -277,7 +290,7 @@ public class AvgTests extends AbstractNumericTests { public void testScript_MultiValued() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(avg("avg").script("[ doc['value'].value, doc['value'].value + 1 ]")) + .addAggregation(avg("avg").script(new Script("[ doc['value'].value, doc['value'].value + 1 ]"))) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -293,7 +306,7 @@ public class AvgTests extends AbstractNumericTests { public void testScript_ExplicitMultiValued() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(avg("avg").script("[ doc['value'].value, doc['value'].value + 1 ]")) + .addAggregation(avg("avg").script(new Script("[ doc['value'].value, doc['value'].value + 1 ]"))) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -307,9 +320,12 @@ public class AvgTests extends AbstractNumericTests { @Override @Test public void testScript_MultiValued_WithParams() throws Exception { + Map params = new HashMap<>(); + params.put("inc", 1); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(avg("avg").script("[ doc['value'].value, 
doc['value'].value + inc ]").param("inc", 1)) + .addAggregation( + avg("avg").script(new Script("[ doc['value'].value, doc['value'].value + inc ]", ScriptType.INLINE, null, params))) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -319,5 +335,4 @@ public class AvgTests extends AbstractNumericTests { assertThat(avg.getName(), equalTo("avg")); assertThat(avg.getValue(), equalTo((double) (1+2+2+3+3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11) / 20)); } - } \ No newline at end of file diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityTests.java index df5ee5c534e..bf708ec5c50 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityTests.java @@ -22,6 +22,7 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.terms.Terms; @@ -323,7 +324,8 @@ public class CardinalityTests extends ElasticsearchIntegrationTest { @Test public void singleValuedStringScript() throws Exception { SearchResponse response = client().prepareSearch("idx").setTypes("type") - .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).script("doc['str_value'].value")) + .addAggregation( + cardinality("cardinality").precisionThreshold(precisionThreshold).script(new Script("doc['str_value'].value"))) .execute().actionGet(); assertSearchResponse(response); @@ -337,7 +339,8 @@ public class CardinalityTests extends ElasticsearchIntegrationTest { @Test public void multiValuedStringScript() throws Exception { SearchResponse response = client().prepareSearch("idx").setTypes("type") - .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).script("doc['str_values'].values")) + .addAggregation( + cardinality("cardinality").precisionThreshold(precisionThreshold).script(new Script("doc['str_values'].values"))) .execute().actionGet(); assertSearchResponse(response); @@ -351,7 +354,9 @@ public class CardinalityTests extends ElasticsearchIntegrationTest { @Test public void singleValuedNumericScript() throws Exception { SearchResponse response = client().prepareSearch("idx").setTypes("type") - .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).script("doc['" + singleNumericField(false) + "'].value")) + .addAggregation( + cardinality("cardinality").precisionThreshold(precisionThreshold).script( + new Script("doc['" + singleNumericField(false) + "'].value"))) .execute().actionGet(); assertSearchResponse(response); @@ -365,7 +370,9 @@ public class CardinalityTests extends ElasticsearchIntegrationTest { @Test public void multiValuedNumericScript() throws Exception { SearchResponse response = client().prepareSearch("idx").setTypes("type") - .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).script("doc['" + multiNumericField(false) + "'].values")) + .addAggregation( + cardinality("cardinality").precisionThreshold(precisionThreshold).script( + new 
Script("doc['" + multiNumericField(false) + "'].values"))) .execute().actionGet(); assertSearchResponse(response); @@ -379,7 +386,8 @@ public class CardinalityTests extends ElasticsearchIntegrationTest { @Test public void singleValuedStringValueScript() throws Exception { SearchResponse response = client().prepareSearch("idx").setTypes("type") - .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_value").script("_value")) + .addAggregation( + cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_value").script(new Script("_value"))) .execute().actionGet(); assertSearchResponse(response); @@ -393,7 +401,8 @@ public class CardinalityTests extends ElasticsearchIntegrationTest { @Test public void multiValuedStringValueScript() throws Exception { SearchResponse response = client().prepareSearch("idx").setTypes("type") - .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_values").script("_value")) + .addAggregation( + cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_values").script(new Script("_value"))) .execute().actionGet(); assertSearchResponse(response); @@ -407,7 +416,9 @@ public class CardinalityTests extends ElasticsearchIntegrationTest { @Test public void singleValuedNumericValueScript() throws Exception { SearchResponse response = client().prepareSearch("idx").setTypes("type") - .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field(singleNumericField(false)).script("_value")) + .addAggregation( + cardinality("cardinality").precisionThreshold(precisionThreshold).field(singleNumericField(false)) + .script(new Script("_value"))) .execute().actionGet(); assertSearchResponse(response); @@ -421,7 +432,9 @@ public class CardinalityTests extends ElasticsearchIntegrationTest { @Test public void multiValuedNumericValueScript() throws Exception { SearchResponse response = client().prepareSearch("idx").setTypes("type") - .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field(multiNumericField(false)).script("_value")) + .addAggregation( + cardinality("cardinality").precisionThreshold(precisionThreshold).field(multiNumericField(false)) + .script(new Script("_value"))) .execute().actionGet(); assertSearchResponse(response); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsTests.java index 248b3633db9..3d9a0d46b55 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsTests.java @@ -20,11 +20,16 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats; import org.junit.Test; +import java.util.HashMap; +import java.util.Map; + import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.search.aggregations.AggregationBuilders.extendedStats; import static 
org.elasticsearch.search.aggregations.AggregationBuilders.global; @@ -218,7 +223,7 @@ public class ExtendedStatsTests extends AbstractNumericTests { double expectedMaxValue = 10.0; assertThat(stats.getMax(), equalTo(expectedMaxValue)); assertThat((double) global.getProperty("stats.max"), equalTo(expectedMaxValue)); - double expectedSumValue = (double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10); + double expectedSumValue = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10; assertThat(stats.getSum(), equalTo(expectedSumValue)); assertThat((double) global.getProperty("stats.sum"), equalTo(expectedSumValue)); long expectedCountValue = 10; @@ -266,7 +271,7 @@ public class ExtendedStatsTests extends AbstractNumericTests { double sigma = randomDouble() * randomIntBetween(1, 10); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(extendedStats("stats").field("value").script("_value + 1").sigma(sigma)) + .addAggregation(extendedStats("stats").field("value").script(new Script("_value + 1")).sigma(sigma)) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -288,10 +293,14 @@ public class ExtendedStatsTests extends AbstractNumericTests { @Override @Test public void testSingleValuedField_WithValueScript_WithParams() throws Exception { + Map params = new HashMap<>(); + params.put("inc", 1); double sigma = randomDouble() * randomIntBetween(1, 10); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(extendedStats("stats").field("value").script("_value + inc").param("inc", 1).sigma(sigma)) + .addAggregation( + extendedStats("stats").field("value").script(new Script("_value + inc", ScriptType.INLINE, null, params)) + .sigma(sigma)) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -341,7 +350,7 @@ public class ExtendedStatsTests extends AbstractNumericTests { double sigma = randomDouble() * randomIntBetween(1, 10); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(extendedStats("stats").field("values").script("_value - 1").sigma(sigma)) + .addAggregation(extendedStats("stats").field("values").script(new Script("_value - 1")).sigma(sigma)) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -363,10 +372,14 @@ public class ExtendedStatsTests extends AbstractNumericTests { @Override @Test public void testMultiValuedField_WithValueScript_WithParams() throws Exception { + Map params = new HashMap<>(); + params.put("dec", 1); double sigma = randomDouble() * randomIntBetween(1, 10); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(extendedStats("stats").field("values").script("_value - dec").param("dec", 1).sigma(sigma)) + .addAggregation( + extendedStats("stats").field("values").script(new Script("_value - dec", ScriptType.INLINE, null, params)) + .sigma(sigma)) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -391,7 +404,7 @@ public class ExtendedStatsTests extends AbstractNumericTests { double sigma = randomDouble() * randomIntBetween(1, 10); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(extendedStats("stats").script("doc['value'].value").sigma(sigma)) + .addAggregation(extendedStats("stats").script(new Script("doc['value'].value")).sigma(sigma)) .execute().actionGet(); 
assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -413,10 +426,13 @@ public class ExtendedStatsTests extends AbstractNumericTests { @Override @Test public void testScript_SingleValued_WithParams() throws Exception { + Map params = new HashMap<>(); + params.put("inc", 1); double sigma = randomDouble() * randomIntBetween(1, 10); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(extendedStats("stats").script("doc['value'].value + inc").param("inc", 1).sigma(sigma)) + .addAggregation( + extendedStats("stats").script(new Script("doc['value'].value + inc", ScriptType.INLINE, null, params)).sigma(sigma)) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -438,10 +454,13 @@ public class ExtendedStatsTests extends AbstractNumericTests { @Override @Test public void testScript_ExplicitSingleValued_WithParams() throws Exception { + Map params = new HashMap<>(); + params.put("inc", 1); double sigma = randomDouble() * randomIntBetween(1, 10); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(extendedStats("stats").script("doc['value'].value + inc").param("inc", 1).sigma(sigma)) + .addAggregation( + extendedStats("stats").script(new Script("doc['value'].value + inc", ScriptType.INLINE, null, params)).sigma(sigma)) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -466,7 +485,7 @@ public class ExtendedStatsTests extends AbstractNumericTests { double sigma = randomDouble() * randomIntBetween(1, 10); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(extendedStats("stats").script("doc['values'].values").sigma(sigma)) + .addAggregation(extendedStats("stats").script(new Script("doc['values'].values")).sigma(sigma)) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -491,7 +510,7 @@ public class ExtendedStatsTests extends AbstractNumericTests { double sigma = randomDouble() * randomIntBetween(1, 10); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(extendedStats("stats").script("doc['values'].values").sigma(sigma)) + .addAggregation(extendedStats("stats").script(new Script("doc['values'].values")).sigma(sigma)) .execute().actionGet(); assertShardExecutionState(searchResponse, 0); @@ -515,10 +534,15 @@ public class ExtendedStatsTests extends AbstractNumericTests { @Override @Test public void testScript_MultiValued_WithParams() throws Exception { + Map params = new HashMap<>(); + params.put("dec", 1); double sigma = randomDouble() * randomIntBetween(1, 10); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(extendedStats("stats").script("[ doc['value'].value, doc['value'].value - dec ]").param("dec", 1).sigma(sigma)) + .addAggregation( + extendedStats("stats").script( + new Script("[ doc['value'].value, doc['value'].value - dec ]", ScriptType.INLINE, null, params)) + .sigma(sigma)) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxTests.java index 45a9a3afec5..66a34b65138 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxTests.java +++ 
b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxTests.java @@ -19,11 +19,16 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.metrics.max.Max; import org.junit.Test; +import java.util.HashMap; +import java.util.Map; + import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.search.aggregations.AggregationBuilders.global; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; @@ -150,7 +155,7 @@ public class MaxTests extends AbstractNumericTests { public void testSingleValuedField_WithValueScript() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(max("max").field("value").script("_value + 1")) + .addAggregation(max("max").field("value").script(new Script("_value + 1"))) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -164,9 +169,11 @@ public class MaxTests extends AbstractNumericTests { @Override @Test public void testSingleValuedField_WithValueScript_WithParams() throws Exception { + Map params = new HashMap<>(); + params.put("inc", 1); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(max("max").field("value").script("_value + inc").param("inc", 1)) + .addAggregation(max("max").field("value").script(new Script("_value + inc", ScriptType.INLINE, null, params))) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -198,7 +205,7 @@ public class MaxTests extends AbstractNumericTests { public void testMultiValuedField_WithValueScript() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(max("max").field("values").script("_value + 1")) + .addAggregation(max("max").field("values").script(new Script("_value + 1"))) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -212,9 +219,11 @@ public class MaxTests extends AbstractNumericTests { @Override @Test public void testMultiValuedField_WithValueScript_WithParams() throws Exception { + Map params = new HashMap<>(); + params.put("inc", 1); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(max("max").field("values").script("_value + inc").param("inc", 1)) + .addAggregation(max("max").field("values").script(new Script("_value + inc", ScriptType.INLINE, null, params))) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -230,7 +239,7 @@ public class MaxTests extends AbstractNumericTests { public void testScript_SingleValued() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(max("max").script("doc['value'].value")) + .addAggregation(max("max").script(new Script("doc['value'].value"))) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -244,9 +253,11 @@ public class MaxTests extends AbstractNumericTests { @Override @Test public void testScript_SingleValued_WithParams() 
throws Exception { + Map params = new HashMap<>(); + params.put("inc", 1); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(max("max").script("doc['value'].value + inc").param("inc", 1)) + .addAggregation(max("max").script(new Script("doc['value'].value + inc", ScriptType.INLINE, null, params))) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -260,9 +271,11 @@ public class MaxTests extends AbstractNumericTests { @Override @Test public void testScript_ExplicitSingleValued_WithParams() throws Exception { + Map params = new HashMap<>(); + params.put("inc", 1); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(max("max").script("doc['value'].value + inc").param("inc", 1)) + .addAggregation(max("max").script(new Script("doc['value'].value + inc", ScriptType.INLINE, null, params))) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -278,7 +291,7 @@ public class MaxTests extends AbstractNumericTests { public void testScript_MultiValued() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(max("max").script("doc['values'].values")) + .addAggregation(max("max").script(new Script("doc['values'].values"))) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -294,7 +307,7 @@ public class MaxTests extends AbstractNumericTests { public void testScript_ExplicitMultiValued() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(max("max").script("doc['values'].values")) + .addAggregation(max("max").script(new Script("doc['values'].values"))) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -308,9 +321,11 @@ public class MaxTests extends AbstractNumericTests { @Override @Test public void testScript_MultiValued_WithParams() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(max("max").script("[ doc['value'].value, doc['value'].value + inc ]").param("inc", 1)) + Map params = new HashMap<>(); + params.put("inc", 1); + SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + max("max").script(new Script("[ doc['value'].value, doc['value'].value + inc ]", ScriptType.INLINE, null, params))) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -320,6 +335,4 @@ public class MaxTests extends AbstractNumericTests { assertThat(max.getName(), equalTo("max")); assertThat(max.getValue(), equalTo(11.0)); } - - } \ No newline at end of file diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MinTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MinTests.java index 1018ffdf639..94f895911a6 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MinTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MinTests.java @@ -19,11 +19,16 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.search.aggregations.bucket.global.Global; import 
org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.metrics.min.Min; import org.junit.Test; +import java.util.HashMap; +import java.util.Map; + import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.search.aggregations.AggregationBuilders.global; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; @@ -150,7 +155,7 @@ public class MinTests extends AbstractNumericTests { public void testSingleValuedField_WithValueScript() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(min("min").field("value").script("_value - 1")) + .addAggregation(min("min").field("value").script(new Script("_value - 1"))) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -164,9 +169,11 @@ public class MinTests extends AbstractNumericTests { @Override @Test public void testSingleValuedField_WithValueScript_WithParams() throws Exception { + Map params = new HashMap<>(); + params.put("dec", 1); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(min("min").field("value").script("_value - dec").param("dec", 1)) + .addAggregation(min("min").field("value").script(new Script("_value - dec", ScriptType.INLINE, null, params))) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -198,8 +205,7 @@ public class MinTests extends AbstractNumericTests { public void testMultiValuedField_WithValueScript() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(min("min").field("values").script("_value - 1")) - .execute().actionGet(); + .addAggregation(min("min").field("values").script(new Script("_value - 1"))).execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -211,11 +217,10 @@ public class MinTests extends AbstractNumericTests { @Test public void testMultiValuedField_WithValueScript_Reverse() throws Exception { - // test what happens when values arrive in reverse order since the min aggregator is optimized to work on sorted values - SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(min("min").field("values").script("_value * -1")) - .execute().actionGet(); + // test what happens when values arrive in reverse order since the min + // aggregator is optimized to work on sorted values + SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(min("min").field("values").script(new Script("_value * -1"))).execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -228,10 +233,11 @@ public class MinTests extends AbstractNumericTests { @Override @Test public void testMultiValuedField_WithValueScript_WithParams() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(min("min").field("values").script("_value - dec").param("dec", 1)) - .execute().actionGet(); + Map params = new HashMap<>(); + params.put("dec", 1); + SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(min("min").field("values").script(new Script("_value - dec", ScriptType.INLINE, null, params))).execute() + .actionGet(); 
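// Editor's sketch, not part of the original commit: every hunk in these test files
// applies the same migration, from an inline script string with chained
// .param("name", value) calls to a single org.elasticsearch.script.Script object
// that bundles the source, the ScriptType, the language (null picks the default),
// and the parameter map. A minimal, self-contained illustration of the new calling
// convention follows; the helper class, its method name, and the `client` argument
// are assumptions for the example, while the builder calls mirror the diff above.

import java.util.HashMap;
import java.util.Map;

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService.ScriptType;

import static org.elasticsearch.search.aggregations.AggregationBuilders.min;

class ScriptMigrationSketch {
    static SearchResponse minWithParams(Client client) {
        // Parameters now travel inside the Script object instead of .param(...) chains.
        Map<String, Object> params = new HashMap<>();
        params.put("dec", 1);
        return client.prepareSearch("idx")
                .addAggregation(min("min").field("value")
                        .script(new Script("_value - dec", ScriptType.INLINE, null, params)))
                .execute().actionGet();
    }
}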
assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -244,10 +250,8 @@ public class MinTests extends AbstractNumericTests { @Override @Test public void testScript_SingleValued() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(min("min").script("doc['value'].value")) - .execute().actionGet(); + SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(min("min").script(new Script("doc['value'].value"))).execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -260,10 +264,11 @@ public class MinTests extends AbstractNumericTests { @Override @Test public void testScript_SingleValued_WithParams() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(min("min").script("doc['value'].value - dec").param("dec", 1)) - .execute().actionGet(); + Map params = new HashMap<>(); + params.put("dec", 1); + SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(min("min").script(new Script("doc['value'].value - dec", ScriptType.INLINE, null, params))).execute() + .actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -276,10 +281,11 @@ public class MinTests extends AbstractNumericTests { @Override @Test public void testScript_ExplicitSingleValued_WithParams() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(min("min").script("doc['value'].value - dec").param("dec", 1)) - .execute().actionGet(); + Map params = new HashMap<>(); + params.put("dec", 1); + SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(min("min").script(new Script("doc['value'].value - dec", ScriptType.INLINE, null, params))).execute() + .actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -292,10 +298,8 @@ public class MinTests extends AbstractNumericTests { @Override @Test public void testScript_MultiValued() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(min("min").script("doc['values'].values")) - .execute().actionGet(); + SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(min("min").script(new Script("doc['values'].values"))).execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -308,10 +312,8 @@ public class MinTests extends AbstractNumericTests { @Override @Test public void testScript_ExplicitMultiValued() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(min("min").script("doc['values'].values")) - .execute().actionGet(); + SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(min("min").script(new Script("doc['values'].values"))).execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -324,10 +326,16 @@ public class MinTests extends AbstractNumericTests { @Override @Test public void testScript_MultiValued_WithParams() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx") + Map params = new HashMap<>(); + params.put("dec", 1); + SearchResponse searchResponse = client() 
+ .prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(min("min").script("List values = doc['values'].values; double[] res = new double[values.size()]; for (int i = 0; i < res.length; i++) { res[i] = values.get(i) - dec; }; return res;").param("dec", 1)) - .execute().actionGet(); + .addAggregation( + min("min") + .script(new Script( + "List values = doc['values'].values; double[] res = new double[values.size()]; for (int i = 0; i < res.length; i++) { res[i] = values.get(i) - dec; }; return res;", + ScriptType.INLINE, null, params))).execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentileRanksTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentileRanksTests.java index 2512a519dfa..b59c11ac18e 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentileRanksTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentileRanksTests.java @@ -22,6 +22,8 @@ import com.google.common.collect.Lists; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Order; @@ -31,7 +33,9 @@ import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanks import org.junit.Test; import java.util.Arrays; +import java.util.HashMap; import java.util.List; +import java.util.Map; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.search.aggregations.AggregationBuilders.global; @@ -49,7 +53,7 @@ import static org.hamcrest.Matchers.sameInstance; public class PercentileRanksTests extends AbstractNumericTests { private static double[] randomPercents(long minValue, long maxValue) { - + final int length = randomIntBetween(1, 20); final double[] percents = new double[length]; for (int i = 0; i < percents.length; ++i) { @@ -229,7 +233,7 @@ public class PercentileRanksTests extends AbstractNumericTests { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(randomCompression(percentileRanks("percentile_ranks")) - .field("value").script("_value - 1") +.field("value").script(new Script("_value - 1")) .percentiles(pcts)) .execute().actionGet(); @@ -242,11 +246,14 @@ public class PercentileRanksTests extends AbstractNumericTests { @Override @Test public void testSingleValuedField_WithValueScript_WithParams() throws Exception { + Map params = new HashMap<>(); + params.put("dec", 1); final double[] pcts = randomPercents(minValue - 1, maxValue - 1); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(randomCompression(percentileRanks("percentile_ranks")) - .field("value").script("_value - dec").param("dec", 1) +.field("value") + .script(new Script("_value - dec", ScriptType.INLINE, null, params)) .percentiles(pcts)) .execute().actionGet(); @@ -280,7 +287,7 @@ public class PercentileRanksTests extends AbstractNumericTests { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(randomCompression(percentileRanks("percentile_ranks")) - 
.field("values").script("_value - 1") +.field("values").script(new Script("_value - 1")) .percentiles(pcts)) .execute().actionGet(); @@ -296,7 +303,7 @@ public class PercentileRanksTests extends AbstractNumericTests { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(randomCompression(percentileRanks("percentile_ranks")) - .field("values").script("_value * -1") +.field("values").script(new Script("_value * -1")) .percentiles(pcts)) .execute().actionGet(); @@ -309,11 +316,14 @@ public class PercentileRanksTests extends AbstractNumericTests { @Override @Test public void testMultiValuedField_WithValueScript_WithParams() throws Exception { + Map params = new HashMap<>(); + params.put("dec", 1); final double[] pcts = randomPercents(minValues - 1, maxValues - 1); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(randomCompression(percentileRanks("percentile_ranks")) - .field("values").script("_value - dec").param("dec", 1) +.field("values") + .script(new Script("_value - dec", ScriptType.INLINE, null, params)) .percentiles(pcts)) .execute().actionGet(); @@ -330,7 +340,7 @@ public class PercentileRanksTests extends AbstractNumericTests { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(randomCompression(percentileRanks("percentile_ranks")) - .script("doc['value'].value") +.script(new Script("doc['value'].value")) .percentiles(pcts)) .execute().actionGet(); @@ -343,11 +353,14 @@ public class PercentileRanksTests extends AbstractNumericTests { @Override @Test public void testScript_SingleValued_WithParams() throws Exception { + Map params = new HashMap<>(); + params.put("dec", 1); final double[] pcts = randomPercents(minValue - 1, maxValue - 1); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(randomCompression(percentileRanks("percentile_ranks")) - .script("doc['value'].value - dec").param("dec", 1) +.script( + new Script("doc['value'].value - dec", ScriptType.INLINE, null, params)) .percentiles(pcts)) .execute().actionGet(); @@ -360,11 +373,14 @@ public class PercentileRanksTests extends AbstractNumericTests { @Override @Test public void testScript_ExplicitSingleValued_WithParams() throws Exception { + Map params = new HashMap<>(); + params.put("dec", 1); final double[] pcts = randomPercents(minValue -1 , maxValue - 1); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(randomCompression(percentileRanks("percentile_ranks")) - .script("doc['value'].value - dec").param("dec", 1) +.script( + new Script("doc['value'].value - dec", ScriptType.INLINE, null, params)) .percentiles(pcts)) .execute().actionGet(); @@ -381,7 +397,7 @@ public class PercentileRanksTests extends AbstractNumericTests { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(randomCompression(percentileRanks("percentile_ranks")) - .script("doc['values'].values") +.script(new Script("doc['values'].values")) .percentiles(pcts)) .execute().actionGet(); @@ -398,7 +414,7 @@ public class PercentileRanksTests extends AbstractNumericTests { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(randomCompression(percentileRanks("percentile_ranks")) - .script("doc['values'].values") +.script(new Script("doc['values'].values")) .percentiles(pcts)) .execute().actionGet(); 
@@ -411,11 +427,15 @@ public class PercentileRanksTests extends AbstractNumericTests { @Override @Test public void testScript_MultiValued_WithParams() throws Exception { + Map params = new HashMap<>(); + params.put("dec", 1); final double[] pcts = randomPercents(minValues - 1, maxValues - 1); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(randomCompression(percentileRanks("percentile_ranks")) - .script("List values = doc['values'].values; double[] res = new double[values.size()]; for (int i = 0; i < res.length; i++) { res[i] = values.get(i) - dec; }; return res;").param("dec", 1) + .script(new Script( + "List values = doc['values'].values; double[] res = new double[values.size()]; for (int i = 0; i < res.length; i++) { res[i] = values.get(i) - dec; }; return res;", + ScriptType.INLINE, null, params)) .percentiles(pcts)) .execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentilesTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentilesTests.java index 63141f420fe..ed9c3c33b82 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentilesTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentilesTests.java @@ -22,6 +22,8 @@ import com.google.common.collect.Lists; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Order; @@ -31,7 +33,9 @@ import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesBuil import org.junit.Test; import java.util.Arrays; +import java.util.HashMap; import java.util.List; +import java.util.Map; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.search.aggregations.AggregationBuilders.global; @@ -212,7 +216,7 @@ public class PercentilesTests extends AbstractNumericTests { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(randomCompression(percentiles("percentiles")) - .field("value").script("_value - 1") +.field("value").script(new Script("_value - 1")) .percentiles(pcts)) .execute().actionGet(); @@ -225,11 +229,14 @@ public class PercentilesTests extends AbstractNumericTests { @Override @Test public void testSingleValuedField_WithValueScript_WithParams() throws Exception { + Map params = new HashMap<>(); + params.put("dec", 1); final double[] pcts = randomPercentiles(); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(randomCompression(percentiles("percentiles")) - .field("value").script("_value - dec").param("dec", 1) +.field("value") + .script(new Script("_value - dec", ScriptType.INLINE, null, params)) .percentiles(pcts)) .execute().actionGet(); @@ -263,7 +270,7 @@ public class PercentilesTests extends AbstractNumericTests { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(randomCompression(percentiles("percentiles")) - .field("values").script("_value - 1") +.field("values").script(new Script("_value - 1")) .percentiles(pcts)) .execute().actionGet(); @@ -279,7 +286,7 @@ 
public class PercentilesTests extends AbstractNumericTests { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(randomCompression(percentiles("percentiles")) - .field("values").script("_value * -1") +.field("values").script(new Script("_value * -1")) .percentiles(pcts)) .execute().actionGet(); @@ -292,11 +299,14 @@ public class PercentilesTests extends AbstractNumericTests { @Override @Test public void testMultiValuedField_WithValueScript_WithParams() throws Exception { + Map params = new HashMap<>(); + params.put("dec", 1); final double[] pcts = randomPercentiles(); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(randomCompression(percentiles("percentiles")) - .field("values").script("_value - dec").param("dec", 1) +.field("values") + .script(new Script("_value - dec", ScriptType.INLINE, null, params)) .percentiles(pcts)) .execute().actionGet(); @@ -313,7 +323,7 @@ public class PercentilesTests extends AbstractNumericTests { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(randomCompression(percentiles("percentiles")) - .script("doc['value'].value") +.script(new Script("doc['value'].value")) .percentiles(pcts)) .execute().actionGet(); @@ -326,11 +336,14 @@ public class PercentilesTests extends AbstractNumericTests { @Override @Test public void testScript_SingleValued_WithParams() throws Exception { + Map params = new HashMap<>(); + params.put("dec", 1); final double[] pcts = randomPercentiles(); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(randomCompression(percentiles("percentiles")) - .script("doc['value'].value - dec").param("dec", 1) +.script( + new Script("doc['value'].value - dec", ScriptType.INLINE, null, params)) .percentiles(pcts)) .execute().actionGet(); @@ -343,11 +356,14 @@ public class PercentilesTests extends AbstractNumericTests { @Override @Test public void testScript_ExplicitSingleValued_WithParams() throws Exception { + Map params = new HashMap<>(); + params.put("dec", 1); final double[] pcts = randomPercentiles(); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(randomCompression(percentiles("percentiles")) - .script("doc['value'].value - dec").param("dec", 1) +.script( + new Script("doc['value'].value - dec", ScriptType.INLINE, null, params)) .percentiles(pcts)) .execute().actionGet(); @@ -364,7 +380,7 @@ public class PercentilesTests extends AbstractNumericTests { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(randomCompression(percentiles("percentiles")) - .script("doc['values'].values") +.script(new Script("doc['values'].values")) .percentiles(pcts)) .execute().actionGet(); @@ -381,7 +397,7 @@ public class PercentilesTests extends AbstractNumericTests { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(randomCompression(percentiles("percentiles")) - .script("doc['values'].values") +.script(new Script("doc['values'].values")) .percentiles(pcts)) .execute().actionGet(); @@ -394,11 +410,15 @@ public class PercentilesTests extends AbstractNumericTests { @Override @Test public void testScript_MultiValued_WithParams() throws Exception { + Map params = new HashMap<>(); + params.put("dec", 1); final double[] pcts = randomPercentiles(); SearchResponse searchResponse = 
client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(randomCompression(percentiles("percentiles")) - .script("List values = doc['values'].values; double[] res = new double[values.size()]; for (int i = 0; i < res.length; i++) { res[i] = values.get(i) - dec; }; return res;").param("dec", 1) + .script(new Script( + "List values = doc['values'].values; double[] res = new double[values.size()]; for (int i = 0; i < res.length; i++) { res[i] = values.get(i) - dec; }; return res;", + ScriptType.INLINE, null, params)) .percentiles(pcts)) .execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsTests.java index 5e81a806335..e5ae18774f8 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsTests.java @@ -20,11 +20,16 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.metrics.stats.Stats; import org.junit.Test; +import java.util.HashMap; +import java.util.Map; + import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.search.aggregations.AggregationBuilders.global; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; @@ -163,7 +168,7 @@ public class StatsTests extends AbstractNumericTests { double expectedMaxValue = 10.0; assertThat(stats.getMax(), equalTo(expectedMaxValue)); assertThat((double) global.getProperty("stats.max"), equalTo(expectedMaxValue)); - double expectedSumValue = (double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10); + double expectedSumValue = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10; assertThat(stats.getSum(), equalTo(expectedSumValue)); assertThat((double) global.getProperty("stats.sum"), equalTo(expectedSumValue)); long expectedCountValue = 10; @@ -198,7 +203,7 @@ public class StatsTests extends AbstractNumericTests { public void testSingleValuedField_WithValueScript() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(stats("stats").field("value").script("_value + 1")) + .addAggregation(stats("stats").field("value").script(new Script("_value + 1"))) .execute().actionGet(); assertShardExecutionState(searchResponse, 0); @@ -218,9 +223,11 @@ public class StatsTests extends AbstractNumericTests { @Override @Test public void testSingleValuedField_WithValueScript_WithParams() throws Exception { + Map params = new HashMap<>(); + params.put("inc", 1); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(stats("stats").field("value").script("_value + inc").param("inc", 1)) + .addAggregation(stats("stats").field("value").script(new Script("_value + inc", ScriptType.INLINE, null, params))) .execute().actionGet(); assertShardExecutionState(searchResponse, 0); @@ -264,7 +271,7 @@ public class StatsTests extends AbstractNumericTests { public void testMultiValuedField_WithValueScript() throws Exception { SearchResponse 
searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(stats("stats").field("values").script("_value - 1")) + .addAggregation(stats("stats").field("values").script(new Script("_value - 1"))) .execute().actionGet(); assertShardExecutionState(searchResponse, 0); @@ -284,9 +291,11 @@ public class StatsTests extends AbstractNumericTests { @Override @Test public void testMultiValuedField_WithValueScript_WithParams() throws Exception { + Map params = new HashMap<>(); + params.put("dec", 1); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(stats("stats").field("values").script("_value - dec").param("dec", 1)) + .addAggregation(stats("stats").field("values").script(new Script("_value - dec", ScriptType.INLINE, null, params))) .execute().actionGet(); assertShardExecutionState(searchResponse, 0); @@ -308,7 +317,7 @@ public class StatsTests extends AbstractNumericTests { public void testScript_SingleValued() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(stats("stats").script("doc['value'].value")) + .addAggregation(stats("stats").script(new Script("doc['value'].value"))) .execute().actionGet(); assertShardExecutionState(searchResponse, 0); @@ -328,9 +337,11 @@ public class StatsTests extends AbstractNumericTests { @Override @Test public void testScript_SingleValued_WithParams() throws Exception { + Map params = new HashMap<>(); + params.put("inc", 1); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(stats("stats").script("doc['value'].value + inc").param("inc", 1)) + .addAggregation(stats("stats").script(new Script("doc['value'].value + inc", ScriptType.INLINE, null, params))) .execute().actionGet(); assertShardExecutionState(searchResponse, 0); @@ -350,9 +361,11 @@ public class StatsTests extends AbstractNumericTests { @Override @Test public void testScript_ExplicitSingleValued_WithParams() throws Exception { + Map params = new HashMap<>(); + params.put("inc", 1); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(stats("stats").script("doc['value'].value + inc").param("inc", 1)) + .addAggregation(stats("stats").script(new Script("doc['value'].value + inc", ScriptType.INLINE, null, params))) .execute().actionGet(); assertShardExecutionState(searchResponse, 0); @@ -374,7 +387,7 @@ public class StatsTests extends AbstractNumericTests { public void testScript_MultiValued() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(stats("stats").script("doc['values'].values")) + .addAggregation(stats("stats").script(new Script("doc['values'].values"))) .execute().actionGet(); assertShardExecutionState(searchResponse, 0); @@ -396,7 +409,7 @@ public class StatsTests extends AbstractNumericTests { public void testScript_ExplicitMultiValued() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(stats("stats").script("doc['values'].values")) + .addAggregation(stats("stats").script(new Script("doc['values'].values"))) .execute().actionGet(); assertShardExecutionState(searchResponse, 0); @@ -416,9 +429,13 @@ public class StatsTests extends AbstractNumericTests { @Override @Test public void testScript_MultiValued_WithParams() throws Exception { + Map params = new HashMap<>(); + 
params.put("dec", 1); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(stats("stats").script("[ doc['value'].value, doc['value'].value - dec ]").param("dec", 1)) + .addAggregation( + stats("stats").script( + new Script("[ doc['value'].value, doc['value'].value - dec ]", ScriptType.INLINE, null, params))) .execute().actionGet(); assertShardExecutionState(searchResponse, 0); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumTests.java index 89060a70ccf..6145cc5054c 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumTests.java @@ -19,11 +19,16 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.metrics.sum.Sum; import org.junit.Test; +import java.util.HashMap; +import java.util.Map; + import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.search.aggregations.AggregationBuilders.global; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; @@ -149,7 +154,7 @@ public class SumTests extends AbstractNumericTests { public void testSingleValuedField_WithValueScript() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(sum("sum").field("value").script("_value + 1")) + .addAggregation(sum("sum").field("value").script(new Script("_value + 1"))) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -163,9 +168,11 @@ public class SumTests extends AbstractNumericTests { @Override @Test public void testSingleValuedField_WithValueScript_WithParams() throws Exception { + Map params = new HashMap<>(); + params.put("increment", 1); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(sum("sum").field("value").script("_value + increment").param("increment", 1)) + .addAggregation(sum("sum").field("value").script(new Script("_value + increment", ScriptType.INLINE, null, params))) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -181,7 +188,7 @@ public class SumTests extends AbstractNumericTests { public void testScript_SingleValued() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(sum("sum").script("doc['value'].value")) + .addAggregation(sum("sum").script(new Script("doc['value'].value"))) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -195,9 +202,11 @@ public class SumTests extends AbstractNumericTests { @Override @Test public void testScript_SingleValued_WithParams() throws Exception { + Map params = new HashMap<>(); + params.put("inc", 1); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(sum("sum").script("doc['value'].value + inc").param("inc", 1)) + .addAggregation(sum("sum").script(new Script("doc['value'].value + inc", 
ScriptType.INLINE, null, params))) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -211,9 +220,11 @@ public class SumTests extends AbstractNumericTests { @Override @Test public void testScript_ExplicitSingleValued_WithParams() throws Exception { + Map params = new HashMap<>(); + params.put("inc", 1); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(sum("sum").script("doc['value'].value + inc").param("inc", 1)) + .addAggregation(sum("sum").script(new Script("doc['value'].value + inc", ScriptType.INLINE, null, params))) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -230,7 +241,7 @@ public class SumTests extends AbstractNumericTests { public void testScript_MultiValued() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(sum("sum").script("[ doc['value'].value, doc['value'].value + 1 ]")) + .addAggregation(sum("sum").script(new Script("[ doc['value'].value, doc['value'].value + 1 ]"))) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -246,7 +257,7 @@ public class SumTests extends AbstractNumericTests { public void testScript_ExplicitMultiValued() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(sum("sum").script("[ doc['value'].value, doc['value'].value + 1 ]")) + .addAggregation(sum("sum").script(new Script("[ doc['value'].value, doc['value'].value + 1 ]"))) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -260,9 +271,12 @@ public class SumTests extends AbstractNumericTests { @Override @Test public void testScript_MultiValued_WithParams() throws Exception { + Map params = new HashMap<>(); + params.put("inc", 1); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(sum("sum").script("[ doc['value'].value, doc['value'].value + inc ]").param("inc", 1)) + .addAggregation( + sum("sum").script(new Script("[ doc['value'].value, doc['value'].value + inc ]", ScriptType.INLINE, null, params))) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -296,24 +310,23 @@ public class SumTests extends AbstractNumericTests { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(sum("sum").field("values").script("_value + 1")) - .execute().actionGet(); + .addAggregation(sum("sum").field("values").script(new Script("_value + 1"))).execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); Sum sum = searchResponse.getAggregations().get("sum"); assertThat(sum, notNullValue()); assertThat(sum.getName(), equalTo("sum")); - assertThat(sum.getValue(), equalTo((double) 3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11+11+12+12+13)); + assertThat(sum.getValue(), equalTo((double) 3 + 4 + 4 + 5 + 5 + 6 + 6 + 7 + 7 + 8 + 8 + 9 + 9 + 10 + 10 + 11 + 11 + 12 + 12 + 13)); } @Override @Test public void testMultiValuedField_WithValueScript_WithParams() throws Exception { - - SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(sum("sum").field("values").script("_value + increment").param("increment", 1)) + Map params = new HashMap<>(); + params.put("increment", 1); + SearchResponse searchResponse = 
client().prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(sum("sum").field("values").script(new Script("_value + increment", ScriptType.INLINE, null, params))) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -321,6 +334,6 @@ public class SumTests extends AbstractNumericTests { Sum sum = searchResponse.getAggregations().get("sum"); assertThat(sum, notNullValue()); assertThat(sum.getName(), equalTo("sum")); - assertThat(sum.getValue(), equalTo((double) 3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11+11+12+12+13)); + assertThat(sum.getValue(), equalTo((double) 3 + 4 + 4 + 5 + 5 + 6 + 6 + 7 + 7 + 8 + 8 + 9 + 9 + 10 + 10 + 11 + 11 + 12 + 12 + 13)); } } \ No newline at end of file diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountTests.java index acbd5b74591..0840bd60708 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountTests.java @@ -19,11 +19,16 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCount; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; +import java.util.HashMap; +import java.util.Map; + import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.search.aggregations.AggregationBuilders.count; @@ -142,10 +147,8 @@ public class ValueCountTests extends ElasticsearchIntegrationTest { @Test public void singleValuedScript() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(count("count").script("doc['value'].value")) - .execute().actionGet(); + SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(count("count").script(new Script("doc['value'].value"))).execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -157,10 +160,8 @@ public class ValueCountTests extends ElasticsearchIntegrationTest { @Test public void multiValuedScript() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(count("count").script("doc['values'].values")) - .execute().actionGet(); + SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(count("count").script(new Script("doc['values'].values"))).execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -172,10 +173,10 @@ public class ValueCountTests extends ElasticsearchIntegrationTest { @Test public void singleValuedScriptWithParams() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(count("count").script("doc[s].value").param("s", "value")) - .execute().actionGet(); + Map params = new HashMap<>(); + params.put("s", "value"); + SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) 
+ .addAggregation(count("count").script(new Script("doc[s].value", ScriptType.INLINE, null, params))).execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); @@ -187,10 +188,10 @@ public class ValueCountTests extends ElasticsearchIntegrationTest { @Test public void multiValuedScriptWithParams() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(count("count").script("doc[s].values").param("s", "values")) - .execute().actionGet(); + Map params = new HashMap<>(); + params.put("s", "values"); + SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(count("count").script(new Script("doc[s].values", ScriptType.INLINE, null, params))).execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l)); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgTests.java index 81a065455ab..1aa63898033 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgTests.java @@ -25,7 +25,6 @@ import com.google.common.collect.EvictingQueue; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.search.SearchParseException; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram.Bucket; @@ -1265,6 +1264,42 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { fail("Model [" + builder.toString() + "] can be minimized, but an exception was thrown"); } } + } + + @Test + public void testUnrecognizedParams() { + + MovAvgModelBuilder[] builders = new MovAvgModelBuilder[]{ + new SimpleModel.SimpleModelBuilder(), + new LinearModel.LinearModelBuilder(), + new EwmaModel.EWMAModelBuilder(), + new HoltLinearModel.HoltLinearModelBuilder(), + new HoltWintersModel.HoltWintersModelBuilder() + }; + Map badSettings = new HashMap<>(1); + badSettings.put("abc", 1.2); + + for (MovAvgModelBuilder builder : builders) { + try { + SearchResponse response = client() + .prepareSearch("idx").setTypes("type") + .addAggregation( + histogram("histo").field(INTERVAL_FIELD).interval(interval) + .extendedBounds(0L, (long) (interval * (numBuckets - 1))) + .subAggregation(metric) + .subAggregation(movingAvg("movavg_counts") + .window(10) + .modelBuilder(builder) + .gapPolicy(gapPolicy) + .settings(badSettings) + .setBucketsPaths("_count")) + ).execute().actionGet(); + } catch (SearchPhaseExecutionException e) { + // All good + } + } + + } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgUnitTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgUnitTests.java index 575d263fa9f..fb9e5fa09aa 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgUnitTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgUnitTests.java @@ -611,8 +611,6 @@ public class MovAvgUnitTests extends 
ElasticsearchTestCase { for (MovAvgModel.AbstractModelParser parser : parsers) { for (Object v : values) { settings.put("alpha", v); - settings.put("beta", v); - settings.put("gamma", v); try { parser.parse(settings, "pipeline", 10, ParseFieldMatcher.STRICT); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffTests.java new file mode 100644 index 00000000000..70aa5669dd4 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffTests.java @@ -0,0 +1,291 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.pipeline.serialdiff; + +import com.google.common.collect.EvictingQueue; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; +import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; +import org.elasticsearch.search.aggregations.metrics.ValuesSourceMetricsAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.BucketHelpers; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregationHelperTests; +import org.elasticsearch.search.aggregations.pipeline.SimpleValue; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.hamcrest.Matchers; +import org.junit.Test; + +import java.util.*; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.search.aggregations.AggregationBuilders.*; +import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.diff; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; +import static org.hamcrest.Matchers.closeTo; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.core.IsNull.notNullValue; +import static org.hamcrest.core.IsNull.nullValue; + +@ElasticsearchIntegrationTest.SuiteScopeTest +public class SerialDiffTests extends ElasticsearchIntegrationTest { + private static final String INTERVAL_FIELD = "l_value"; + private static final String VALUE_FIELD = "v_value"; + + static int interval; + static int numBuckets; + static int lag; + static BucketHelpers.GapPolicy gapPolicy; + static ValuesSourceMetricsAggregationBuilder metric; + static List mockHisto; + + static Map> testValues; + + enum MetricTarget { + VALUE ("value"), COUNT("count"); + + private final String name; + + MetricTarget(String s) { + name = s; + } + + public String toString(){ + return name; 
+ } + } + + private ValuesSourceMetricsAggregationBuilder randomMetric(String name, String field) { + int rand = randomIntBetween(0,3); + + switch (rand) { + case 0: + return min(name).field(field); + case 2: + return max(name).field(field); + case 3: + return avg(name).field(field); + default: + return avg(name).field(field); + } + } + + private void assertValidIterators(Iterator expectedBucketIter, Iterator expectedCountsIter, Iterator expectedValuesIter) { + if (!expectedBucketIter.hasNext()) { + fail("`expectedBucketIter` iterator ended before `actual` iterator, size mismatch"); + } + if (!expectedCountsIter.hasNext()) { + fail("`expectedCountsIter` iterator ended before `actual` iterator, size mismatch"); + } + if (!expectedValuesIter.hasNext()) { + fail("`expectedValuesIter` iterator ended before `actual` iterator, size mismatch"); + } + } + + private void assertBucketContents(Histogram.Bucket actual, Double expectedCount, Double expectedValue) { + // This is a gap bucket + SimpleValue countDiff = actual.getAggregations().get("diff_counts"); + if (expectedCount == null) { + assertThat("[_count] diff is not null", countDiff, nullValue()); + } else { + assertThat("[_count] diff is null", countDiff, notNullValue()); + assertThat("[_count] diff does not match expected [" + countDiff.value() + " vs " + expectedCount + "]", + countDiff.value(), closeTo(expectedCount, 0.1)); + } + + // This is a gap bucket + SimpleValue valuesDiff = actual.getAggregations().get("diff_values"); + if (expectedValue == null) { + assertThat("[value] diff is not null", valuesDiff, Matchers.nullValue()); + } else { + assertThat("[value] diff is null", valuesDiff, notNullValue()); + assertThat("[value] diff does not match expected [" + valuesDiff.value() + " vs " + expectedValue + "]", + valuesDiff.value(), closeTo(expectedValue, 0.1)); + } + } + + + @Override + public void setupSuiteScopeCluster() throws Exception { + createIndex("idx"); + createIndex("idx_unmapped"); + List builders = new ArrayList<>(); + + + interval = 5; + numBuckets = randomIntBetween(10, 80); + lag = randomIntBetween(1, numBuckets / 2); + + gapPolicy = randomBoolean() ? BucketHelpers.GapPolicy.SKIP : BucketHelpers.GapPolicy.INSERT_ZEROS; + metric = randomMetric("the_metric", VALUE_FIELD); + mockHisto = PipelineAggregationHelperTests.generateHistogram(interval, numBuckets, randomDouble(), randomDouble()); + + testValues = new HashMap<>(8); + + for (MetricTarget target : MetricTarget.values()) { + setupExpected(target); + } + + for (PipelineAggregationHelperTests.MockBucket mockBucket : mockHisto) { + for (double value : mockBucket.docValues) { + builders.add(client().prepareIndex("idx", "type").setSource(jsonBuilder().startObject() + .field(INTERVAL_FIELD, mockBucket.key) + .field(VALUE_FIELD, value).endObject())); + } + } + + indexRandom(true, builders); + ensureSearchable(); + } + + /** + * @param target The document field "target", e.g. 
_count or a field value */ + private void setupExpected(MetricTarget target) { + ArrayList values = new ArrayList<>(numBuckets); + EvictingQueue lagWindow = EvictingQueue.create(lag); + + int counter = 0; + for (PipelineAggregationHelperTests.MockBucket mockBucket : mockHisto) { + Double metricValue; + double[] docValues = mockBucket.docValues; + + // Gaps only apply to metric values, not doc _counts + if (mockBucket.count == 0 && target.equals(MetricTarget.VALUE)) { + // If there was a gap in doc counts and we are ignoring, just skip this bucket + if (gapPolicy.equals(BucketHelpers.GapPolicy.SKIP)) { + metricValue = null; + } else if (gapPolicy.equals(BucketHelpers.GapPolicy.INSERT_ZEROS)) { + // otherwise insert a zero instead of the true value + metricValue = 0.0; + } else { + metricValue = PipelineAggregationHelperTests.calculateMetric(docValues, metric); + } + + } else { + // If this isn't a gap, or is a _count, just insert the value + metricValue = target.equals(MetricTarget.VALUE) ? PipelineAggregationHelperTests.calculateMetric(docValues, metric) : mockBucket.count; + } + + counter += 1; + + // Still inside the initial lag period: no lagged value exists yet, so record NaN + Double lagValue; + if (counter <= lag) { + lagValue = Double.NaN; + } else { + lagValue = lagWindow.peek(); // Peek here, because we rely on adding to always move the window + } + + // Normalize nulls to NaN + if (metricValue == null) { + metricValue = Double.NaN; + } + + // Both have values, calculate diff and replace the "empty" bucket + if (!Double.isNaN(metricValue) && !Double.isNaN(lagValue)) { + double diff = metricValue - lagValue; + values.add(diff); + } else { + values.add(null); // The tests need null, even though the agg doesn't + } + + lagWindow.add(metricValue); + + } + + testValues.put(target.toString(), values); + } + + @Test + public void basicDiff() { + SearchResponse response = client() + .prepareSearch("idx").setTypes("type") + .addAggregation( + histogram("histo").field(INTERVAL_FIELD).interval(interval) + .extendedBounds(0L, (long) (interval * (numBuckets - 1))) + .subAggregation(metric) + .subAggregation(diff("diff_counts") + .lag(lag) + .gapPolicy(gapPolicy) + .setBucketsPaths("_count")) + .subAggregation(diff("diff_values") + .lag(lag) + .gapPolicy(gapPolicy) + .setBucketsPaths("the_metric")) + ).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(mockHisto.size())); + + List expectedCounts = testValues.get(MetricTarget.COUNT.toString()); + List expectedValues = testValues.get(MetricTarget.VALUE.toString()); + + Iterator actualIter = buckets.iterator(); + Iterator expectedBucketIter = mockHisto.iterator(); + Iterator expectedCountsIter = expectedCounts.iterator(); + Iterator expectedValuesIter = expectedValues.iterator(); + + while (actualIter.hasNext()) { + assertValidIterators(expectedBucketIter, expectedCountsIter, expectedValuesIter); + + Histogram.Bucket actual = actualIter.next(); + PipelineAggregationHelperTests.MockBucket expected = expectedBucketIter.next(); + Double expectedCount = expectedCountsIter.next(); + Double expectedValue = expectedValuesIter.next(); + + assertThat("keys do not match", ((Number) actual.getKey()).longValue(), equalTo(expected.key)); + assertThat("doc counts do not match",
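The peek-then-add ordering in setupExpected above is what makes the window trail by exactly `lag` buckets: with an EvictingQueue of capacity `lag`, peek() returns the value pushed `lag` iterations earlier, and the current value is only added afterwards. A minimal sketch of just that window mechanic, with the gap-policy handling stripped out (the sample values are illustrative):

// A bounded FIFO of capacity `lag`; peek() yields the oldest element.
EvictingQueue<Double> window = EvictingQueue.create(lag);
for (double v : new double[]{1, 3, 6, 10}) {
    Double lagged = window.size() < lag ? null : window.peek(); // value from `lag` steps back
    System.out.println(lagged == null ? "warm-up" : "diff = " + (v - lagged));
    window.add(v); // add after peeking, so the window always lags the current bucket
}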
actual.getDocCount(), equalTo((long)expected.count)); + + assertBucketContents(actual, expectedCount, expectedValue); + } + } + + @Test + public void invalidLagSize() { + try { + client() + .prepareSearch("idx").setTypes("type") + .addAggregation( + histogram("histo").field(INTERVAL_FIELD).interval(interval) + .extendedBounds(0L, (long) (interval * (numBuckets - 1))) + .subAggregation(metric) + .subAggregation(diff("diff_counts") + .lag(-1) + .gapPolicy(gapPolicy) + .setBucketsPaths("_count")) + ).execute().actionGet(); + fail("expected the search to fail for an invalid (negative) lag size"); + } catch (SearchPhaseExecutionException e) { + // All good + } + } + +} diff --git a/core/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingTests.java b/core/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingTests.java index 589c9bb230a..23b1702a519 100644 --- a/core/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingTests.java +++ b/core/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingTests.java @@ -45,11 +45,6 @@ import static org.hamcrest.Matchers.is; @ElasticsearchIntegrationTest.ClusterScope(minNumDataNodes = 2) public class SearchWhileRelocatingTests extends ElasticsearchIntegrationTest { -// @LuceneTestCase.AwaitsFix(bugUrl = "problem with search searching on 1 shard (no replica), " + -// "and between getting the cluster state to do the search, and executing it, " + -// "the shard has fully relocated (moved from started on one node, to fully started on another node") -// ^^ the current impl of the test handles this case gracefully since it can happen with 1 replica as well -// we just make sure if we get a partial result without a failure that the postsearch is ok! @Test @Nightly public void testSearchAndRelocateConcurrently0Replicas() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchBwcTests.java b/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchBwcTests.java index f7b0489977b..02af90d0845 100644 --- a/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchBwcTests.java +++ b/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchBwcTests.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.search.child; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; @@ -147,7 +146,6 @@ public class ChildQuerySearchBwcTests extends ChildQuerySearchTests { } @Test - @LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/9270") public void testParentFieldDataCacheBug() throws Exception { assertAcked(prepareCreate("test") .setSettings(Settings.builder().put(indexSettings()) diff --git a/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchTests.java b/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchTests.java index f89946a58cf..c37565f845f 100644 --- a/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchTests.java +++ b/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchTests.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.search.child; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; import org.elasticsearch.action.count.CountResponse; @@ -2013,7 +2012,6 @@ public class ChildQuerySearchTests
extends ElasticsearchIntegrationTest { } @Test - @LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/9461") public void testParentFieldToNonExistingType() { assertAcked(prepareCreate("test").addMapping("parent").addMapping("child", "_parent", "type=parent2")); client().prepareIndex("test", "parent", "1").setSource("{}").get(); @@ -2032,19 +2030,6 @@ public class ChildQuerySearchTests extends ElasticsearchIntegrationTest { .setQuery(QueryBuilders.hasParentQuery("parent", matchAllQuery())) .get(); assertHitCount(response, 0); - - try { - client().prepareSearch("test") - .setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.hasChildQuery("child", matchAllQuery()))) - .get(); - fail(); - } catch (SearchPhaseExecutionException e) { - } - - response = client().prepareSearch("test") - .setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.hasParentQuery("parent", matchAllQuery()))) - .get(); - assertHitCount(response, 0); } static HasChildQueryBuilder hasChildQuery(String type, QueryBuilder queryBuilder) { diff --git a/core/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingBwcTest.java b/core/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingBwcTest.java index 944ecacfc3a..f1617e40c5e 100644 --- a/core/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingBwcTest.java +++ b/core/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingBwcTest.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.child; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse; @@ -41,7 +40,6 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.MergePolicyConfig; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.test.ElasticsearchIntegrationTest; -import org.junit.Test; import java.io.IOException; @@ -49,9 +47,7 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.search.child.ChildQuerySearchTests.hasChildQuery; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -66,35 +62,20 @@ public class ParentFieldLoadingBwcTest extends ElasticsearchIntegrationTest { .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_6_0) .build(); - @LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/9270") public void testParentFieldDataCacheBug() throws Exception { assertAcked(prepareCreate("test") - .setSettings(Settings.builder().put(indexSettings()) - .put("index.refresh_interval", -1)) // Disable automatic refresh, so that the _parent doesn't get warmed + .setSettings(Settings.builder().put(indexSettings) + .put("index.refresh_interval", -1)) // Disable automatic refresh, so that the _parent doesn't get warmed .addMapping("parent", 
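For orientation across these parent/child tests, the two join directions are easy to conflate: has_parent returns child documents whose parent matches the inner query, while has_child returns parent documents that have at least one matching child. In builder form (both calls appear verbatim elsewhere in these files):

QueryBuilder childrenWhoseParentMatches = QueryBuilders.hasParentQuery("parent", matchAllQuery());
QueryBuilder parentsWithAMatchingChild = QueryBuilders.hasChildQuery("child", matchAllQuery());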
XContentFactory.jsonBuilder().startObject().startObject("parent") - .startObject("properties") - .startObject("p_field") - .field("type", "string") - .startObject("fielddata") - .field(FieldDataType.FORMAT_KEY, MappedFieldType.Loading.LAZY) - .endObject() - .endObject() - .endObject().endObject().endObject())); - - ensureGreen(); - - client().prepareIndex("test", "parent", "p0").setSource("p_field", "p_value0").get(); - client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get(); - - refresh(); - // No _parent field yet, there shouldn't be anything in the field data for _parent field - IndicesStatsResponse indicesStatsResponse = client().admin().indices() - .prepareStats("test").setFieldData(true).get(); - assertThat(indicesStatsResponse.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0l)); - - // Now add mapping + children - client().admin().indices().preparePutMapping("test").setType("child") - .setSource(XContentFactory.jsonBuilder().startObject().startObject("child") + .startObject("properties") + .startObject("p_field") + .field("type", "string") + .startObject("fielddata") + .field(FieldDataType.FORMAT_KEY, MappedFieldType.Loading.LAZY) + .endObject() + .endObject() + .endObject().endObject().endObject()) + .addMapping("child", XContentFactory.jsonBuilder().startObject().startObject("child") .startObject("_parent") .field("type", "parent") .endObject() @@ -105,22 +86,23 @@ public class ParentFieldLoadingBwcTest extends ElasticsearchIntegrationTest { .field(FieldDataType.FORMAT_KEY, MappedFieldType.Loading.LAZY) .endObject() .endObject() - .endObject().endObject().endObject()) - .get(); + .endObject().endObject().endObject())); - // index simple data + ensureGreen(); + + client().prepareIndex("test", "parent", "p0").setSource("p_field", "p_value0").get(); + client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get(); client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get(); client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get(); client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get(); client().prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").get(); client().prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").get(); - refresh(); - indicesStatsResponse = client().admin().indices() + IndicesStatsResponse statsResponse = client().admin().indices() .prepareStats("test").setFieldData(true).setFieldDataFields("_parent").get(); - assertThat(indicesStatsResponse.getTotal().getFieldData().getMemorySizeInBytes(), greaterThan(0l)); - assertThat(indicesStatsResponse.getTotal().getFieldData().getFields().get("_parent"), greaterThan(0l)); + assertThat(statsResponse.getTotal().getFieldData().getMemorySizeInBytes(), greaterThan(0l)); + assertThat(statsResponse.getTotal().getFieldData().getFields().get("_parent"), greaterThan(0l)); SearchResponse searchResponse = client().prepareSearch("test") .setQuery(constantScoreQuery(hasChildQuery("child", termQuery("c_field", "blue")))) @@ -128,18 +110,18 @@ public class ParentFieldLoadingBwcTest extends ElasticsearchIntegrationTest { assertNoFailures(searchResponse); assertThat(searchResponse.getHits().totalHits(), equalTo(1l)); - indicesStatsResponse = client().admin().indices() + statsResponse = client().admin().indices() .prepareStats("test").setFieldData(true).setFieldDataFields("_parent").get(); - 
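Every stats assertion in this test follows one lifecycle: a search touches `_parent` fielddata, the per-field stats report resident memory, the fielddata cache is cleared, and the same stats drop back to zero. Condensed into the client calls the test itself uses (a sketch, not part of the patch):

// warm -> assert loaded -> clear -> assert empty
long parentBytes = client().admin().indices().prepareStats("test")
        .setFieldData(true).setFieldDataFields("_parent").get()
        .getTotal().getFieldData().getFields().get("_parent");
assertThat(parentBytes, greaterThan(0l)); // loaded by the has_child search
client().admin().indices().prepareClearCache("test").setFieldDataCache(true).get();
// the same stats request now reports 0 bytes for _parent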
assertThat(indicesStatsResponse.getTotal().getFieldData().getMemorySizeInBytes(), greaterThan(0l)); - assertThat(indicesStatsResponse.getTotal().getFieldData().getFields().get("_parent"), greaterThan(0l)); + assertThat(statsResponse.getTotal().getFieldData().getMemorySizeInBytes(), greaterThan(0l)); + assertThat(statsResponse.getTotal().getFieldData().getFields().get("_parent"), greaterThan(0l)); ClearIndicesCacheResponse clearCacheResponse = client().admin().indices().prepareClearCache("test").setFieldDataCache(true).get(); assertNoFailures(clearCacheResponse); assertAllSuccessful(clearCacheResponse); - indicesStatsResponse = client().admin().indices() + statsResponse = client().admin().indices() .prepareStats("test").setFieldData(true).setFieldDataFields("_parent").get(); - assertThat(indicesStatsResponse.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0l)); - assertThat(indicesStatsResponse.getTotal().getFieldData().getFields().get("_parent"), equalTo(0l)); + assertThat(statsResponse.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0l)); + assertThat(statsResponse.getTotal().getFieldData().getFields().get("_parent"), equalTo(0l)); } public void testEagerParentFieldLoading() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreBackwardCompatibilityTests.java b/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreBackwardCompatibilityTests.java index 74aefb63f35..e3be92890d2 100644 --- a/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreBackwardCompatibilityTests.java +++ b/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreBackwardCompatibilityTests.java @@ -87,7 +87,7 @@ public class FunctionScoreBackwardCompatibilityTests extends ElasticsearchBackwa checkFunctionScoreStillWorks(ids); logClusterState(); // prevent any kind of allocation during the upgrade we recover from gateway - client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, "none")).get(); + disableAllocation("test"); boolean upgraded; int upgradedNodesCounter = 1; do { @@ -97,7 +97,7 @@ public class FunctionScoreBackwardCompatibilityTests extends ElasticsearchBackwa logClusterState(); checkFunctionScoreStillWorks(ids); } while (upgraded); - client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, "all")).get(); + enableAllocation("test"); logger.debug("done function_score while upgrading"); } diff --git a/core/src/test/java/org/elasticsearch/search/geo/GeoPolygonTests.java b/core/src/test/java/org/elasticsearch/search/geo/GeoPolygonTests.java index 8b7b505059f..014143a2be9 100644 --- a/core/src/test/java/org/elasticsearch/search/geo/GeoPolygonTests.java +++ b/core/src/test/java/org/elasticsearch/search/geo/GeoPolygonTests.java @@ -27,9 +27,8 @@ import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.query.QueryBuilders.boolQuery; import static org.elasticsearch.index.query.QueryBuilders.geoPolygonQuery; -import static org.elasticsearch.index.query.QueryBuilders.filteredQuery; -import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.anyOf; @@ -88,7 +87,7 @@ public class GeoPolygonTests extends ElasticsearchIntegrationTest { public void simplePolygonTest() throws Exception { SearchResponse searchResponse = client().prepareSearch("test") // from NY - .setQuery(filteredQuery(matchAllQuery(), geoPolygonQuery("location") + .setQuery(boolQuery().must(geoPolygonQuery("location") .addPoint(40.7, -74.0) .addPoint(40.7, -74.1) .addPoint(40.8, -74.1) @@ -101,4 +100,20 @@ public class GeoPolygonTests extends ElasticsearchIntegrationTest { assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5"))); } } + + @Test + public void simpleUnclosedPolygon() throws Exception { + SearchResponse searchResponse = client().prepareSearch("test") // from NY + .setQuery(boolQuery().must(geoPolygonQuery("location") + .addPoint(40.7, -74.0) + .addPoint(40.7, -74.1) + .addPoint(40.8, -74.1) + .addPoint(40.8, -74.0))) + .execute().actionGet(); + assertHitCount(searchResponse, 4); + assertThat(searchResponse.getHits().hits().length, equalTo(4)); + for (SearchHit hit : searchResponse.getHits()) { + assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5"))); + } + } } diff --git a/core/src/test/java/org/elasticsearch/search/preference/SearchPreferenceTests.java b/core/src/test/java/org/elasticsearch/search/preference/SearchPreferenceTests.java index bdfac267224..dc125558209 100644 --- a/core/src/test/java/org/elasticsearch/search/preference/SearchPreferenceTests.java +++ b/core/src/test/java/org/elasticsearch/search/preference/SearchPreferenceTests.java @@ -88,7 +88,7 @@ public class SearchPreferenceTests extends ElasticsearchIntegrationTest { @Test public void simplePreferenceTests() throws Exception { - createIndex("test"); + client().admin().indices().prepareCreate("test").setSettings("number_of_replicas=1").get(); ensureGreen(); client().prepareIndex("test", "type1").setSource("field1", "value1").execute().actionGet(); @@ -104,12 +104,47 @@ public class SearchPreferenceTests extends ElasticsearchIntegrationTest { searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setPreference("_primary").execute().actionGet(); assertThat(searchResponse.getHits().totalHits(), equalTo(1l)); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setPreference("_replica").execute().actionGet(); + assertThat(searchResponse.getHits().totalHits(), equalTo(1l)); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setPreference("_replica").execute().actionGet(); + assertThat(searchResponse.getHits().totalHits(), equalTo(1l)); + + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setPreference("_replica_first").execute().actionGet(); + assertThat(searchResponse.getHits().totalHits(), equalTo(1l)); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setPreference("_replica_first").execute().actionGet(); + assertThat(searchResponse.getHits().totalHits(), equalTo(1l)); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setPreference("1234").execute().actionGet(); assertThat(searchResponse.getHits().totalHits(), equalTo(1l)); searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setPreference("1234").execute().actionGet(); assertThat(searchResponse.getHits().totalHits(), equalTo(1l)); } + @Test + public void testReplicaPreference() throws Exception { + 
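The distinction pinned down by testReplicaPreference: `_replica` restricts the search to replica shard copies, so it fails outright on an index with zero replicas, while `_replica_first` merely prefers replicas and falls back to primaries when none exist. The preference is just a string on the request; a minimal sketch using the same client API as the test:

client().prepareSearch("test")
        .setQuery(matchAllQuery())
        .setPreference("_replica_first") // prefer replicas, fall back to primaries
        .get();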
client().admin().indices().prepareCreate("test").setSettings("number_of_replicas=0").get(); + ensureGreen(); + + client().prepareIndex("test", "type1").setSource("field1", "value1").execute().actionGet(); + refresh(); + + try { + client().prepareSearch().setQuery(matchAllQuery()).setPreference("_replica").execute().actionGet(); + fail("should have failed because there are no replicas"); + } catch (Exception e) { + // pass + } + + SearchResponse resp = client().prepareSearch().setQuery(matchAllQuery()).setPreference("_replica_first").execute().actionGet(); + assertThat(resp.getHits().totalHits(), equalTo(1l)); + + client().admin().indices().prepareUpdateSettings("test").setSettings("number_of_replicas=1").get(); + ensureGreen("test"); + + resp = client().prepareSearch().setQuery(matchAllQuery()).setPreference("_replica").execute().actionGet(); + assertThat(resp.getHits().totalHits(), equalTo(1l)); + } + @Test (expected = IllegalArgumentException.class) public void testThatSpecifyingNonExistingNodesReturnsUsefulError() throws Exception { createIndex("test"); diff --git a/core/src/test/java/org/elasticsearch/search/query/SearchQueryTests.java b/core/src/test/java/org/elasticsearch/search/query/SearchQueryTests.java index cd715242caa..04b3026cfdd 100644 --- a/core/src/test/java/org/elasticsearch/search/query/SearchQueryTests.java +++ b/core/src/test/java/org/elasticsearch/search/query/SearchQueryTests.java @@ -81,7 +81,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { .startObject("_all").field("omit_norms", true).endObject() .endObject().endObject()) .setSettings(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)); // only one shard otherwise IDF might be different for comparing scores - ensureGreen(); + indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "the quick brown fox jumps"), client().prepareIndex("test", "type1", "2").setSource("field1", "quick brown"), client().prepareIndex("test", "type1", "3").setSource("field1", "quick")); @@ -111,7 +111,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "the quick brown fox jumps"), client().prepareIndex("test", "type1", "2").setSource("field1", "quick brown"), client().prepareIndex("test", "type1", "3").setSource("field1", "quick")); - ensureYellow(); + assertHitCount(client().prepareSearch().setQuery(queryStringQuery("quick")).get(), 3l); assertHitCount(client().prepareSearch().setQuery(queryStringQuery("")).get(), 0l); // return no docs } @@ -122,7 +122,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get(); client().prepareIndex("test", "type1", "2").setSource("field1", "value2").get(); client().prepareIndex("test", "type1", "3").setSource("field1", "value3").get(); - ensureGreen(); + waitForRelocation(); optimize(); refresh(); @@ -154,7 +154,6 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { createIndex("test"); client().prepareIndex("test", "type1", "1").setSource("field1", "value1_1", "field2", "value2_1").setRefresh(true).get(); - ensureYellow(); SearchResponse searchResponse = client().prepareSearch().setQuery("{ \"term\" : { \"field1\" : \"value1_1\" }}").get(); assertHitCount(searchResponse, 1l); } @@ -167,7 +166,6 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { client().prepareIndex("test", "type1", "1").setSource("field1", "quick brown 
fox", "field2", "quick brown fox"), client().prepareIndex("test", "type1", "2").setSource("field1", "quick lazy huge brown fox", "field2", "quick lazy huge brown fox")); - ensureYellow(); SearchResponse searchResponse = client().prepareSearch().setQuery(matchQuery("field2", "quick brown").type(MatchQueryBuilder.Type.PHRASE).slop(0)).get(); assertHitCount(searchResponse, 1l); @@ -181,7 +179,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { Random random = getRandom(); createIndex("test"); indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "quick brown fox", "field2", "quick brown fox"), client().prepareIndex("test", "type1", "2").setSource("field1", "quick lazy huge brown fox", "field2", "quick lazy huge brown fox")); - ensureYellow(); + SearchResponse searchResponse = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("field1", "quick"))).get(); assertHitCount(searchResponse, 2l); for (SearchHit searchHit : searchResponse.getHits().hits()) { @@ -214,7 +212,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { } createIndex("test_1"); indexRandom(true, builders); - ensureYellow(); + int queryRounds = scaledRandomIntBetween(10, 20); for (int i = 0; i < queryRounds; i++) { MatchQueryBuilder matchQuery = matchQuery("f", English.intToEnglish(between(0, num))); @@ -244,7 +242,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("foo", "bar"), client().prepareIndex("test", "type1", "2").setSource("foo", "bar") ); - ensureYellow(); + int iters = scaledRandomIntBetween(100, 200); for (int i = 0; i < iters; i++) { SearchResponse searchResponse = client().prepareSearch("test").setQuery(queryStringQuery("*:*^10.0").boost(10.0f)).get(); @@ -266,7 +264,6 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("message", "test message", "comment", "whatever"), client().prepareIndex("test", "type1", "2").setSource("message", "hello world", "comment", "test comment")); - ensureYellow(); SearchResponse searchResponse = client().prepareSearch().setQuery(commonTermsQuery("_all", "test")).get(); assertHitCount(searchResponse, 2l); assertFirstHit(searchResponse, hasId("2")); @@ -283,7 +280,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { client().prepareIndex("test", "type1", "1").setSource("field1", "the quick brown fox"), client().prepareIndex("test", "type1", "2").setSource("field1", "the quick lazy huge brown fox jumps over the tree") ); - ensureYellow(); + SearchResponse searchResponse = client().prepareSearch().setQuery(commonTermsQuery("field1", "the quick brown").cutoffFrequency(3).lowFreqOperator(Operator.OR)).get(); assertHitCount(searchResponse, 3l); assertFirstHit(searchResponse, hasId("1")); @@ -371,7 +368,6 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { .put("index.analysis.analyzer.syns.filter","syns") ) .addMapping("type1", "field1", "type=string,analyzer=syns", "field2", "type=string,analyzer=syns")); - ensureGreen(); indexRandom(true, client().prepareIndex("test", "type1", "3").setSource("field1", "quick lazy huge brown pidgin", "field2", "the quick lazy huge brown fox jumps over the tree"), client().prepareIndex("test", "type1", "1").setSource("field1", "the quick brown fox"), @@ -473,7 +469,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { 
indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "quick brown fox", "field2", "quick brown fox"), client().prepareIndex("test", "type1", "2").setSource("field1", "quick lazy huge brown fox", "field2", "quick lazy huge brown fox")); - ensureYellow(); + SearchResponse searchResponse = client().prepareSearch().setQuery(matchQuery("field2", "quick brown").type(MatchQueryBuilder.Type.PHRASE).slop(0)).get(); assertHitCount(searchResponse, 1l); try { @@ -498,7 +494,6 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { client().prepareIndex("test", "type1", "1").setSource("field1", "value_1", "field2", "value_2").get(); refresh(); - ensureYellow(); SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("value*").analyzeWildcard(true)).get(); assertHitCount(searchResponse, 1l); @@ -522,7 +517,6 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { client().prepareIndex("test", "type1", "1").setSource("field1", "value_1", "field2", "value_2").get(); refresh(); - ensureYellow(); SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("VALUE_3~1").lowercaseExpandedTerms(true)).get(); assertHitCount(searchResponse, 1l); searchResponse = client().prepareSearch().setQuery(queryStringQuery("VALUE_3~1").lowercaseExpandedTerms(false)).get(); @@ -544,7 +538,6 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { assertAcked(prepareCreate("test").addMapping( "type", "past", "type=date", "future", "type=date" )); - ensureGreen(); String aMonthAgo = ISODateTimeFormat.yearMonthDay().print(new DateTime(DateTimeZone.UTC).minusMonths(1)); String aMonthFromNow = ISODateTimeFormat.yearMonthDay().print(new DateTime(DateTimeZone.UTC).plusMonths(1)); @@ -573,7 +566,6 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { assertAcked(prepareCreate("test").addMapping( "type", "past", "type=date" )); - ensureGreen(); DateTimeZone timeZone = randomDateTimeZone(); String now = ISODateTimeFormat.dateTime().print(new DateTime(timeZone)); @@ -593,7 +585,6 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { assertAcked(prepareCreate("test").addMapping( "type", "past", "type=date" )); - ensureGreen(); client().prepareIndex("test", "type", "1").setSource("past", "2015-04-05T23:00:00+0000").get(); client().prepareIndex("test", "type", "2").setSource("past", "2015-04-06T00:00:00+0000").get(); @@ -650,7 +641,6 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { client().prepareIndex("test", "type2", "2").setSource("field1", "value1"), client().prepareIndex("test", "type2", "3").setSource("field1", "value1")); - ensureYellow(); assertHitCount(client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), typeQuery("type1"))).get(), 2l); assertHitCount(client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), typeQuery("type2"))).get(), 3l); @@ -681,7 +671,6 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { client().prepareIndex("test", "type1", "2").setSource("field1", "value2"), client().prepareIndex("test", "type1", "3").setSource("field1", "value3")); - ensureYellow(); SearchResponse searchResponse = client().prepareSearch().setQuery(constantScoreQuery(idsQuery("type1").ids("1", "3"))).get(); assertHitCount(searchResponse, 2l); assertSearchHits(searchResponse, "1", "3"); @@ -736,7 +725,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { indexRandom(true, 
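The `_index` meta field queried in the following hunk is not stored per document; it is resolved per shard, so a term query on it accepts or rejects a shard's documents wholesale and effectively acts as an include/exclude of entire indices. A sketch (the concrete index name here is illustrative):

SearchResponse r = client().prepareSearch()
        .setQuery(constantScoreQuery(termQuery("_index", "index1"))) // hits come only from index1
        .get();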
client().prepareIndex(indexName, "type1", indexName + "1").setSource("field1", "value1")); } - ensureYellow(); + for (String indexName : indexNames) { SearchResponse request = client().prepareSearch().setQuery(constantScoreQuery(termQuery("_index", indexName))).get(); SearchResponse searchResponse = assertSearchResponse(request); @@ -771,7 +760,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { client().prepareIndex("test", "type1", "3").setSource("field2", "value2_3"), client().prepareIndex("test", "type1", "4").setSource("field3", "value3_4")); - ensureYellow(); + assertHitCount(client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), limitQuery(2))).get(), 4l); // no-op } @@ -785,7 +774,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject().startObject("obj2").field("obj2_val", "1").endObject().field("y1", "y_1").field("field2", "value2_3").endObject()), client().prepareIndex("test", "type1", "4").setSource(jsonBuilder().startObject().startObject("obj2").field("obj2_val", "1").endObject().field("y2", "y_2").field("field3", "value3_4").endObject()) ); - ensureYellow(); + SearchResponse searchResponse = client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), existsQuery("field1"))).get(); assertHitCount(searchResponse, 2l); assertSearchHits(searchResponse, "1", "2"); @@ -849,7 +838,6 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { client().prepareIndex("test", "type1", "1").setSource("field1", "value1_1", "field2", "value2_1").setRefresh(true).get(); - ensureYellow(); WrapperQueryBuilder wrapper = new WrapperQueryBuilder("{ \"term\" : { \"field1\" : \"value1_1\" } }"); assertHitCount(client().prepareSearch().setQuery(wrapper).get(), 1l); @@ -863,7 +851,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { @Test public void testFiltersWithCustomCacheKey() throws Exception { createIndex("test"); - ensureGreen(); + client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get(); refresh(); SearchResponse searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(termsQuery("field1", "value1"))).get(); @@ -882,7 +870,6 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { @Test public void testMatchQueryNumeric() throws Exception { assertAcked(prepareCreate("test").addMapping("type1", "long", "type=long", "double", "type=double")); - ensureGreen(); indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("long", 1l, "double", 1.0d), client().prepareIndex("test", "type1", "2").setSource("long", 2l, "double", 2.0d), @@ -912,7 +899,6 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { client().prepareIndex("test", "type1", "2").setSource("field1", "value2", "field2", "value5", "field3", "value2"), client().prepareIndex("test", "type1", "3").setSource("field1", "value3", "field2", "value6", "field3", "value1") ); - ensureYellow(); MultiMatchQueryBuilder builder = multiMatchQuery("value1 value2 value4", "field1", "field2"); SearchResponse searchResponse = client().prepareSearch().setQuery(builder) .addAggregation(AggregationBuilders.terms("field1").field("field1")).get(); @@ -976,7 +962,6 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { client().prepareIndex("test", "type1", "2").setSource("field1", "value2").get(); refresh(); - ensureYellow(); BoolQueryBuilder boolQuery = boolQuery() 
.must(matchQuery("field1", "a").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.NONE)) .must(matchQuery("field1", "value1").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.NONE)); @@ -1001,7 +986,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { client().prepareIndex("test", "type1", "2").setSource("field1", "value3", "field2", "value4").get(); refresh(); - ensureYellow(); + BoolQueryBuilder boolQuery = boolQuery() .must(multiMatchQuery("a", "field1", "field2").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.NONE)) .must(multiMatchQuery("value1", "field1", "field2").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.NONE)); // Fields are ORed together @@ -1026,8 +1011,6 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { client().prepareIndex("test", "type1", "2").setSource("field2", "value1").get(); refresh(); - ensureYellow(); - MultiMatchQueryBuilder multiMatchQuery = multiMatchQuery("value1 value2 foo", "field1", "field2"); multiMatchQuery.useDisMax(true); @@ -1074,8 +1057,6 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { client().prepareIndex("test", "type1", "2").setSource("str", "shay", "date", "2012-02-05", "num", 20).get(); refresh(); - ensureYellow(); - SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("str:kimcy~1")).get(); assertNoFailures(searchResponse); assertHitCount(searchResponse, 1l); @@ -1098,7 +1079,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { client().prepareIndex("test", "type1", "2").setSource("important", "nothing important", "less_important", "phrase match") ); - ensureYellow(); + SearchResponse searchResponse = client().prepareSearch() .setQuery(queryStringQuery("\"phrase match\"").field("important", boost).field("less_important")).get(); assertHitCount(searchResponse, 2l); @@ -1121,7 +1102,6 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { client().prepareIndex("test", "type1", "2").setSource("str", "shay", "date", "2012-02-05", "num", 20).get(); refresh(); - ensureYellow(); SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("num:>19")).get(); assertHitCount(searchResponse, 1l); assertFirstHit(searchResponse, hasId("2")); @@ -1149,7 +1129,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { @Test public void testEmptytermsQuery() throws Exception { assertAcked(prepareCreate("test").addMapping("type", "term", "type=string")); - ensureGreen(); + indexRandom(true, client().prepareIndex("test", "type", "1").setSource("term", "1"), client().prepareIndex("test", "type", "2").setSource("term", "2"), client().prepareIndex("test", "type", "3").setSource("term", "3"), @@ -1166,7 +1146,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { @Test public void testFieldDatatermsQuery() throws Exception { assertAcked(prepareCreate("test").addMapping("type", "str", "type=string", "lng", "type=long", "dbl", "type=double")); - ensureGreen(); + indexRandom(true, client().prepareIndex("test", "type", "1").setSource("str", "1", "lng", 1l, "dbl", 1.0d), client().prepareIndex("test", "type", "2").setSource("str", "2", "lng", 2l, "dbl", 2.0d), @@ -1240,8 +1220,6 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { .endObject().endObject().endObject().endObject().endObject().endObject())); assertAcked(prepareCreate("test").addMapping("type", "term", "type=string")); - ensureGreen(); - indexRandom(true, client().prepareIndex("lookup", "type", 
"1").setSource("terms", new String[]{"1", "3"}), client().prepareIndex("lookup", "type", "2").setSource("terms", new String[]{"2"}), @@ -1333,7 +1311,6 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { @Test public void testBasicFilterById() throws Exception { createIndex("test"); - ensureGreen(); client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get(); client().prepareIndex("test", "type2", "2").setSource("field1", "value2").get(); @@ -1379,7 +1356,6 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { @Test public void testBasicQueryById() throws Exception { createIndex("test"); - ensureGreen(); client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get(); client().prepareIndex("test", "type2", "2").setSource("field1", "value2").get(); @@ -1422,7 +1398,6 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { "num_byte", "type=byte", "num_short", "type=short", "num_integer", "type=integer", "num_long", "type=long", "num_float", "type=float", "num_double", "type=double")); - ensureGreen(); client().prepareIndex("test", "type1", "1").setSource("num_byte", 1, "num_short", 1, "num_integer", 1, "num_long", 1, "num_float", 1, "num_double", 1).get(); @@ -1523,7 +1498,6 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { "num_byte", "type=byte", "num_short", "type=short", "num_integer", "type=integer", "num_long", "type=long", "num_float", "type=float", "num_double", "type=double")); - ensureGreen(); client().prepareIndex("test", "type1", "1").setSource("field1", "test1", "num_long", 1).get(); client().prepareIndex("test", "type1", "2").setSource("field1", "test1", "num_long", 2).get(); @@ -1559,7 +1533,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { @Test public void testEmptyTopLevelFilter() { client().prepareIndex("test", "type", "1").setSource("field", "value").setRefresh(true).get(); - ensureYellow(); + SearchResponse searchResponse = client().prepareSearch().setPostFilter("{}").get(); assertHitCount(searchResponse, 1l); } @@ -1569,7 +1543,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { assertAcked(prepareCreate("test") //issue manifested only with shards>=2 .setSettings(SETTING_NUMBER_OF_SHARDS, between(2, DEFAULT_MAX_NUM_SHARDS))); - ensureGreen(); + indexRandom(true, client().prepareIndex("test", "test", "1").setSource("description", "foo other anything bar"), client().prepareIndex("test", "test", "2").setSource("description", "foo other anything"), @@ -1590,7 +1564,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { @Test // see #2994 public void testSimpleSpan() throws IOException, ExecutionException, InterruptedException { createIndex("test"); - ensureGreen(); + indexRandom(true, client().prepareIndex("test", "test", "1").setSource("description", "foo other anything bar"), client().prepareIndex("test", "test", "2").setSource("description", "foo other anything"), @@ -1611,7 +1585,6 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { @Test public void testSpanMultiTermQuery() throws IOException { createIndex("test"); - ensureGreen(); client().prepareIndex("test", "test", "1").setSource("description", "foo other anything bar", "count", 1).get(); client().prepareIndex("test", "test", "2").setSource("description", "foo other anything", "count", 2).get(); @@ -1644,7 +1617,6 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { @Test public void testSpanNot() 
throws IOException, ExecutionException, InterruptedException { createIndex("test"); - ensureGreen(); client().prepareIndex("test", "test", "1").setSource("description", "the quick brown fox jumped over the lazy dog").get(); client().prepareIndex("test", "test", "2").setSource("description", "the quick black fox leaped over the sleeping dog").get(); @@ -1706,7 +1678,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { .endObject() .endObject()) .addMapping("bs", "online", "type=boolean", "ts", "type=date,ignore_malformed=false,format=epoch_millis")); - ensureGreen(); + client().prepareIndex("test", "s", "1").setRouting("Y").setSource("online", false, "bs", "Y", "ts", System.currentTimeMillis() - 100).get(); client().prepareIndex("test", "s", "2").setRouting("X").setSource("online", true, "bs", "X", "ts", System.currentTimeMillis() - 10000000).get(); @@ -1738,7 +1710,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { @Test public void testMultiFieldQueryString() { client().prepareIndex("test", "s", "1").setSource("field1", "value1", "field2", "value2").setRefresh(true).get(); - ensureYellow(); + logger.info("regular"); assertHitCount(client().prepareSearch("test").setQuery(queryStringQuery("value1").field("field1").field("field2")).get(), 1); assertHitCount(client().prepareSearch("test").setQuery(queryStringQuery("field\\*:value1")).get(), 1); @@ -1770,7 +1742,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { .put("index.analysis.filter.synonym.type", "synonym") .putArray("index.analysis.filter.synonym.synonyms", "fast, quick")); assertAcked(builder.addMapping("test", "text", "type=string,analyzer=index,search_analyzer=search")); - ensureGreen(); + client().prepareIndex("test", "test", "1").setSource("text", "quick brown fox").get(); refresh(); SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchQuery("text", "quick").operator(Operator.AND)).get(); @@ -1801,7 +1773,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { .put("index.analysis.filter.unique_stem.type", "unique") .put("index.analysis.filter.unique_stem.only_on_same_position", true)); assertAcked(builder.addMapping("test", "text", "type=string,analyzer=index,search_analyzer=search")); - ensureGreen(); + client().prepareIndex("test", "test", "1").setSource("text", "the fox runs across the street").get(); refresh(); SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchQuery("text", "fox runs").operator(Operator.AND)).get(); @@ -1826,7 +1798,6 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { .put("index.analysis.filter.synonym.type", "synonym") .putArray("index.analysis.filter.synonym.synonyms", "fast, quick")); assertAcked(builder.addMapping("test", "text", "type=string,analyzer=index,search_analyzer=search")); - ensureGreen(); client().prepareIndex("test", "test", "1").setSource("text", "quick brown fox").get(); refresh(); @@ -1862,7 +1833,6 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { "analysis.filter.custom_word_delimiter.split_on_numerics", "false", "analysis.filter.custom_word_delimiter.stem_english_possessive", "false") .addMapping("type1", "field1", "type=string,analyzer=my_analyzer", "field2", "type=string,analyzer=my_analyzer")); - ensureGreen(); client().prepareIndex("test", "type1", "1").setSource("field1", "foo bar baz", "field2", "not needed").get(); refresh(); @@ -1878,7 +1848,7 @@ public class SearchQueryTests extends 
ElasticsearchIntegrationTest { @Test // see https://github.com/elasticsearch/elasticsearch/issues/3797 public void testMultiMatchLenientIssue3797() { createIndex("test"); - ensureGreen(); + client().prepareIndex("test", "type1", "1").setSource("field1", 123, "field2", "value2").get(); refresh(); @@ -1898,7 +1868,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { @Test public void testIndicesQuery() throws Exception { createIndex("index1", "index2", "index3"); - ensureGreen(); + client().prepareIndex("index1", "type1").setId("1").setSource("text", "value1").get(); client().prepareIndex("index2", "type2").setId("2").setSource("text", "value2").get(); @@ -1932,7 +1902,6 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { @Test public void testIndicesFilter() throws Exception { createIndex("index1", "index2", "index3"); - ensureGreen(); client().prepareIndex("index1", "type1").setId("1").setSource("text", "value1").get(); client().prepareIndex("index2", "type2").setId("2").setSource("text", "value2").get(); @@ -1970,7 +1939,6 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { assertAcked(prepareCreate("related") .addMapping("child", jsonBuilder().startObject().startObject("child").startObject("_parent").field("type", "parent") .endObject().endObject().endObject())); - ensureGreen(); client().prepareIndex("simple", "lone").setId("1").setSource("text", "value1").get(); client().prepareIndex("related", "parent").setId("2").setSource("text", "parent").get(); @@ -2003,7 +1971,6 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { assertAcked(prepareCreate("related") .addMapping("child", jsonBuilder().startObject().startObject("child").startObject("_parent").field("type", "parent") .endObject().endObject().endObject())); - ensureGreen(); indexRandom(true, client().prepareIndex("simple", "lone").setId("1").setSource("text", "value1"), @@ -2033,7 +2000,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { public void testIndicesQueryMissingIndices() throws IOException, ExecutionException, InterruptedException { createIndex("index1"); createIndex("index2"); - ensureGreen(); + indexRandom(true, client().prepareIndex("index1", "type1", "1").setSource("field", "match"), client().prepareIndex("index1", "type1", "2").setSource("field", "no_match"), @@ -2103,7 +2070,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { createIndex("index1"); createIndex("index2"); createIndex("index3"); - ensureGreen(); + indexRandom(true, client().prepareIndex("index1", "type1", "1").setSource("field", "match"), client().prepareIndex("index1", "type1", "2").setSource("field", "no_match"), @@ -2174,7 +2141,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { @Test public void testMinScore() throws ExecutionException, InterruptedException { createIndex("test"); - ensureGreen(); + indexRandom(true, client().prepareIndex("test", "test", "1").setSource("score", 1.5), client().prepareIndex("test", "test", "2").setSource("score", 1.0), @@ -2191,7 +2158,6 @@ functionScoreQuery(scriptFunction(new Script("_doc['score'].value")))).setMinSco @Test public void testQueryStringWithSlopAndFields() { createIndex("test"); - ensureGreen(); client().prepareIndex("test", "customer", "1").setSource("desc", "one two three").get(); client().prepareIndex("test", "product", "2").setSource("desc", "one two three").get(); @@ -2226,7 +2192,7 @@ functionScoreQuery(scriptFunction(new 
Script("_doc['score'].value")))).setMinSco client().prepareIndex("test", "type", "2").setSource("field", -1000000000000L), client().prepareIndex("test", "type", "3").setSource("field", -999999999999L)); - ensureYellow(); + assertHitCount(client().prepareCount("test").setQuery(rangeQuery("field").lte(-1000000000000L)).get(), 2); assertHitCount(client().prepareCount("test").setQuery(rangeQuery("field").lte(-999999999999L)).get(), 3); } @@ -2235,7 +2201,6 @@ functionScoreQuery(scriptFunction(new Script("_doc['score'].value")))).setMinSco public void testRangeFilterWithTimeZone() throws Exception { assertAcked(prepareCreate("test") .addMapping("type1", "date", "type=date", "num", "type=integer")); - ensureGreen(); indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("date", "2014-01-01", "num", 1), @@ -2333,7 +2298,6 @@ functionScoreQuery(scriptFunction(new Script("_doc['score'].value")))).setMinSco public void testRangeQueryWithTimeZone() throws Exception { assertAcked(prepareCreate("test") .addMapping("type1", "date", "type=date", "num", "type=integer")); - ensureGreen(); indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("date", "2014-01-01", "num", 1), @@ -2429,7 +2393,7 @@ functionScoreQuery(scriptFunction(new Script("_doc['score'].value")))).setMinSco public void testSearchEmptyDoc() { assertAcked(prepareCreate("test").setSettings("{\"index.analysis.analyzer.default.type\":\"keyword\"}")); client().prepareIndex("test", "type1", "1").setSource("{}").get(); - ensureYellow(); + refresh(); assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1l); } @@ -2446,7 +2410,7 @@ functionScoreQuery(scriptFunction(new Script("_doc['score'].value")))).setMinSco .putArray("index.analysis.tokenizer.my_ngram_tokenizer.token_chars", new String[0])); assertAcked(builder.addMapping("test", "origin", "type=string,copy_to=meta", "meta", "type=string,analyzer=my_ngram_analyzer")); // we only have ngrams as the index analyzer so searches will get standard analyzer - ensureGreen(); + client().prepareIndex("test", "test", "1").setSource("origin", "C.A1234.5678") .setRefresh(true) @@ -2488,7 +2452,7 @@ functionScoreQuery(scriptFunction(new Script("_doc['score'].value")))).setMinSco indexRandom(true, client().prepareIndex("test1", "type1", "1").setSource("field", "Johnnie Walker Black Label"), client().prepareIndex("test1", "type1", "2").setSource("field", "trying out Elasticsearch")); - ensureYellow(); + SearchResponse searchResponse = client().prepareSearch().setQuery(matchQuery("field", "Johnnie la").slop(between(2,5)).type(Type.PHRASE_PREFIX)).get(); assertHitCount(searchResponse, 1l); assertSearchHits(searchResponse, "1"); @@ -2511,7 +2475,7 @@ functionScoreQuery(scriptFunction(new Script("_doc['score'].value")))).setMinSco } indexRandom(true, docs); - ensureGreen(); + int iters = between(1, 100); for (int i = 0; i < iters; i++) { String intToEnglish = English.intToEnglish(between(0, numDocs - 1)); @@ -2557,7 +2521,7 @@ functionScoreQuery(scriptFunction(new Script("_doc['score'].value")))).setMinSco createIndex("test"); indexRandom(true, false, client().prepareIndex("test", "type", "1").setSource("nameTokens", "xyz")); - ensureYellow(); + SearchResponse response = client().prepareSearch("test") .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) .setQuery(QueryBuilders.queryStringQuery("xyz").boost(100)) @@ -2583,7 +2547,7 @@ functionScoreQuery(scriptFunction(new Script("_doc['score'].value")))).setMinSco public void testIdsQueryWithInvalidValues() throws 
Exception { createIndex("test"); indexRandom(true, false, client().prepareIndex("test", "type", "1").setSource("body", "foo")); - ensureYellow(); + try { client().prepareSearch("test") .setTypes("type") diff --git a/core/src/test/java/org/elasticsearch/search/scroll/SearchScrollTests.java b/core/src/test/java/org/elasticsearch/search/scroll/SearchScrollTests.java index 6920d015c48..fdff3509109 100644 --- a/core/src/test/java/org/elasticsearch/search/scroll/SearchScrollTests.java +++ b/core/src/test/java/org/elasticsearch/search/scroll/SearchScrollTests.java @@ -38,6 +38,7 @@ import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.junit.Test; +import java.io.IOException; import java.util.Map; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; @@ -576,4 +577,31 @@ public class SearchScrollTests extends ElasticsearchIntegrationTest { } } + public void testCloseAndReopenOrDeleteWithActiveScroll() throws IOException { + createIndex("test"); + for (int i = 0; i < 100; i++) { + client().prepareIndex("test", "type1", Integer.toString(i)).setSource(jsonBuilder().startObject().field("field", i).endObject()).execute().actionGet(); + } + refresh(); + SearchResponse searchResponse = client().prepareSearch() + .setQuery(matchAllQuery()) + .setSize(35) + .setScroll(TimeValue.timeValueMinutes(2)) + .addSort("field", SortOrder.ASC) + .execute().actionGet(); + long counter = 0; + assertThat(searchResponse.getHits().getTotalHits(), equalTo(100l)); + assertThat(searchResponse.getHits().hits().length, equalTo(35)); + for (SearchHit hit : searchResponse.getHits()) { + assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter++)); + } + if (randomBoolean()) { + client().admin().indices().prepareClose("test").get(); + client().admin().indices().prepareOpen("test").get(); + ensureGreen("test"); + } else { + client().admin().indices().prepareDelete("test").get(); + } + } + } diff --git a/core/src/test/java/org/elasticsearch/search/stats/SearchStatsTests.java b/core/src/test/java/org/elasticsearch/search/stats/SearchStatsTests.java index d4446fef52a..1d8c2e44190 100644 --- a/core/src/test/java/org/elasticsearch/search/stats/SearchStatsTests.java +++ b/core/src/test/java/org/elasticsearch/search/stats/SearchStatsTests.java @@ -33,13 +33,11 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.search.stats.SearchStats.Stats; import org.elasticsearch.script.Script; -import org.elasticsearch.search.SearchHit; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; import java.util.HashSet; import java.util.Set; -import java.util.concurrent.TimeUnit; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; @@ -162,33 +160,47 @@ public class SearchStatsTests extends ElasticsearchIntegrationTest { @Test public void testOpenContexts() { - createIndex("test1"); - ensureGreen("test1"); + String index = "test1"; + createIndex(index); + ensureGreen(index); + + // create shards * docs number of docs and attempt to distribute them equally + // this distribution will not be perfect; each shard will have an integer multiple of docs (possibly zero) + // we do this so we have a lot of pages to scroll through final int docs = scaledRandomIntBetween(20, 50); - for (int i = 0; i < docs; i++) { - 
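Worth spelling out the accounting the rewritten testOpenContexts performs: a scan scroll issues one initial query per shard plus one query per page of results, so a shard holding n documents searched with page size s contributes 1 + ceil(n / s) queries. The division must happen in floating point before the ceil, otherwise integer truncation makes the ceil a no-op. A sketch with concrete numbers:

long docsInShard = 40;
int pageSize = 7;
long queriesForShard = 1 + (long) Math.ceil((double) docsInShard / pageSize); // 1 + ceil(5.71...) = 7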
client().prepareIndex("test1", "type", Integer.toString(i)).setSource("field", "value").execute().actionGet(); + for (int s = 0; s < numAssignedShards(index); s++) { + for (int i = 0; i < docs; i++) { + client() + .prepareIndex(index, "type", Integer.toString(s * docs + i)) + .setSource("field", "value") + .setRouting(Integer.toString(s)) + .execute() + .actionGet(); + } } - IndicesStatsResponse indicesStats = client().admin().indices().prepareStats().execute().actionGet(); + client().admin().indices().prepareRefresh(index).execute().actionGet(); + + IndicesStatsResponse indicesStats = client().admin().indices().prepareStats(index).execute().actionGet(); assertThat(indicesStats.getTotal().getSearch().getOpenContexts(), equalTo(0l)); + int size = scaledRandomIntBetween(1, docs); SearchResponse searchResponse = client().prepareSearch() .setSearchType(SearchType.SCAN) .setQuery(matchAllQuery()) - .setSize(5) + .setSize(size) .setScroll(TimeValue.timeValueMinutes(2)) .execute().actionGet(); assertSearchResponse(searchResponse); - indicesStats = client().admin().indices().prepareStats().execute().actionGet(); - assertThat(indicesStats.getTotal().getSearch().getOpenContexts(), equalTo((long) numAssignedShards("test1"))); - assertThat(indicesStats.getTotal().getSearch().getTotal().getScrollCurrent(), equalTo((long) numAssignedShards("test1"))); + // refresh the stats now that scroll contexts are opened + indicesStats = client().admin().indices().prepareStats(index).execute().actionGet(); - // force the scan to complete measuring the time taken - // the total time the scroll is open should be greater than this - // the number of queries should equal the number of pages in the scan times the number of shards - int count = 0; + assertThat(indicesStats.getTotal().getSearch().getOpenContexts(), equalTo((long) numAssignedShards(index))); + assertThat(indicesStats.getTotal().getSearch().getTotal().getScrollCurrent(), equalTo((long) numAssignedShards(index))); + + int hits = 0; while (true) { - count++; + hits += searchResponse.getHits().getHits().length; searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()) .setScroll(TimeValue.timeValueMinutes(2)) .execute().actionGet(); @@ -196,10 +208,18 @@ public class SearchStatsTests extends ElasticsearchIntegrationTest { break; } } + long expected = 0; + + // the number of queries executed is equal to the sum of 1 + number of pages in shard over all shards + IndicesStatsResponse r = client().admin().indices().prepareStats(index).execute().actionGet(); + for (int s = 0; s < numAssignedShards(index); s++) { + expected += 1 + (long)Math.ceil(r.getShards()[s].getStats().getDocs().getCount() / size); + } indicesStats = client().admin().indices().prepareStats().execute().actionGet(); Stats stats = indicesStats.getTotal().getSearch().getTotal(); - assertThat(stats.getQueryCount(), equalTo(count * (long)numAssignedShards("test1"))); - assertThat(stats.getScrollCount(), equalTo((long)numAssignedShards("test1"))); + assertEquals(hits, docs * numAssignedShards(index)); + assertThat(stats.getQueryCount(), equalTo(expected)); + assertThat(stats.getScrollCount(), equalTo((long)numAssignedShards(index))); assertThat(stats.getScrollTimeInMillis(), greaterThan(0l)); // scroll, but with no timeout (so no context) diff --git a/core/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatTests.java b/core/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatTests.java new file mode 100644 index 00000000000..4ddeb2bd782 --- /dev/null +++ 
b/core/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatTests.java @@ -0,0 +1,296 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.snapshots; + +import org.elasticsearch.ElasticsearchCorruptionException; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.blobstore.BlobContainer; +import org.elasticsearch.common.blobstore.BlobMetaData; +import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.blobstore.fs.FsBlobStore; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.compress.CompressorFactory; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.*; +import org.elasticsearch.index.translog.BufferedChecksumStreamOutput; +import org.elasticsearch.repositories.blobstore.ChecksumBlobStoreFormat; +import org.elasticsearch.repositories.blobstore.LegacyBlobStoreFormat; +import org.junit.Test; + +import java.io.EOFException; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.Map; +import java.util.concurrent.*; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.greaterThan; + +public class BlobStoreFormatTests extends AbstractSnapshotTests { + + private static final ParseFieldMatcher parseFieldMatcher = new ParseFieldMatcher(Settings.EMPTY); + + public static final String BLOB_CODEC = "blob"; + + private static class BlobObj implements ToXContent, FromXContentBuilder { + public static final BlobObj PROTO = new BlobObj(""); + + private final String text; + + public BlobObj(String text) { + this.text = text; + } + + public String getText() { + return text; + } + + @Override + public BlobObj fromXContent(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException { + String text = null; + XContentParser.Token token = parser.currentToken(); + if (token == null) { + token = parser.nextToken(); + } + if (token == XContentParser.Token.START_OBJECT) { + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token != XContentParser.Token.FIELD_NAME) { + throw new ElasticsearchParseException("unexpected token [{}]", token); + } + String currentFieldName = parser.currentName(); + token = parser.nextToken(); + if (token.isValue()) { + if ("text" .equals(currentFieldName)) { + text = parser.text(); + } else { + throw new ElasticsearchParseException("unexpected field [{}]", 
currentFieldName); + } + } else { + throw new ElasticsearchParseException("unexpected token [{}]", token); + } + } + } + if (text == null) { + throw new ElasticsearchParseException("missing mandatory parameter text"); + } + return new BlobObj(text); + } + + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.field("text", getText()); + return builder; + } + } + + /** + * Extends legacy format with writing functionality. It's used to simulate legacy file formats in tests. + */ + private static final class LegacyEmulationBlobStoreFormat extends LegacyBlobStoreFormat { + + protected final XContentType xContentType; + + protected final boolean compress; + + public LegacyEmulationBlobStoreFormat(String blobNameFormat, FromXContentBuilder reader, ParseFieldMatcher parseFieldMatcher, boolean compress, XContentType xContentType) { + super(blobNameFormat, reader, parseFieldMatcher); + this.xContentType = xContentType; + this.compress = compress; + } + + public void write(T obj, BlobContainer blobContainer, String blobName) throws IOException { + BytesReference bytes = write(obj); + try (OutputStream outputStream = blobContainer.createOutput(blobName)) { + bytes.writeTo(outputStream); + } + } + + private BytesReference write(T obj) throws IOException { + try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) { + if (compress) { + try (StreamOutput compressedStreamOutput = CompressorFactory.defaultCompressor().streamOutput(bytesStreamOutput)) { + write(obj, compressedStreamOutput); + } + } else { + write(obj, bytesStreamOutput); + } + return bytesStreamOutput.bytes(); + } + } + + private void write(T obj, StreamOutput streamOutput) throws IOException { + XContentBuilder builder = XContentFactory.contentBuilder(xContentType, streamOutput); + builder.startObject(); + obj.toXContent(builder, SNAPSHOT_ONLY_FORMAT_PARAMS); + builder.endObject(); + builder.close(); + } + } + + @Test + public void testBlobStoreOperations() throws IOException { + BlobStore blobStore = createTestBlobStore(); + BlobContainer blobContainer = blobStore.blobContainer(BlobPath.cleanPath()); + ChecksumBlobStoreFormat checksumJSON = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj.PROTO, parseFieldMatcher, false, XContentType.JSON); + ChecksumBlobStoreFormat checksumSMILE = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj.PROTO, parseFieldMatcher, false, XContentType.SMILE); + ChecksumBlobStoreFormat checksumSMILECompressed = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj.PROTO, parseFieldMatcher, true, XContentType.SMILE); + LegacyEmulationBlobStoreFormat legacyJSON = new LegacyEmulationBlobStoreFormat<>("%s", BlobObj.PROTO, parseFieldMatcher, false, XContentType.JSON); + LegacyEmulationBlobStoreFormat legacySMILE = new LegacyEmulationBlobStoreFormat<>("%s", BlobObj.PROTO, parseFieldMatcher, false, XContentType.SMILE); + LegacyEmulationBlobStoreFormat legacySMILECompressed = new LegacyEmulationBlobStoreFormat<>("%s", BlobObj.PROTO, parseFieldMatcher, true, XContentType.SMILE); + + // Write blobs in different formats + checksumJSON.write(new BlobObj("checksum json"), blobContainer, "check-json"); + checksumSMILE.write(new BlobObj("checksum smile"), blobContainer, "check-smile"); + checksumSMILECompressed.write(new BlobObj("checksum smile compressed"), blobContainer, "check-smile-comp"); + legacyJSON.write(new BlobObj("legacy json"), blobContainer, "legacy-json"); + legacySMILE.write(new BlobObj("legacy smile"), blobContainer, 
"legacy-smile"); + legacySMILECompressed.write(new BlobObj("legacy smile compressed"), blobContainer, "legacy-smile-comp"); + + // Assert that all checksum blobs can be read by all formats + assertEquals(checksumJSON.read(blobContainer, "check-json").getText(), "checksum json"); + assertEquals(checksumSMILE.read(blobContainer, "check-json").getText(), "checksum json"); + assertEquals(checksumJSON.read(blobContainer, "check-smile").getText(), "checksum smile"); + assertEquals(checksumSMILE.read(blobContainer, "check-smile").getText(), "checksum smile"); + assertEquals(checksumJSON.read(blobContainer, "check-smile-comp").getText(), "checksum smile compressed"); + assertEquals(checksumSMILE.read(blobContainer, "check-smile-comp").getText(), "checksum smile compressed"); + + // Assert that all legacy blobs can be read be all formats + assertEquals(legacyJSON.read(blobContainer, "legacy-json").getText(), "legacy json"); + assertEquals(legacySMILE.read(blobContainer, "legacy-json").getText(), "legacy json"); + assertEquals(legacyJSON.read(blobContainer, "legacy-smile").getText(), "legacy smile"); + assertEquals(legacySMILE.read(blobContainer, "legacy-smile").getText(), "legacy smile"); + assertEquals(legacyJSON.read(blobContainer, "legacy-smile-comp").getText(), "legacy smile compressed"); + assertEquals(legacySMILE.read(blobContainer, "legacy-smile-comp").getText(), "legacy smile compressed"); + } + + + @Test + public void testCompressionIsApplied() throws IOException { + BlobStore blobStore = createTestBlobStore(); + BlobContainer blobContainer = blobStore.blobContainer(BlobPath.cleanPath()); + StringBuilder veryRedundantText = new StringBuilder(); + for (int i = 0; i < randomIntBetween(100, 300); i++) { + veryRedundantText.append("Blah "); + } + ChecksumBlobStoreFormat checksumFormat = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj.PROTO, parseFieldMatcher, false, randomBoolean() ? XContentType.SMILE : XContentType.JSON); + ChecksumBlobStoreFormat checksumFormatComp = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj.PROTO, parseFieldMatcher, true, randomBoolean() ? XContentType.SMILE : XContentType.JSON); + BlobObj blobObj = new BlobObj(veryRedundantText.toString()); + checksumFormatComp.write(blobObj, blobContainer, "blob-comp"); + checksumFormat.write(blobObj, blobContainer, "blob-not-comp"); + Map blobs = blobContainer.listBlobsByPrefix("blob-"); + assertEquals(blobs.size(), 2); + assertThat(blobs.get("blob-not-comp").length(), greaterThan(blobs.get("blob-comp").length())); + } + + @Test + public void testBlobCorruption() throws IOException { + BlobStore blobStore = createTestBlobStore(); + BlobContainer blobContainer = blobStore.blobContainer(BlobPath.cleanPath()); + String testString = randomAsciiOfLength(randomInt(10000)); + BlobObj blobObj = new BlobObj(testString); + ChecksumBlobStoreFormat checksumFormat = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj.PROTO, parseFieldMatcher, randomBoolean(), randomBoolean() ? 
XContentType.SMILE : XContentType.JSON); + checksumFormat.write(blobObj, blobContainer, "test-path"); + assertEquals(checksumFormat.read(blobContainer, "test-path").getText(), testString); + randomCorruption(blobContainer, "test-path"); + try { + checksumFormat.read(blobContainer, "test-path"); + fail("Should have failed due to corruption"); + } catch (ElasticsearchCorruptionException ex) { + assertThat(ex.getMessage(), containsString("test-path")); + } catch (EOFException ex) { + // This can happen if we corrupt the byte length + } + } + + public void testAtomicWrite() throws Exception { + final BlobStore blobStore = createTestBlobStore(); + final BlobContainer blobContainer = blobStore.blobContainer(BlobPath.cleanPath()); + String testString = randomAsciiOfLength(randomInt(10000)); + final CountDownLatch block = new CountDownLatch(1); + final CountDownLatch unblock = new CountDownLatch(1); + final BlobObj blobObj = new BlobObj(testString) { + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + super.toXContent(builder, params); + // Block before finishing writing + try { + block.countDown(); + unblock.await(5, TimeUnit.SECONDS); + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + } + return builder; + } + }; + final ChecksumBlobStoreFormat<BlobObj> checksumFormat = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj.PROTO, parseFieldMatcher, randomBoolean(), randomBoolean() ? XContentType.SMILE : XContentType.JSON); + ExecutorService threadPool = Executors.newFixedThreadPool(1); + try { + Future<Void> future = threadPool.submit(new Callable<Void>() { + @Override + public Void call() throws Exception { + checksumFormat.writeAtomic(blobObj, blobContainer, "test-blob"); + return null; + } + }); + block.await(5, TimeUnit.SECONDS); + assertFalse(blobContainer.blobExists("test-blob")); + unblock.countDown(); + future.get(); + assertTrue(blobContainer.blobExists("test-blob")); + } finally { + threadPool.shutdown(); + } + } + + protected BlobStore createTestBlobStore() throws IOException { + Settings settings = Settings.builder().build(); + return new FsBlobStore(settings, randomRepoPath()); + } + + protected void randomCorruption(BlobContainer blobContainer, String blobName) throws IOException { + byte[] buffer = new byte[(int) blobContainer.listBlobsByPrefix(blobName).get(blobName).length()]; + try (InputStream inputStream = blobContainer.openInput(blobName)) { + Streams.readFully(inputStream, buffer); + } + // checksum the blob's real content (not the still-empty buffer) so the loop below keeps flipping bits until the content has actually changed + long originalChecksum = checksum(buffer); + do { + int location = randomIntBetween(0, buffer.length - 1); + buffer[location] = (byte) (buffer[location] ^ 42); + } while (originalChecksum == checksum(buffer)); + try (OutputStream outputStream = blobContainer.createOutput(blobName)) { + Streams.copy(buffer, outputStream); + } + } + + private long checksum(byte[] buffer) throws IOException { + try (BytesStreamOutput streamOutput = new BytesStreamOutput()) { + try (BufferedChecksumStreamOutput checksumOutput = new BufferedChecksumStreamOutput(streamOutput)) { + checksumOutput.write(buffer); + return checksumOutput.getChecksum(); + } + } + } + +} diff --git a/core/src/test/java/org/elasticsearch/snapshots/RepositoriesTests.java b/core/src/test/java/org/elasticsearch/snapshots/RepositoriesTests.java index f4989314f4b..a6432e5cda5 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/RepositoriesTests.java +++ b/core/src/test/java/org/elasticsearch/snapshots/RepositoriesTests.java @@ -136,8 +136,9 @@ public class
RepositoriesTests extends AbstractSnapshotTests { assertThat(ex.toString(), containsString("missing location")); } - logger.info("--> trying creating repository with location that is not registered in path.repo setting"); - String location = createTempDir().toAbsolutePath().toString(); + logger.info("--> trying creating fs repository with location that is not registered in path.repo setting"); + Path invalidRepoPath = createTempDir().toAbsolutePath(); + String location = invalidRepoPath.toString(); try { client().admin().cluster().preparePutRepository("test-repo") .setType("fs").setSettings(Settings.settingsBuilder().put("location", location)) @@ -146,6 +147,28 @@ public class RepositoriesTests extends AbstractSnapshotTests { } catch (RepositoryException ex) { assertThat(ex.toString(), containsString("location [" + location + "] doesn't match any of the locations specified by path.repo")); } + + String repoUrl = invalidRepoPath.toAbsolutePath().toUri().toURL().toString(); + String unsupportedUrl = repoUrl.replace("file:/", "netdoc:/"); + logger.info("--> trying creating url repository with unsupported url protocol"); + try { + client().admin().cluster().preparePutRepository("test-repo") + .setType("url").setSettings(Settings.settingsBuilder().put("url", unsupportedUrl)) + .get(); + fail("Shouldn't be here"); + } catch (RepositoryException ex) { + assertThat(ex.toString(), containsString("unsupported url protocol [netdoc]")); + } + + logger.info("--> trying creating url repository with location that is not registered in path.repo setting"); + try { + client().admin().cluster().preparePutRepository("test-repo") + .setType("url").setSettings(Settings.settingsBuilder().put("url", invalidRepoPath.toUri().toURL())) + .get(); + fail("Shouldn't be here"); + } catch (RepositoryException ex) { + assertThat(ex.toString(), containsString("doesn't match any of the locations specified by path.repo")); + } } @Test diff --git a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java index 8419d54a944..bf2e3aa98f7 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java +++ b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java @@ -234,7 +234,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests { .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); createIndex("test"); - String originalIndexUUID = client().admin().indices().prepareGetSettings("test").get().getSetting("test", IndexMetaData.SETTING_UUID); + String originalIndexUUID = client().admin().indices().prepareGetSettings("test").get().getSetting("test", IndexMetaData.SETTING_INDEX_UUID); assertTrue(originalIndexUUID, originalIndexUUID != null); assertFalse(originalIndexUUID, originalIndexUUID.equals(IndexMetaData.INDEX_UUID_NA_VALUE)); ensureGreen(); @@ -247,7 +247,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests { assertAcked(prepareCreate("test").setSettings(Settings.builder() .put(SETTING_NUMBER_OF_SHARDS, numShards.numPrimaries))); ensureGreen(); - String newIndexUUID = client().admin().indices().prepareGetSettings("test").get().getSetting("test", IndexMetaData.SETTING_UUID); + String newIndexUUID = client().admin().indices().prepareGetSettings("test").get().getSetting("test", IndexMetaData.SETTING_INDEX_UUID); assertTrue(newIndexUUID, newIndexUUID != null); 
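The url-repository validation exercised in the RepositoriesTests hunk above rejects any protocol outside a fixed whitelist; `netdoc` makes a good negative case because the JDK can parse it even though a repository cannot serve blobs over it. A minimal, self-contained sketch of that style of check (the whitelist here is illustrative, not the exact list `URLRepository` ships with):

[source,java]
--------------------------------------------------
import java.net.URL;
import java.util.Arrays;
import java.util.List;

// Illustrative protocol whitelist in the spirit of the url-repository test above;
// the SUPPORTED list is an assumption for this sketch.
public class UrlProtocolCheck {
    private static final List<String> SUPPORTED = Arrays.asList("http", "https", "ftp", "file", "jar");

    static void validate(URL url) {
        if (SUPPORTED.contains(url.getProtocol()) == false) {
            throw new IllegalArgumentException("unsupported url protocol [" + url.getProtocol() + "]");
        }
    }

    public static void main(String[] args) throws Exception {
        validate(new URL("file:///tmp/repo"));   // accepted
        validate(new URL("netdoc://host/repo")); // rejected, as the test above expects
    }
}
--------------------------------------------------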
assertFalse(newIndexUUID, newIndexUUID.equals(IndexMetaData.INDEX_UUID_NA_VALUE)); assertFalse(newIndexUUID, newIndexUUID.equals(originalIndexUUID)); @@ -259,7 +259,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests { assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); ensureGreen(); - String newAfterRestoreIndexUUID = client().admin().indices().prepareGetSettings("test").get().getSetting("test", IndexMetaData.SETTING_UUID); + String newAfterRestoreIndexUUID = client().admin().indices().prepareGetSettings("test").get().getSetting("test", IndexMetaData.SETTING_INDEX_UUID); assertTrue("UUID has changed after restore: " + newIndexUUID + " vs. " + newAfterRestoreIndexUUID, newIndexUUID.equals(newAfterRestoreIndexUUID)); logger.info("--> restore indices with different names"); @@ -267,7 +267,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests { .setRenamePattern("(.+)").setRenameReplacement("$1-copy").setWaitForCompletion(true).execute().actionGet(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); - String copyRestoreUUID = client().admin().indices().prepareGetSettings("test-copy").get().getSetting("test-copy", IndexMetaData.SETTING_UUID); + String copyRestoreUUID = client().admin().indices().prepareGetSettings("test-copy").get().getSetting("test-copy", IndexMetaData.SETTING_INDEX_UUID); assertFalse("UUID has been reused on restore: " + copyRestoreUUID + " vs. " + originalIndexUUID, copyRestoreUUID.equals(originalIndexUUID)); } @@ -906,7 +906,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests { assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); logger.info("--> truncate snapshot file to make it unreadable"); - Path snapshotPath = repo.resolve("snapshot-test-snap-1"); + Path snapshotPath = repo.resolve("snap-test-snap-1.dat"); try(SeekableByteChannel outChan = Files.newByteChannel(snapshotPath, StandardOpenOption.WRITE)) { outChan.truncate(randomInt(10)); } diff --git a/core/src/test/java/org/elasticsearch/snapshots/SnapshotBackwardsCompatibilityTest.java b/core/src/test/java/org/elasticsearch/snapshots/SnapshotBackwardsCompatibilityTest.java index 69cf99923f8..d545e1076d0 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/SnapshotBackwardsCompatibilityTest.java +++ b/core/src/test/java/org/elasticsearch/snapshots/SnapshotBackwardsCompatibilityTest.java @@ -111,7 +111,7 @@ public class SnapshotBackwardsCompatibilityTest extends ElasticsearchBackwardsCo assertThat(client().prepareCount(indices).get().getCount(), lessThan((long) (buildersBefore.length + buildersAfter.length))); - client().admin().indices().prepareUpdateSettings(indices).setSettings(Settings.builder().put(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, "none")).get(); + disableAllocation(indices); backwardsCluster().allowOnAllNodes(indices); logClusterState(); boolean upgraded; @@ -124,7 +124,7 @@ public class SnapshotBackwardsCompatibilityTest extends ElasticsearchBackwardsCo countResponse = client().prepareCount().get(); assertHitCount(countResponse, numDocs); } while (upgraded); - client().admin().indices().prepareUpdateSettings(indices).setSettings(Settings.builder().put(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, "all")).get(); + enableAllocation(indices); logger.info("--> close indices"); client().admin().indices().prepareClose("index_before_*").get(); 
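The "truncate snapshot file to make it unreadable" step above is a compact corruption primitive. The same trick as a stand-alone sketch, assuming only a path to the blob:

[source,java]
--------------------------------------------------
import java.nio.channels.SeekableByteChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

// Cut a snapshot blob down to a few bytes so any later length or checksum
// validation has to fail; mirrors the SeekableByteChannel use in the test above.
public class TruncateSnapshotBlob {
    public static void main(String[] args) throws Exception {
        Path snapshotPath = Paths.get(args[0]); // e.g. the repo's snap-test-snap-1.dat
        try (SeekableByteChannel outChan = Files.newByteChannel(snapshotPath, StandardOpenOption.WRITE)) {
            outChan.truncate(5); // the test truncates to randomInt(10) bytes
        }
    }
}
--------------------------------------------------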
@@ -201,7 +201,7 @@ public class SnapshotBackwardsCompatibilityTest extends ElasticsearchBackwardsCo } if (frequently()) { logger.info("--> upgrade"); - client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, "none")).get(); + disableAllocation("test"); backwardsCluster().allowOnAllNodes("test"); logClusterState(); boolean upgraded; @@ -214,7 +214,7 @@ public class SnapshotBackwardsCompatibilityTest extends ElasticsearchBackwardsCo countResponse = client().prepareCount().get(); assertHitCount(countResponse, numDocs); } while (upgraded); - client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, "all")).get(); + enableAllocation("test"); } if (cluster().numDataNodes() > 1 && randomBoolean()) { // only bump the replicas if we have enough nodes logger.info("--> move from 0 to 1 replica"); diff --git a/core/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java b/core/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java index 0eb9f069b4b..790c088cc02 100644 --- a/core/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java +++ b/core/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java @@ -31,6 +31,7 @@ import com.google.common.collect.Lists; import org.apache.commons.lang3.StringUtils; import org.apache.http.impl.client.HttpClients; import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.index.shard.MergeSchedulerConfig; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase; @@ -1086,7 +1087,7 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase // Check that the non-master node has the same version of the cluster state as the master and that this node didn't disconnect from the master if (masterClusterState.version() == localClusterState.version() && localClusterState.nodes().nodes().containsKey(masterId)) { try { - assertEquals("clusterstate UUID does not match", masterClusterState.uuid(), localClusterState.uuid()); + assertEquals("clusterstate UUID does not match", masterClusterState.stateUUID(), localClusterState.stateUUID()); // We cannot compare serialization bytes since serialization order of maps is not guaranteed // but we can compare serialization sizes - they should be the same assertEquals("clusterstate size does not match", masterClusterStateSize, localClusterStateSize); @@ -1228,6 +1229,24 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase return actionGet.isExists(); } + /** + * Syntactic sugar for enabling allocation for indices + */ + protected final void enableAllocation(String... indices) { + client().admin().indices().prepareUpdateSettings(indices).setSettings(Settings.builder().put( + EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, "all" + )).get(); + } + + /** + * Syntactic sugar for disabling allocation for indices + */ + protected final void disableAllocation(String... indices) { + client().admin().indices().prepareUpdateSettings(indices).setSettings(Settings.builder().put( + EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, "none" + )).get(); + } + /** * Returns a random admin client. This client can either be a node or a transport client pointing to any of * the nodes in the cluster. 
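The `enableAllocation`/`disableAllocation` helpers introduced above replace a verbose settings-update idiom on both sides of the upgrade loop. A sketch of the loop they bracket, assuming the `backwardsCluster()` fixture and the `numDocs` counter from the surrounding backwards-compatibility tests:

[source,java]
--------------------------------------------------
// Disable -> upgrade one node at a time -> re-enable; this is the pattern the
// new helpers compress (index.routing.allocation.enable = none / all).
disableAllocation("test");
backwardsCluster().allowOnAllNodes("test");
boolean upgraded;
do {
    upgraded = backwardsCluster().upgradeOneNode(); // false once every node is new
    ensureYellow();
    assertHitCount(client().prepareCount().get(), numDocs); // no documents lost mid-upgrade
} while (upgraded);
enableAllocation("test");
--------------------------------------------------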
diff --git a/core/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java b/core/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java index 4bfafb266fc..73a107dca29 100644 --- a/core/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java +++ b/core/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java @@ -513,7 +513,7 @@ public abstract class ElasticsearchTestCase extends LuceneTestCase { /** Return consistent index settings for the provided index version. */ public static Settings.Builder settings(Version version) { Settings.Builder builder = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version); - if (version.before(Version.V_2_0_0)) { + if (version.before(Version.V_2_0_0_beta1)) { builder.put(IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION, DjbHashFunction.class); } return builder; diff --git a/core/src/test/java/org/elasticsearch/test/InternalTestCluster.java b/core/src/test/java/org/elasticsearch/test/InternalTestCluster.java index c1bbd9e796f..ac785c1601d 100644 --- a/core/src/test/java/org/elasticsearch/test/InternalTestCluster.java +++ b/core/src/test/java/org/elasticsearch/test/InternalTestCluster.java @@ -50,6 +50,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.OperationRouting; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider; import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.elasticsearch.common.Nullable; @@ -163,8 +164,13 @@ public final class InternalTestCluster extends TestCluster { */ public static final String SETTING_CLUSTER_NODE_SEED = "test.cluster.node.seed"; + /** + * The number of ports in the range used for this JVM + */ + public static final int PORTS_PER_JVM = 100; + private static final int JVM_ORDINAL = Integer.parseInt(System.getProperty(SysGlobals.CHILDVM_SYSPROP_JVM_ID, "0")); - public static final int BASE_PORT = 9300 + 100 * (JVM_ORDINAL + 1); + public static final int BASE_PORT = 9300 + PORTS_PER_JVM * (JVM_ORDINAL + 1); private static final boolean ENABLE_MOCK_MODULES = RandomizedTest.systemPropertyAsBoolean(TESTS_ENABLE_MOCK_MODULES, true); @@ -482,6 +488,9 @@ public final class InternalTestCluster extends TestCluster { builder.put(ScriptService.SCRIPT_CACHE_EXPIRE_SETTING, TimeValue.timeValueMillis(RandomInts.randomIntBetween(random, 750, 10000000))); } + // always default delayed allocation to 0 to make sure tests are not delayed + builder.put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, 0); + return builder.build(); } diff --git a/core/src/test/java/org/elasticsearch/test/TestCluster.java b/core/src/test/java/org/elasticsearch/test/TestCluster.java index a1f5f016a8d..322ed74197e 100644 --- a/core/src/test/java/org/elasticsearch/test/TestCluster.java +++ b/core/src/test/java/org/elasticsearch/test/TestCluster.java @@ -25,7 +25,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.indices.IndexMissingException; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.indices.IndexTemplateMissingException; import org.elasticsearch.repositories.RepositoryMissingException; @@ -135,7 +135,7 @@
public abstract class TestCluster implements Iterable, Closeable { if (size() > 0) { try { assertAcked(client().admin().indices().prepareDelete(indices)); - } catch (IndexMissingException e) { + } catch (IndexNotFoundException e) { // ignore } catch (IllegalArgumentException e) { // Happens if `action.destructive_requires_name` is set to true diff --git a/core/src/test/java/org/elasticsearch/test/cluster/TestClusterService.java b/core/src/test/java/org/elasticsearch/test/cluster/TestClusterService.java index 007f6b45d1d..b254a9198f6 100644 --- a/core/src/test/java/org/elasticsearch/test/cluster/TestClusterService.java +++ b/core/src/test/java/org/elasticsearch/test/cluster/TestClusterService.java @@ -30,6 +30,10 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.component.LifecycleListener; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.DummyTransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; @@ -50,6 +54,7 @@ public class TestClusterService implements ClusterService { private final Collection listeners = new CopyOnWriteArrayList<>(); private final Queue onGoingTimeouts = ConcurrentCollections.newQueue(); private final ThreadPool threadPool; + private final ESLogger logger = Loggers.getLogger(getClass(), Settings.EMPTY); public TestClusterService() { this(ClusterState.builder(new ClusterName("test")).build()); @@ -67,8 +72,8 @@ public class TestClusterService implements ClusterService { if (state.getNodes().size() == 0) { state = ClusterState.builder(state).nodes( DiscoveryNodes.builder() - .put(new DiscoveryNode("test_id", DummyTransportAddress.INSTANCE, Version.CURRENT)) - .localNodeId("test_id")).build(); + .put(new DiscoveryNode("test_node", DummyTransportAddress.INSTANCE, Version.CURRENT)) + .localNodeId("test_node")).build(); } assert state.getNodes().localNode() != null; @@ -78,21 +83,26 @@ public class TestClusterService implements ClusterService { } - /** set the current state and trigger any registered listeners about the change */ - public void setState(ClusterState state) { + /** set the current state and trigger any registered listeners about the change, mimicking an update task */ + synchronized public ClusterState setState(ClusterState state) { assert state.getNodes().localNode() != null; // make sure we have a version increment state = ClusterState.builder(state).version(this.state.version() + 1).build(); + return setStateAndNotifyListeners(state); + } + + private ClusterState setStateAndNotifyListeners(ClusterState state) { ClusterChangedEvent event = new ClusterChangedEvent("test", state, this.state); this.state = state; for (ClusterStateListener listener : listeners) { listener.clusterChanged(event); } + return state; } /** set the current state and trigger any registered listeners about the change */ - public void setState(ClusterState.Builder state) { - setState(state.build()); + public ClusterState setState(ClusterState.Builder state) { + return setState(state.build()); } @Override @@ -172,12 +182,34 @@ public class TestClusterService implements ClusterService { } @Override - public void submitStateUpdateTask(String source, Priority priority, 
ClusterStateUpdateTask updateTask) { - throw new UnsupportedOperationException(); + synchronized public void submitStateUpdateTask(String source, Priority priority, ClusterStateUpdateTask updateTask) { + logger.debug("processing [{}]", source); + if (state().nodes().localNodeMaster() == false && updateTask.runOnlyOnMaster()) { + updateTask.onNoLongerMaster(source); + logger.debug("failed [{}], no longer master", source); + return; + } + ClusterState newState; + ClusterState previousClusterState = state; + try { + newState = updateTask.execute(previousClusterState); + } catch (Exception e) { + throw new ElasticsearchException("failed to process cluster state update task [" + source + "]", e); + } + setStateAndNotifyListeners(newState); + if (updateTask instanceof ProcessedClusterStateUpdateTask) { + ((ProcessedClusterStateUpdateTask) updateTask).clusterStateProcessed(source, previousClusterState, newState); + } + logger.debug("finished [{}]", source); } @Override public void submitStateUpdateTask(String source, ClusterStateUpdateTask updateTask) { + submitStateUpdateTask(source, Priority.NORMAL, updateTask); + } + + @Override + public TimeValue getMaxTaskWaitTime() { throw new UnsupportedOperationException(); } @@ -192,11 +224,6 @@ public class TestClusterService implements ClusterService { throw new UnsupportedOperationException(); } - @Override - public TimeValue getMaxTaskWaitTime() { - throw new UnsupportedOperationException(); - } - @Override public Lifecycle.State lifecycleState() { throw new UnsupportedOperationException(); @@ -213,17 +240,17 @@ public class TestClusterService implements ClusterService { } @Override - public ClusterService start() { + public ClusterService start() throws ElasticsearchException { throw new UnsupportedOperationException(); } @Override - public ClusterService stop() { + public ClusterService stop() throws ElasticsearchException { throw new UnsupportedOperationException(); } @Override - public void close() { + public void close() throws ElasticsearchException { throw new UnsupportedOperationException(); } diff --git a/core/src/test/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java b/core/src/test/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java index 6b63243b546..f3c0da986e2 100644 --- a/core/src/test/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java +++ b/core/src/test/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java @@ -142,11 +142,11 @@ public class ClusterDiscoveryConfiguration extends SettingsSource { int[] unicastHostPorts = new int[numHosts]; final int basePort = calcBasePort(); - final int maxPort = basePort + 1000; + final int maxPort = basePort + InternalTestCluster.PORTS_PER_JVM; int tries = 0; for (int i = 0; i < unicastHostPorts.length; i++) { boolean foundPortInRange = false; - while (tries < 1000 && !foundPortInRange) { + while (tries < InternalTestCluster.PORTS_PER_JVM && !foundPortInRange) { try (ServerSocket serverSocket = new ServerSocket()) { // Set SO_REUSEADDR as we may bind here and not be able to reuse the address immediately without it. 
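The shared `PORTS_PER_JVM` constant wired in above keeps the unicast host scan inside the same window the cluster itself binds. The banding arithmetic is easy to verify in isolation:

[source,java]
--------------------------------------------------
// Self-contained check of the per-JVM port banding defined in InternalTestCluster:
// each forked test JVM gets a disjoint block of PORTS_PER_JVM ports, so parallel
// JVMs and their unicast host scans cannot collide.
public class PortBands {
    static final int PORTS_PER_JVM = 100;

    static int basePort(int jvmOrdinal) {
        return 9300 + PORTS_PER_JVM * (jvmOrdinal + 1);
    }

    public static void main(String[] args) {
        for (int ordinal = 0; ordinal <= 2; ordinal++) {
            int base = basePort(ordinal);
            System.out.println("JVM " + ordinal + ": [" + base + ", " + (base + PORTS_PER_JVM) + ")");
        }
        // JVM 0: [9400, 9500)  JVM 1: [9500, 9600)  JVM 2: [9600, 9700)
    }
}
--------------------------------------------------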
serverSocket.setReuseAddress(NetworkUtils.defaultReuseAddress()); diff --git a/core/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/core/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java index f9430edd6b9..25cef6db14d 100644 --- a/core/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java +++ b/core/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java @@ -705,7 +705,6 @@ public class ElasticsearchAssertions { */ public static SearchResponse assertSearchResponse(SearchResponse response) { assertNoFailures(response); - assertThat("One or more shards were not successful but didn't trigger a failure", response.getSuccessfulShards(), equalTo(response.getTotalShards())); return response; } diff --git a/core/src/test/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java b/core/src/test/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java index e57732bf5f8..d63a0797f26 100644 --- a/core/src/test/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java +++ b/core/src/test/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java @@ -62,6 +62,13 @@ public class ReproduceInfoPrinter extends RunListener { logger.trace("Test {} finished", description.getDisplayName()); } + /** + * true if we are running maven integration tests (mvn verify) + */ + static boolean inVerifyPhase() { + return Boolean.parseBoolean(System.getProperty("tests.verify.phase")); + } + @Override public void testFailure(Failure failure) throws Exception { // Ignore assumptions. @@ -70,7 +77,11 @@ public class ReproduceInfoPrinter extends RunListener { } final StringBuilder b = new StringBuilder(); - b.append("REPRODUCE WITH: mvn test -Pdev"); + if (inVerifyPhase()) { + b.append("REPRODUCE WITH: mvn verify -Pdev -Dskip.unit.tests"); + } else { + b.append("REPRODUCE WITH: mvn test -Pdev"); + } MavenMessageBuilder mavenMessageBuilder = new MavenMessageBuilder(b); mavenMessageBuilder.appendAllOpts(failure.getDescription()); @@ -140,9 +151,13 @@ public class ReproduceInfoPrinter extends RunListener { } public ReproduceErrorMessageBuilder appendESProperties() { - appendProperties("es.logger.level", "es.node.mode", "es.node.local", TESTS_CLUSTER, InternalTestCluster.TESTS_ENABLE_MOCK_MODULES, - "tests.assertion.disabled", "tests.security.manager", "tests.nightly", "tests.jvms", "tests.client.ratio", "tests.heap.size", - "tests.bwc", "tests.bwc.version"); + appendProperties("es.logger.level"); + if (!inVerifyPhase()) { + // these properties only make sense for unit tests + appendProperties("es.node.mode", "es.node.local", TESTS_CLUSTER, InternalTestCluster.TESTS_ENABLE_MOCK_MODULES); + } + appendProperties("tests.assertion.disabled", "tests.security.manager", "tests.nightly", "tests.jvms", + "tests.client.ratio", "tests.heap.size", "tests.bwc", "tests.bwc.version"); if (System.getProperty("tests.jvm.argline") != null && !System.getProperty("tests.jvm.argline").isEmpty()) { appendOpt("tests.jvm.argline", "\"" + System.getProperty("tests.jvm.argline") + "\""); } diff --git a/core/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTestCase.java b/core/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTestCase.java index b815bd98d96..b9449d03db3 100644 --- a/core/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTestCase.java +++ b/core/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTestCase.java @@ -35,6 +35,7 @@ import 
org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.node.Node; +import org.elasticsearch.repositories.uri.URLRepository; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; import org.elasticsearch.test.rest.client.RestException; @@ -155,6 +156,7 @@ public abstract class ElasticsearchRestTestCase extends ElasticsearchIntegration @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() + .putArray(URLRepository.ALLOWED_URLS_SETTING, "http://snapshot.test*") .put(Node.HTTP_ENABLED, true) .put(super.nodeSettings(nodeOrdinal)).build(); } diff --git a/core/src/test/java/org/elasticsearch/test/search/MockSearchService.java b/core/src/test/java/org/elasticsearch/test/search/MockSearchService.java index 80b83affd28..dd6c972af00 100644 --- a/core/src/test/java/org/elasticsearch/test/search/MockSearchService.java +++ b/core/src/test/java/org/elasticsearch/test/search/MockSearchService.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.IndicesWarmer; import org.elasticsearch.indices.cache.request.IndicesRequestCache; +import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.dfs.DfsPhase; @@ -52,10 +53,10 @@ public class MockSearchService extends SearchService { } @Inject - public MockSearchService(Settings settings, ClusterService clusterService, IndicesService indicesService, IndicesWarmer indicesWarmer, + public MockSearchService(Settings settings, NodeSettingsService nodeSettingsService, ClusterService clusterService, IndicesService indicesService, IndicesWarmer indicesWarmer, ThreadPool threadPool, ScriptService scriptService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, DfsPhase dfsPhase, QueryPhase queryPhase, FetchPhase fetchPhase, IndicesRequestCache indicesQueryCache) { - super(settings, clusterService, indicesService, indicesWarmer, threadPool, scriptService, pageCacheRecycler, bigArrays, dfsPhase, + super(settings, nodeSettingsService, clusterService, indicesService, indicesWarmer, threadPool, scriptService, pageCacheRecycler, bigArrays, dfsPhase, queryPhase, fetchPhase, indicesQueryCache); } diff --git a/core/src/test/java/org/elasticsearch/test/store/MockFSDirectoryService.java b/core/src/test/java/org/elasticsearch/test/store/MockFSDirectoryService.java index 036d98cc187..5be741b9dce 100644 --- a/core/src/test/java/org/elasticsearch/test/store/MockFSDirectoryService.java +++ b/core/src/test/java/org/elasticsearch/test/store/MockFSDirectoryService.java @@ -136,7 +136,7 @@ public class MockFSDirectoryService extends FsDirectoryService { throw new UnsupportedOperationException(); } - public void checkIndex(Store store, ShardId shardId) throws IndexShardException { + public void checkIndex(Store store, ShardId shardId) { if (store.tryIncRef()) { logger.info("start check index"); try { @@ -159,7 +159,7 @@ public class MockFSDirectoryService extends FsDirectoryService { logger.warn("check index [failure] index files={}\n{}", Arrays.toString(dir.listAll()), new String(os.bytes().toBytes(), Charsets.UTF_8)); - throw new IndexShardException(shardId, "index check failure"); + throw new IOException("index check failure"); } else { 
if (logger.isDebugEnabled()) { logger.debug("check index [success]\n{}", new String(os.bytes().toBytes(), Charsets.UTF_8)); diff --git a/core/src/test/java/org/elasticsearch/validate/SimpleValidateQueryTests.java b/core/src/test/java/org/elasticsearch/validate/SimpleValidateQueryTests.java index 5f40da81718..0026957fe79 100644 --- a/core/src/test/java/org/elasticsearch/validate/SimpleValidateQueryTests.java +++ b/core/src/test/java/org/elasticsearch/validate/SimpleValidateQueryTests.java @@ -27,9 +27,9 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.indices.IndexMissingException; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope; @@ -145,7 +145,7 @@ public class SimpleValidateQueryTests extends ElasticsearchIntegrationTest { assertThat(response.isValid(), equalTo(true)); } - @Test(expected = IndexMissingException.class) + @Test(expected = IndexNotFoundException.class) public void validateEmptyCluster() { client().admin().indices().prepareValidateQuery().get(); } diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.6.1.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.6.1.zip new file mode 100644 index 00000000000..04820f92b07 Binary files /dev/null and b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.6.1.zip differ diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.7.0.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.7.0.zip new file mode 100644 index 00000000000..941be645c65 Binary files /dev/null and b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.7.0.zip differ diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.6.1.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.6.1.zip new file mode 100644 index 00000000000..746f3ce613c Binary files /dev/null and b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.6.1.zip differ diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.7.0.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.7.0.zip new file mode 100644 index 00000000000..893689b0f4c Binary files /dev/null and b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.7.0.zip differ diff --git a/dev-tools/pom.xml b/dev-tools/pom.xml index 0daa4cbad51..369c003beee 100644 --- a/dev-tools/pom.xml +++ b/dev-tools/pom.xml @@ -2,7 +2,7 @@ 4.0.0 org.elasticsearch elasticsearch-dev-tools - 2.0.0-SNAPSHOT + 2.0.0-beta1-SNAPSHOT Elasticsearch Build Resources org.sonatype.oss diff --git a/dev-tools/src/main/resources/ant/fixup-failsafe-summary.xslt b/dev-tools/src/main/resources/ant/fixup-failsafe-summary.xslt new file mode 100644 index 00000000000..ca98984abfb --- /dev/null +++ b/dev-tools/src/main/resources/ant/fixup-failsafe-summary.xslt @@ -0,0 +1,17 @@ [17 added lines: the XSLT markup of this new stylesheet was stripped during extraction and cannot be reconstructed] diff --git a/dev-tools/src/main/resources/ant/integration-tests.xml b/dev-tools/src/main/resources/ant/integration-tests.xml index 82f37497425..331a9c029c3 100644 --- a/dev-tools/src/main/resources/ant/integration-tests.xml +++ 
b/dev-tools/src/main/resources/ant/integration-tests.xml [hunks @@ -16,7 +16,7 @@ through @@ -55,66 +58,109 @@: the Ant XML markup was stripped during extraction; the surviving fragments show the integ JVM arguments gaining -Des.script.indexed=on, -Des.pidfile=${integ.pidfile} and -Des.repositories.url.allowed_urls=http://snapshot.test*, and the plugin-install and cluster start-up steps being pulled into reusable macros that echo "Installing plugin @{name}...", "Starting up external cluster..." and "External cluster started PID ${integ.pid}", replacing inlined blocks that echoed "Installing plugin ${project.artifactId}..."] diff --git a/docs/reference/aggregations/pipeline.asciidoc b/docs/reference/aggregations/pipeline.asciidoc index c6bc98926ae..c566bb174e0 100644 --- a/docs/reference/aggregations/pipeline.asciidoc +++ b/docs/reference/aggregations/pipeline.asciidoc @@ -167,3 +167,4 @@ include::pipeline/movavg-aggregation.asciidoc[] include::pipeline/cumulative-sum-aggregation.asciidoc[] include::pipeline/bucket-script-aggregation.asciidoc[] include::pipeline/bucket-selector-aggregation.asciidoc[] +include::pipeline/serial-diff-aggregation.asciidoc[] diff --git a/docs/reference/aggregations/pipeline/serial-diff-aggregation.asciidoc b/docs/reference/aggregations/pipeline/serial-diff-aggregation.asciidoc new file mode 100644 index 00000000000..b6a9cf720fc --- /dev/null +++ b/docs/reference/aggregations/pipeline/serial-diff-aggregation.asciidoc @@ -0,0 +1,105 @@ +[[search-aggregations-pipeline-serialdiff-aggregation]] +=== Serial Differencing Aggregation + +coming[2.0.0] + +experimental[] + +Serial differencing is a technique where values in a time series are subtracted from themselves at +different time lags or periods. For example, the lag-n difference at time t is f(x~t~) - f(x~t-n~), where n is the period being used. + +A period of 1 is equivalent to a derivative with no time normalization: it is simply the change from one point to the +next. Single periods are useful for removing constant, linear trends. + +Single periods are also useful for transforming data into a stationary series. In this example, the Dow Jones is +plotted over ~250 days. The raw data is not stationary, which would make it difficult to use with some techniques. + +By calculating the first-difference, we de-trend the data (i.e. remove a constant, linear trend). We can see that the +data becomes a stationary series (i.e. the first difference is randomly distributed around zero, and doesn't seem to +exhibit any pattern/behavior). The transformation reveals that the dataset is following a random-walk; the value is the +previous value +/- a random amount. This insight allows selection of further tools for analysis. + +[[serialdiff_dow]] +.Dow Jones plotted and made stationary with first-differencing +image::images/pipeline_serialdiff/dow.png[] + +Larger periods can be used to remove seasonal / cyclic behavior. In this example, a population of lemmings was +synthetically generated with a sine wave + constant linear trend + random noise. The sine wave has a period of 30 days. + +The first-difference removes the constant trend, leaving just a sine wave. The 30th-difference is then applied to the +first-difference to remove the cyclic behavior, leaving a stationary series which is amenable to other analysis.
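Before the syntax reference, the arithmetic itself is small enough to state exactly; a stand-alone sketch of the lag-n differencing described above:

[source,java]
--------------------------------------------------
import java.util.Arrays;

// Each output point is the current value minus the value `lag` buckets back,
// the same per-bucket transformation the serial_diff aggregation applies.
public class SerialDifference {
    static double[] diff(double[] series, int lag) {
        double[] out = new double[series.length - lag];
        for (int i = lag; i < series.length; i++) {
            out[i - lag] = series[i] - series[i - lag];
        }
        return out;
    }

    public static void main(String[] args) {
        double[] lemmings = {10, 12, 15, 13, 18};
        // first difference (lag = 1) removes a constant linear trend
        System.out.println(Arrays.toString(diff(lemmings, 1))); // [2.0, 3.0, -2.0, 5.0]
    }
}
--------------------------------------------------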
+ +[[serialdiff_lemmings]] +.Lemmings data plotted and made stationary with 1st and 30th difference +image::images/pipeline_serialdiff/lemmings.png[] + + + +==== Syntax + +A `serial_diff` aggregation looks like this in isolation: + +[source,js] +-------------------------------------------------- +{ + "serial_diff": { + "buckets_path": "the_sum", + "lag": 7 + } +} +-------------------------------------------------- + +.`serial_diff` Parameters +|=== +|Parameter Name |Description |Required |Default Value +|`buckets_path` |Path to the metric of interest (see <> for more details) |Required | +|`lag` |The historical bucket to subtract from the current value. E.g. a lag of 7 will subtract the value 7 buckets ago from + the current value. Must be a positive, non-zero integer |Optional |`1` +|`gap_policy` |Determines what should happen when a gap in the data is encountered. |Optional |`insert_zeros` +|`format` |Format to apply to the output value of this aggregation |Optional | `null` +|=== + +`serial_diff` aggregations must be embedded inside of a `histogram` or `date_histogram` aggregation: + +[source,js] +-------------------------------------------------- +{ + "aggs": { + "my_date_histo": { <1> + "date_histogram": { + "field": "timestamp", + "interval": "day" + }, + "aggs": { + "the_sum": { + "sum": { + "field": "lemmings" <2> + } + }, + "thirtieth_difference": { + "serial_diff": { <3> + "buckets_path": "the_sum", + "lag" : 30 + } + } + } + } + } +} +-------------------------------------------------- +<1> A `date_histogram` named "my_date_histo" is constructed on the "timestamp" field, with one-day intervals +<2> A `sum` metric is used to calculate the sum of a field. This could be any metric (sum, min, max, etc) +<3> Finally, we specify a `serial_diff` aggregation which uses "the_sum" metric as its input. + +Serial differences are built by first specifying a `histogram` or `date_histogram` over a field. You can then optionally +add normal metrics, such as a `sum`, inside of that histogram. Finally, the `serial_diff` is embedded inside the histogram. +The `buckets_path` parameter is then used to "point" at one of the sibling metrics inside of the histogram (see +<> for a description of the syntax for `buckets_path`). + + + + + + + + diff --git a/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc index 5d51afb4aee..ead0e118af7 100644 --- a/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc @@ -31,7 +31,7 @@ type: The `stopwords` parameter accepts either an array of stopwords: -[source,json] +[source,js] ------------------------------------ PUT /my_index { @@ -50,7 +50,7 @@ PUT /my_index or a predefined language-specific list: -[source,json] +[source,js] ------------------------------------ PUT /my_index { diff --git a/docs/reference/api-conventions.asciidoc b/docs/reference/api-conventions.asciidoc index 7dfb3936e35..a1f4f66f59b 100644 --- a/docs/reference/api-conventions.asciidoc +++ b/docs/reference/api-conventions.asciidoc @@ -333,14 +333,6 @@ generates an edit distance based on the length of the term. For lengths: `AUTO` should generally be the preferred value for `fuzziness`. -- -`0.0..1.0`:: -converted into an edit distance using the formula: `length(term) * (1.0 - -fuzziness)`, eg a `fuzziness` of `0.6` with a term of length 10 would result -in an edit distance of `4`. Note: in all APIs the maximum allowed edit distance is `2`.
- - - [float] === Result Casing diff --git a/docs/reference/cat.asciidoc b/docs/reference/cat.asciidoc index bc29cc92d64..e5f3d967498 100644 --- a/docs/reference/cat.asciidoc +++ b/docs/reference/cat.asciidoc @@ -27,7 +27,7 @@ the available commands. Each of the commands accepts a query string parameter `v` to turn on verbose output. -[source,shell] +[source,sh] -------------------------------------------------- % curl 'localhost:9200/_cat/master?v' id ip node @@ -41,7 +41,7 @@ EGtKWZlWQYWDmX29fUnp3Q 127.0.0.1 Grey, Sara Each of the commands accepts a query string parameter `help` which will output its available columns. -[source,shell] +[source,sh] -------------------------------------------------- % curl 'localhost:9200/_cat/master?help' id | node id @@ -56,7 +56,7 @@ node | node name Each of the commands accepts a query string parameter `h` which forces only those columns to appear. -[source,shell] +[source,sh] -------------------------------------------------- % curl 'n1:9200/_cat/nodes?h=ip,port,heapPercent,name' 192.168.56.40 9300 40.3 Captain Universe @@ -87,7 +87,7 @@ off human mode. We'll use a byte-level resolution. Then we'll pipe our output into `sort` using the appropriate column, which in this case is the eighth one. -[source,shell] +[source,sh] -------------------------------------------------- % curl '192.168.56.10:9200/_cat/indices?bytes=b' | sort -rnk8 green wiki2 3 0 10000 0 105274918 105274918 diff --git a/docs/reference/cat/alias.asciidoc b/docs/reference/cat/alias.asciidoc index 39b94911e26..4b1cde5f472 100644 --- a/docs/reference/cat/alias.asciidoc +++ b/docs/reference/cat/alias.asciidoc @@ -4,7 +4,7 @@ `aliases` shows information about currently configured aliases to indices including filter and routing info. -[source,shell] +[source,sh] -------------------------------------------------- % curl '192.168.56.10:9200/_cat/aliases?v' alias index filter indexRouting searchRouting diff --git a/docs/reference/cat/allocation.asciidoc b/docs/reference/cat/allocation.asciidoc index 69eb92a9d35..6fbdd9d43cc 100644 --- a/docs/reference/cat/allocation.asciidoc +++ b/docs/reference/cat/allocation.asciidoc @@ -4,7 +4,7 @@ `allocation` provides a snapshot of how many shards are allocated to each data node and how much disk space they are using. -[source,shell] +[source,sh] -------------------------------------------------- % curl '192.168.56.10:9200/_cat/allocation?v' shards diskUsed diskAvail diskRatio ip node diff --git a/docs/reference/cat/count.asciidoc b/docs/reference/cat/count.asciidoc index 51b246b1077..f264f825a48 100644 --- a/docs/reference/cat/count.asciidoc +++ b/docs/reference/cat/count.asciidoc @@ -4,7 +4,7 @@ `count` provides quick access to the document count of the entire cluster, or individual indices. -[source,shell] +[source,sh] -------------------------------------------------- % curl 192.168.56.10:9200/_cat/indices green wiki1 3 0 10000 331 168.5mb 168.5mb diff --git a/docs/reference/cat/fielddata.asciidoc b/docs/reference/cat/fielddata.asciidoc index c3ac311a622..250d0b39eaf 100644 --- a/docs/reference/cat/fielddata.asciidoc +++ b/docs/reference/cat/fielddata.asciidoc @@ -4,7 +4,7 @@ `fielddata` shows how much heap memory is currently being used by fielddata on every data node in the cluster.
-[source,shell] +[source,sh] -------------------------------------------------- % curl '192.168.56.10:9200/_cat/fielddata?v' id host ip node total body text @@ -15,7 +15,7 @@ yaDkp-G3R0q1AJ-HUEvkSQ myhost3 10.20.100.202 Microchip 284.6kb 109.2kb 175.3 Fields can be specified either as a query parameter, or in the URL path: -[source,shell] +[source,sh] -------------------------------------------------- % curl '192.168.56.10:9200/_cat/fielddata?v&fields=body' id host ip node total body diff --git a/docs/reference/cat/health.asciidoc b/docs/reference/cat/health.asciidoc index a5112031e3d..6b12a15dee2 100644 --- a/docs/reference/cat/health.asciidoc +++ b/docs/reference/cat/health.asciidoc @@ -5,7 +5,7 @@ from `/_cluster/health`. It has one option `ts` to disable the timestamping. -[source,shell] +[source,sh] -------------------------------------------------- % curl 192.168.56.10:9200/_cat/health 1384308967 18:16:07 foo green 3 3 3 3 0 0 0 @@ -17,7 +17,7 @@ foo green 3 3 3 3 0 0 0 0 A common use of this command is to verify the health is consistent across nodes: -[source,shell] +[source,sh] -------------------------------------------------- % pssh -i -h list.of.cluster.hosts curl -s localhost:9200/_cat/health [1] 20:20:52 [SUCCESS] es3.vm @@ -33,7 +33,7 @@ time. With enough shards, starting a cluster, or even recovering after losing a node, can take time (depending on your network & disk). A way to track its progress is by using this command in a delayed loop: -[source,shell] +[source,sh] -------------------------------------------------- % while true; do curl 192.168.56.10:9200/_cat/health; sleep 120; done 1384309446 18:24:06 foo red 3 3 20 20 0 0 1812 0 diff --git a/docs/reference/cat/indices.asciidoc b/docs/reference/cat/indices.asciidoc index 5000cc19ec8..d63e55a8865 100644 --- a/docs/reference/cat/indices.asciidoc +++ b/docs/reference/cat/indices.asciidoc @@ -4,7 +4,7 @@ The `indices` command provides a cross-section of each index. This information *spans nodes*. -[source,shell] +[source,sh] -------------------------------------------------- % curl 'localhost:9200/_cat/indices/twi*?v' health status index pri rep docs.count docs.deleted store.size pri.store.size @@ -30,7 +30,7 @@ the view of relevant stats in the context of only the primaries. Which indices are yellow? -[source,shell] +[source,sh] -------------------------------------------------- % curl localhost:9200/_cat/indices | grep ^yell yellow open wiki 2 1 6401 1115 151.4mb 151.4mb @@ -39,7 +39,7 @@ yellow open twitter 5 1 11434 0 32mb 32mb What's my largest index by disk usage not including replicas? -[source,shell] +[source,sh] -------------------------------------------------- % curl 'localhost:9200/_cat/indices?bytes=b' | sort -rnk8 green open wiki 2 0 6401 1115 158843725 158843725 @@ -49,7 +49,7 @@ green open twitter2 2 0 2030 0 6125085 6125085 How many merge operations have the shards for the `wiki` completed? -[source,shell] +[source,sh] -------------------------------------------------- % curl 'localhost:9200/_cat/indices/wiki?pri&v&h=health,index,prirep,docs.count,mt' health index docs.count mt pri.mt @@ -58,7 +58,7 @@ green wiki 9646 16 16 How much memory is used per index? 
-[source,shell] +[source,sh] -------------------------------------------------- % curl 'localhost:9200/_cat/indices?v&h=i,tm' i tm diff --git a/docs/reference/cat/master.asciidoc b/docs/reference/cat/master.asciidoc index bd7f5b97664..6189948d912 100644 --- a/docs/reference/cat/master.asciidoc +++ b/docs/reference/cat/master.asciidoc @@ -4,7 +4,7 @@ `master` doesn't have any extra options. It simply displays the master's node ID, bound IP address, and node name. -[source,shell] +[source,sh] -------------------------------------------------- % curl 'localhost:9200/_cat/master?v' id ip node @@ -15,7 +15,7 @@ This information is also available via the `nodes` command, but this is slightly shorter when all you want to do, for example, is verify all nodes agree on the master: -[source,shell] +[source,sh] -------------------------------------------------- % pssh -i -h list.of.cluster.hosts curl -s localhost:9200/_cat/master [1] 19:16:37 [SUCCESS] es3.vm diff --git a/docs/reference/cat/nodes.asciidoc b/docs/reference/cat/nodes.asciidoc index d5c6036ef35..0eccba76827 100644 --- a/docs/reference/cat/nodes.asciidoc +++ b/docs/reference/cat/nodes.asciidoc @@ -25,7 +25,7 @@ ActN 3806 192.168.56.20 9300 {version} {jdk} The next few give a picture of your heap, memory, and load. -[source,shell] +[source,sh] -------------------------------------------------- diskAvail heapPercent heapMax ramPercent ramMax load 72.1gb 31.3 93.9mb 81 239.1mb 0.24 @@ -39,7 +39,7 @@ ones. How many master-eligible nodes do I have? How many client nodes? It looks like someone restarted a node recently; which one was it? -[source,shell] +[source,sh] -------------------------------------------------- uptime data/client master name 3.5h d m Boneyard @@ -182,6 +182,9 @@ phase operations |0 phase |43ms |`search.query_total` |`sqto`, `searchFetchTotal` |No |Number of query operations |9 +|`search.scroll_current` |`scc`, `searchScrollCurrent` |No |Open scroll contexts |2 +|`search.scroll_time` |`scti`, `searchScrollTime` |No |Time scroll contexts held open|2m +|`search.scroll_total` |`scto`, `searchScrollTotal` |No |Completed scroll contexts |1 |`segments.count` |`sc`, `segmentsCount` |No |Number of segments |4 |`segments.memory` |`sm`, `segmentsMemory` |No |Memory used by segments |1.4kb diff --git a/docs/reference/cat/pending_tasks.asciidoc b/docs/reference/cat/pending_tasks.asciidoc index 35c3381eb94..5452052669c 100644 --- a/docs/reference/cat/pending_tasks.asciidoc +++ b/docs/reference/cat/pending_tasks.asciidoc @@ -5,7 +5,7 @@ <> API in a convenient tabular format. -[source,shell] +[source,sh] -------------------------------------------------- % curl 'localhost:9200/_cat/pending_tasks?v' insertOrder timeInQueue priority source diff --git a/docs/reference/cat/plugins.asciidoc b/docs/reference/cat/plugins.asciidoc index 19e597ba7c6..a970463ff11 100644 --- a/docs/reference/cat/plugins.asciidoc +++ b/docs/reference/cat/plugins.asciidoc @@ -3,7 +3,7 @@ The `plugins` command provides a view per node of running plugins. This information *spans nodes*. -[source,shell] +[source,sh] ------------------------------------------------------------------------------ % curl 'localhost:9200/_cat/plugins?v' name component version type isolation url diff --git a/docs/reference/cat/recovery.asciidoc b/docs/reference/cat/recovery.asciidoc index 857e631cec2..64265677609 100644 --- a/docs/reference/cat/recovery.asciidoc +++ b/docs/reference/cat/recovery.asciidoc @@ -12,7 +12,7 @@ way for shards to be loaded from disk when a node starts up. 
As an example, here is what the recovery state of a cluster may look like when there are no shards in transit from one node to another: -[source,shell] +[source,sh] ---------------------------------------------------------------------------- > curl -XGET 'localhost:9200/_cat/recovery?v' index shard time type stage source target files percent bytes percent @@ -28,7 +28,7 @@ Now let's see what a live recovery looks like. By increasing the replica count of our index and bringing another node online to host the replicas, we can see what a live shard recovery looks like. -[source,shell] +[source,sh] ---------------------------------------------------------------------------- > curl -XPUT 'localhost:9200/wiki/_settings' -d'{"number_of_replicas":1}' {"acknowledged":true} @@ -51,7 +51,7 @@ Finally, let's see what a snapshot recovery looks like. Assuming I have previous made a backup of my index, I can restore it using the <> API. -[source,shell] +[source,sh] -------------------------------------------------------------------------------- > curl -XPOST 'localhost:9200/_snapshot/imdb/snapshot_2/_restore' {"acknowledged":true} diff --git a/docs/reference/cat/segments.asciidoc b/docs/reference/cat/segments.asciidoc index 9cc5215f6b6..f4ac6a4a22a 100644 --- a/docs/reference/cat/segments.asciidoc +++ b/docs/reference/cat/segments.asciidoc @@ -5,7 +5,7 @@ The `segments` command provides low level information about the segments in the shards of an index. It provides information similar to the link:indices-segments.html[_segments] endpoint. -[source,shell] +[source,sh] -------------------------------------------------- % curl 'http://localhost:9200/_cat/segments?v' index shard prirep ip segment generation docs.count [...] @@ -14,7 +14,7 @@ test1 2 p 192.168.2.105 _0 0 1 test1 3 p 192.168.2.105 _2 2 1 -------------------------------------------------- -[source,shell] +[source,sh] -------------------------------------------------- [...] docs.deleted size size.memory committed searchable version compound 0 2.9kb 7818 false true 4.10.2 true diff --git a/docs/reference/cat/shards.asciidoc b/docs/reference/cat/shards.asciidoc index 07662ce3cf1..a4359af258e 100644 --- a/docs/reference/cat/shards.asciidoc +++ b/docs/reference/cat/shards.asciidoc @@ -7,7 +7,7 @@ docs, the bytes it takes on disk, and the node where it's located. Here we see a single index, with three primary shards and no replicas: -[source,shell] +[source,sh] -------------------------------------------------- % curl 192.168.56.20:9200/_cat/shards wiki1 0 p STARTED 3014 31.1mb 192.168.56.10 Stiletto @@ -22,7 +22,7 @@ If you have many shards, you may wish to limit which indices show up in the output. You can always do this with `grep`, but you can save some bandwidth by supplying an index pattern to the end. -[source,shell] +[source,sh] -------------------------------------------------- % curl 192.168.56.20:9200/_cat/shards/wiki2 wiki2 0 p STARTED 197 3.2mb 192.168.56.10 Stiletto @@ -37,7 +37,7 @@ wiki2 2 p STARTED 275 7.8mb 192.168.56.20 Commander Kraken Let's say you've checked your health and you see two relocating shards. Where are they from and where are they going? -[source,shell] +[source,sh] -------------------------------------------------- % curl 192.168.56.10:9200/_cat/health 1384315316 20:01:56 foo green 3 3 12 6 2 0 0 @@ -52,7 +52,7 @@ wiki1 1 r RELOCATING 3013 29.6mb 192.168.56.10 Stiletto -> 192.168.56.30 Frankie Before a shard can be used, it goes through an `INITIALIZING` state. `shards` can show you which ones. 
-[source,shell] +[source,sh] -------------------------------------------------- % curl -XPUT 192.168.56.20:9200/_settings -d'{"number_of_replicas":1}' {"acknowledged":true} @@ -69,7 +69,7 @@ If a shard cannot be assigned, for example you've overallocated the number of replicas for the number of nodes in the cluster, they will remain `UNASSIGNED`. -[source,shell] +[source,sh] -------------------------------------------------- % curl -XPUT 192.168.56.20:9200/_settings -d'{"number_of_replicas":3}' % curl 192.168.56.20:9200/_cat/health diff --git a/docs/reference/cat/thread_pool.asciidoc b/docs/reference/cat/thread_pool.asciidoc index 4372a761ffe..508b3ee167b 100644 --- a/docs/reference/cat/thread_pool.asciidoc +++ b/docs/reference/cat/thread_pool.asciidoc @@ -4,7 +4,7 @@ The `thread_pool` command shows cluster wide thread pool statistics per node. By default the active, queue and rejected statistics are returned for the bulk, index and search thread pools. -[source,shell] +[source,sh] -------------------------------------------------- % curl 192.168.56.10:9200/_cat/thread_pool host1 192.168.1.35 0 0 0 0 0 0 0 0 0 @@ -13,7 +13,7 @@ host2 192.168.1.36 0 0 0 0 0 0 0 0 0 The first two columns contain the host and ip of a node. -[source,shell] +[source,sh] -------------------------------------------------- host ip host1 192.168.1.35 @@ -22,7 +22,7 @@ host2 192.168.1.36 The next three columns show the active queue and rejected statistics for the bulk thread pool. -[source,shell] +[source,sh] -------------------------------------------------- bulk.active bulk.queue bulk.rejected 0 0 0 @@ -32,7 +32,7 @@ The remaining columns show the active queue and rejected statistics of the index Also other statistics of different thread pools can be retrieved by using the `h` (header) parameter. -[source,shell] +[source,sh] -------------------------------------------------- % curl 'localhost:9200/_cat/thread_pool?v&h=id,host,suggest.active,suggest.rejected,suggest.completed' host suggest.active suggest.rejected suggest.completed diff --git a/docs/reference/images/pipeline_serialdiff/dow.png b/docs/reference/images/pipeline_serialdiff/dow.png new file mode 100644 index 00000000000..d46f507ded1 Binary files /dev/null and b/docs/reference/images/pipeline_serialdiff/dow.png differ diff --git a/docs/reference/images/pipeline_serialdiff/lemmings.png b/docs/reference/images/pipeline_serialdiff/lemmings.png new file mode 100644 index 00000000000..25f3bb7d1a1 Binary files /dev/null and b/docs/reference/images/pipeline_serialdiff/lemmings.png differ diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index 804c8894bc6..075c460321f 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -92,7 +92,7 @@ specific index module: index visible to search. Defaults to `1s`. Can be set to `-1` to disable refresh. 
-`index.codec`:: +[[index-codec]] `index.codec`:: experimental[] The `default` value compresses stored data with LZ4 compression, but this can be set to `best_compression` for a higher diff --git a/docs/reference/index-modules/allocation.asciidoc b/docs/reference/index-modules/allocation.asciidoc index c0a94c85eb5..66e41230687 100644 --- a/docs/reference/index-modules/allocation.asciidoc +++ b/docs/reference/index-modules/allocation.asciidoc @@ -12,6 +12,8 @@ include::allocation/filtering.asciidoc[] include::allocation/delayed.asciidoc[] +include::allocation/prioritization.asciidoc[] + include::allocation/total_shards.asciidoc[] diff --git a/docs/reference/index-modules/allocation/delayed.asciidoc b/docs/reference/index-modules/allocation/delayed.asciidoc index 31f5b8092f3..cc9e72e3647 100644 --- a/docs/reference/index-modules/allocation/delayed.asciidoc +++ b/docs/reference/index-modules/allocation/delayed.asciidoc @@ -32,7 +32,7 @@ requests) which have been automatically <>. The allocation of replica shards which become unassigned because a node has left can be delayed with the `index.unassigned.node_left.delayed_timeout` -dynamic setting, which defaults to `0` (reassign shards immediately). +dynamic setting, which defaults to `1m`. This setting can be updated on a live index (or on all indices): diff --git a/docs/reference/index-modules/allocation/filtering.asciidoc b/docs/reference/index-modules/allocation/filtering.asciidoc index d5e30fb76bb..99fd1dc7e2e 100644 --- a/docs/reference/index-modules/allocation/filtering.asciidoc +++ b/docs/reference/index-modules/allocation/filtering.asciidoc @@ -23,7 +23,7 @@ These metadata attributes can be used with the group of nodes. For instance, we can move the index `test` to either `big` or `medium` nodes as follows: -[source,json] +[source,js] ------------------------ PUT test/_settings { @@ -35,7 +35,7 @@ PUT test/_settings Alternatively, we can move the index `test` away from the `small` nodes with an `exclude` rule: -[source,json] +[source,js] ------------------------ PUT test/_settings { @@ -48,7 +48,7 @@ Multiple rules can be specified, in which case all conditions must be satisfied. For instance, we could move the index `test` to `big` nodes in `rack1` with the following: -[source,json] +[source,js] ------------------------ PUT test/_settings { @@ -87,7 +87,7 @@ These special attributes are also supported: All attribute values can be specified with wildcards, eg: -[source,json] +[source,js] ------------------------ PUT test/_settings { diff --git a/docs/reference/index-modules/allocation/prioritization.asciidoc b/docs/reference/index-modules/allocation/prioritization.asciidoc new file mode 100644 index 00000000000..b3307e90b79 --- /dev/null +++ b/docs/reference/index-modules/allocation/prioritization.asciidoc @@ -0,0 +1,55 @@ +[[recovery-prioritization]] +=== Index recovery prioritization + +Unallocated shards are recovered in order of priority, whenever possible. +Indices are sorted into priority order as follows: + +* the optional `index.priority` setting (higher before lower) +* the index creation date (higher before lower) +* the index name (higher before lower) + +This means that, by default, newer indices will be recovered before older indices. + +Use the per-index dynamically updateable `index.priority` setting to customise +the index prioritization order. 
For instance: + +[source,js] +------------------------------ +PUT index_1 + +PUT index_2 + +PUT index_3 +{ + "settings": { + "index.priority": 10 + } +} + +PUT index_4 +{ + "settings": { + "index.priority": 5 + } +} +------------------------------ +// AUTOSENSE + +In the above example: + +* `index_3` will be recovered first because it has the highest `index.priority`. +* `index_4` will be recovered next because it has the next highest priority. +* `index_2` will be recovered next because it was created more recently. +* `index_1` will be recovered last. + +This setting accepts an integer, and can be updated on a live index with the +<>: + +[source,js] +------------------------------ +PUT index_4/_settings +{ + "index.priority": 1 +} +------------------------------ +// AUTOSENSE diff --git a/docs/reference/index-modules/mapper.asciidoc b/docs/reference/index-modules/mapper.asciidoc index 9b55f630f71..4f82e2ffe7e 100644 --- a/docs/reference/index-modules/mapper.asciidoc +++ b/docs/reference/index-modules/mapper.asciidoc @@ -52,7 +52,7 @@ creating a new index. [float] === Mapper settings -`index.mapper.dynamic` (_static_):: +`index.mapper.dynamic` (_dynamic_):: Dynamic creation of mappings for unmapped types can be completely disabled by setting `index.mapper.dynamic` to `false`. diff --git a/docs/reference/index-modules/store.asciidoc b/docs/reference/index-modules/store.asciidoc index c603a00d89f..79712fbc266 100644 --- a/docs/reference/index-modules/store.asciidoc +++ b/docs/reference/index-modules/store.asciidoc @@ -23,7 +23,7 @@ index.store.type: niofs It is a _static_ setting that can be set on a per-index basis at index creation time: -[source,json] +[source,js] --------------------------------- PUT /my_index { diff --git a/docs/reference/indices.asciidoc b/docs/reference/indices.asciidoc index c9af8c12271..634e48801e5 100644 --- a/docs/reference/indices.asciidoc +++ b/docs/reference/indices.asciidoc @@ -51,6 +51,7 @@ and warmers. * <> * <> * <> +* <> [float] [[status-management]] @@ -93,12 +94,16 @@ include::indices/templates.asciidoc[] include::indices/warmers.asciidoc[] +include::indices/shadow-replicas.asciidoc[] + include::indices/stats.asciidoc[] include::indices/segments.asciidoc[] include::indices/recovery.asciidoc[] +include::indices/shard-stores.asciidoc[] + include::indices/clearcache.asciidoc[] include::indices/flush.asciidoc[] @@ -107,7 +112,5 @@ include::indices/refresh.asciidoc[] include::indices/optimize.asciidoc[] -include::indices/shadow-replicas.asciidoc[] - include::indices/upgrade.asciidoc[] diff --git a/docs/reference/indices/aliases.asciidoc b/docs/reference/indices/aliases.asciidoc index 9d04d7b2789..7312de48c5c 100644 --- a/docs/reference/indices/aliases.asciidoc +++ b/docs/reference/indices/aliases.asciidoc @@ -63,6 +63,23 @@ curl -XPOST 'http://localhost:9200/_aliases' -d ' }' -------------------------------------------------- +Alternatively, you can use a glob pattern to associate an alias with +more than one index that shares a common name: + +[source,js] +-------------------------------------------------- +curl -XPOST 'http://localhost:9200/_aliases' -d ' +{ + "actions" : [ + { "add" : { "index" : "test*", "alias" : "all_test_indices" } } + ] +}' +-------------------------------------------------- + +In this case, the alias is a point-in-time alias that will group all +current indices that match; it will not automatically update as new +indices that match this pattern are added/removed. 
+ It is an error to index to an alias which points to more than one index. [float] diff --git a/docs/reference/indices/flush.asciidoc b/docs/reference/indices/flush.asciidoc index 3ee2601bb38..ef8cd27548b 100644 --- a/docs/reference/indices/flush.asciidoc +++ b/docs/reference/indices/flush.asciidoc @@ -70,7 +70,7 @@ recovery without the synced flush marker would take a long time. To check whether a shard has a marker or not, look for the `commit` section of shard stats returned by the <> API: -[source,bash] +[source,sh] -------------------------------------------------- GET /twitter/_stats/commit?level=shards -------------------------------------------------- @@ -134,7 +134,7 @@ NOTE: It is harmless to request a synced flush while there is ongoing indexing. that are not will fail. Any shards that succeeded will have faster recovery times. -[source,bash] +[source,sh] -------------------------------------------------- POST /twitter/_flush/synced -------------------------------------------------- diff --git a/docs/reference/indices/shard-stores.asciidoc b/docs/reference/indices/shard-stores.asciidoc new file mode 100644 index 00000000000..1a9161155af --- /dev/null +++ b/docs/reference/indices/shard-stores.asciidoc @@ -0,0 +1,73 @@ +[[indices-shards-stores]] +== Indices Shard Stores + +Provides store information for shard copies of indices. +Store information reports which nodes hold shard copies, the shard +copy version (indicating how recent each copy is), and any exceptions +encountered while opening the shard index or from an earlier engine failure. + +By default, it only lists store information for shards that have at least one +unallocated copy. When the cluster health status is yellow, this will list +store information for shards that have at least one unassigned replica. +When the cluster health status is red, this will list store information +for shards which have unassigned primaries. + +Endpoints include shard stores information for a specific index, several +indices, or all indices: + +[source,js] +-------------------------------------------------- +curl -XGET 'http://localhost:9200/test/_shard_stores' +curl -XGET 'http://localhost:9200/test1,test2/_shard_stores' +curl -XGET 'http://localhost:9200/_shard_stores' +-------------------------------------------------- + +The scope of shards for which store information is listed can be changed through the +`status` param, which defaults to 'yellow' and 'red'. 'yellow' lists store information for +shards with at least one unassigned replica, and 'red' for shards with an unassigned +primary shard. +Use 'green' to list store information for shards with all assigned copies. + +[source,js] +-------------------------------------------------- +curl -XGET 'http://localhost:9200/_shard_stores?status=green' +-------------------------------------------------- + +Response: + +The shard stores information is grouped by indices and shard ids. + +[source,js] +-------------------------------------------------- +{ + ... + "0": { <1> + "stores": [ <2> + { + "sPa3OgxLSYGvQ4oPs-Tajw": { <3> + "name": "node_t0", + "transport_address": "local[1]", + "attributes": { + "enable_custom_paths": "true", + "mode": "local" + } + }, + "version": 4, <4> + "allocation" : "primary" | "replica" | "unused", <5> + "store_exception": ... <6> + }, + ... + ] + }, + ... 
+} +-------------------------------------------------- +<1> The key is the corresponding shard id for the store information +<2> A list of store information for all copies of the shard +<3> The node information that hosts a copy of the store, the key + is the unique node id. +<4> The version of the store copy +<5> The status of the store copy, whether it is used as a + primary, replica or not used at all +<6> Any exception encountered while opening the shard index or + from earlier engine failure diff --git a/docs/reference/mapping/fields.asciidoc b/docs/reference/mapping/fields.asciidoc index 2147d41a3dc..e2d42a93558 100644 --- a/docs/reference/mapping/fields.asciidoc +++ b/docs/reference/mapping/fields.asciidoc @@ -1,27 +1,78 @@ [[mapping-fields]] -== Fields +== Meta-Fields -Each mapping has a number of fields associated with it -which can be used to control how the document metadata -(eg <>) is indexed. +Each document has metadata associated with it, such as the `_index`, mapping +<>, and `_id` meta-fields. The behaviour of some of these meta-fields +can be customised when a mapping type is created. + +The meta-fields are: + +[horizontal] +<>:: + + The index to which the document belongs. + +<>:: + + A composite field consisting of the `_type` and the `_id`. + +<>:: + + The document's <>. + +<>:: + + The document's ID. + +<>:: + + The original JSON representing the body of the document. + +<>:: + + A _catch-all_ field that indexes the values of all other fields. + +<>:: + + All fields in the document which contain non-null values. + +<>:: + + Used to create a parent-child relationship between two mapping types. + +<>:: + + A custom routing value which routes a document to a particular shard. + +<>:: + + The size of the `_source` field in bytes. + +<>:: + + A timestamp associated with the document, either specified manually or auto-generated. + +<>:: + + How long a document should live before it is automatically deleted. + +include::fields/index-field.asciidoc[] include::fields/uid-field.asciidoc[] -include::fields/id-field.asciidoc[] - include::fields/type-field.asciidoc[] +include::fields/id-field.asciidoc[] + include::fields/source-field.asciidoc[] include::fields/all-field.asciidoc[] -include::fields/parent-field.asciidoc[] - include::fields/field-names-field.asciidoc[] -include::fields/routing-field.asciidoc[] +include::fields/parent-field.asciidoc[] -include::fields/index-field.asciidoc[] +include::fields/routing-field.asciidoc[] include::fields/size-field.asciidoc[] diff --git a/docs/reference/mapping/fields/all-field.asciidoc b/docs/reference/mapping/fields/all-field.asciidoc index 519d3809923..d6037f4e804 100644 --- a/docs/reference/mapping/fields/all-field.asciidoc +++ b/docs/reference/mapping/fields/all-field.asciidoc @@ -1,78 +1,416 @@ [[mapping-all-field]] -=== `_all` +=== `_all` field -The idea of the `_all` field is that it includes the text of one or more -other fields within the document indexed. It can come very handy -especially for search requests, where we want to execute a search query -against the content of a document, without knowing which fields to -search on. This comes at the expense of CPU cycles and index size. +The `_all` field is a special _catch-all_ field which concatenates the values +of all of the other fields into one big string, which is then +<> and indexed, but not stored. This means that it can be +searched, but not retrieved. -The `_all` fields can be completely disabled. 
Explicit field mappings and -object mappings can be excluded / included in the `_all` field. By -default, it is enabled and all fields are included in it for ease of -use. - -When disabling the `_all` field, it is a good practice to set -`index.query.default_field` to a different value (for example, if you -have a main "message" field in your data, set it to `message`). - -One of the nice features of the `_all` field is that it takes into -account specific fields boost levels. Meaning that if a title field is -boosted more than content, the title (part) in the `_all` field will -mean more than the content (part) in the `_all` field. - -Here is a sample mapping: +The `_all` field allows you to search for values in documents without knowing +which field contains the value. This makes it a useful option when getting +started with a new dataset. For instance: [source,js] --------------------------------------------------- +-------------------------------- +PUT my_index/user/1 <1> { - "person" : { - "_all" : {"enabled" : true}, - "properties" : { - "name" : { - "type" : "object", - "dynamic" : false, - "properties" : { - "first" : {"type" : "string", "store" : true , "include_in_all" : false}, - "last" : {"type" : "string", "index" : "not_analyzed"} - } - }, - "address" : { - "type" : "object", - "include_in_all" : false, - "properties" : { - "first" : { - "properties" : { - "location" : {"type" : "string", "store" : true} - } - }, - "last" : { - "properties" : { - "location" : {"type" : "string"} - } - } - } - }, - "simple1" : {"type" : "long", "include_in_all" : true}, - "simple2" : {"type" : "long", "include_in_all" : false} - } - } + "first_name": "John", + "last_name": "Smith", + "date_of_birth": "1970-10-24" } --------------------------------------------------- -The `_all` fields allows for `store`, `term_vector` and `analyzer` (with -specific `analyzer` and `search_analyzer`) to be set. +GET my_index/_search +{ + "query": { + "match": { + "_all": "john smith 1970" + } + } +} +-------------------------------- +// AUTOSENSE +<1> The `_all` field will contain the terms: [ `"john"`, `"smith"`, `"1970"`, `"10"`, `"24"` ] -[float] -[[highlighting]] -==== Highlighting +[NOTE] +.All values treated as strings +============================================================================= + +The `date_of_birth` field in the above example is recognised as a `date` field +and so will index a single term representing `1970-10-24 00:00:00 UTC`. The +`_all` field, however, treats all values as strings, so the date value is +indexed as the three string terms: `"1970"`, `"24"`, `"10"`. + +It is important to note that the `_all` field combines the original values +from each field as a string. It does not combine the _terms_ from each field. + +============================================================================= + +The `_all` field is just a <> field, and accepts the same +parameters that other string fields accept, including `analyzer`, +`term_vectors`, `index_options`, and `store`. + +The `_all` field can be useful, especially when exploring new data using +simple filtering. However, by concatenating field values into one big string, +the `_all` field loses the distinction between short fields (more relevant) +and long fields (less relevant). For use cases where search relevance is +important, it is better to query individual fields specifically. + +The `_all` field is not free: it requires extra CPU cycles and uses more disk +space. If not needed, it can be completely <> or +customised on a <>. 
+ +[[querying-all-field]] +==== Using the `_all` field in queries + +The <> and +<> queries query +the `_all` field by default, unless another field is specified: + +[source,js] +-------------------------------- +GET _search +{ + "query": { + "query_string": { + "query": "john smith 1970" + } + } +} +-------------------------------- +// AUTOSENSE + +The same goes for the `?q=` parameter in <> (which is rewritten to a `query_string` query internally): + +[source,js] +-------------------------------- +GET _search?q=john+smith+1970 +-------------------------------- + +Other queries, such as the <> and +<> queries, require you to specify +the `_all` field explicitly, as per the +<>. + +[[disabling-all-field]] +==== Disabling the `_all` field + +The `_all` field can be completely disabled per-type by setting `enabled` to +`false`: + +[source,js] +-------------------------------- +PUT my_index +{ + "mappings": { + "type_1": { <1> + "properties": {...} + }, + "type_2": { <2> + "_all": { + "enabled": false + }, + "properties": {...} + } + } +} +-------------------------------- +// AUTOSENSE + +<1> The `_all` field in `type_1` is enabled. +<2> The `_all` field in `type_2` is completely disabled. + +If the `_all` field is disabled, then URI search requests and the +`query_string` and `simple_query_string` queries will not be able to use it +for queries (see <>). You can configure them to use a +different field with the `index.query.default_field` setting: + +[source,js] +-------------------------------- +PUT my_index +{ + "mappings": { + "my_type": { + "_all": { + "enabled": false <1> + }, + "properties": { + "content": { + "type": "string" + } + } + } + }, + "settings": { + "index.query.default_field": "content" <2> + } +} +-------------------------------- +// AUTOSENSE + +<1> The `_all` field is disabled for the `my_type` type. +<2> The `query_string` query will default to querying the `content` field in this index. + +[[include-in-all]] +==== Including specific fields in `_all` + +Individual fields can be included or excluded from the `_all` field with the +`include_in_all` setting, which defaults to `true`: + +[source,js] +-------------------------------- +PUT my_index +{ + "mappings": { + "my_type": { + "properties": { + "title": { <1> + "type": "string" + }, + "content": { <1> + "type": "string" + }, + "date": { <2> + "type": "date", + "include_in_all": false + } + } + } + } +} +-------------------------------- +// AUTOSENSE + +<1> The `title` and `content` fields will be included in the `_all` field. +<2> The `date` field will not be included in the `_all` field. + +The `include_in_all` parameter can also be set at the type level and on +<> or <> fields, +in which case all sub-fields inherit that setting. For instance: + +[source,js] +-------------------------------- +PUT my_index +{ + "mappings": { + "my_type": { + "include_in_all": false, <1> + "properties": { + "title": { "type": "string" }, + "author": { + "include_in_all": true, <2> + "properties": { + "first_name": { "type": "string" }, + "last_name": { "type": "string" } + } + }, + "editor": { + "properties": { + "first_name": { "type": "string" }, <3> + "last_name": { "type": "string", "include_in_all": true } <3> + } + } + } + } + } +} +-------------------------------- +// AUTOSENSE + +<1> All fields in `my_type` are excluded from `_all`. +<2> The `author.first_name` and `author.last_name` fields are included in `_all`. +<3> Only the `editor.last_name` field is included in `_all`. 
+ The `editor.first_name` inherits the type-level setting and is excluded. + +[[all-field-and-boosting]] +==== Index boosting and the `_all` field + +Individual fields can be _boosted_ at index time, with the `boost` parameter. +The `_all` field takes these boosts into account: + +[source,js] +-------------------------------- +PUT myindex +{ + "mappings": { + "mytype": { + "properties": { + "title": { <1> + "type": "string", + "boost": 2 + }, + "content": { <1> + "type": "string" + } + } + } + } +} +-------------------------------- +// AUTOSENSE + +<1> When querying the `_all` field, words that originated in the + `title` field are twice as relevant as words that originated in + the `content` field. + +WARNING: Using index-time boosting with the `_all` field has a significant +impact on query performance. Usually the better solution is to query fields +individually, with optional query time boosting. + + +[[custom-all-fields]] +==== Custom `_all` fields + +While there is only a single `_all` field per index, the <> +parameter allows the creation of multiple __custom `_all` fields__. For +instance, `first_name` and `last_name` fields can be combined together into +the `full_name` field: + +[source,js] +-------------------------------- +PUT myindex +{ + "mappings": { + "mytype": { + "properties": { + "first_name": { + "type": "string", + "copy_to": "full_name" <1> + }, + "last_name": { + "type": "string", + "copy_to": "full_name" <1> + }, + "full_name": { + "type": "string" + } + } + } + } +} + +PUT myindex/mytype/1 +{ + "first_name": "John", + "last_name": "Smith" +} + +GET myindex/_search +{ + "query": { + "match": { + "full_name": "John Smith" + } + } +} +-------------------------------- +// AUTOSENSE + +<1> The `first_name` and `last_name` values are copied to the `full_name` field. + +[[highlighting-all-field]] +==== Highlighting and the `_all` field + +A field can only be used for <> if +the original string value is available, either from the +<> field or as a stored field. + +The `_all` field is not present in the `_source` field and it is not stored by +default, and so cannot be highlighted. There are two options. Either +<> or highlight the +<>. + +[[all-field-store]] +===== Store the `_all` field + +If `store` is set to `true`, then the original field value is retrievable and +can be highlighted: + +[source,js] +-------------------------------- +PUT myindex +{ + "mappings": { + "mytype": { + "_all": { + "store": true + } + } + } +} + +PUT myindex/mytype/1 +{ + "first_name": "John", + "last_name": "Smith" +} + +GET _search +{ + "query": { + "match": { + "_all": "John Smith" + } + }, + "highlight": { + "fields": { + "_all": {} + } + } +} +-------------------------------- +// AUTOSENSE + +Of course, storing the `_all` field will use significantly more disk space +and, because it is a combination of other fields, it may result in odd +highlighting results. + +The `_all` field also accepts the `term_vector` and `index_options` +parameters, allowing the use of the fast vector highlighter and the postings +highlighter. 
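+ +For instance, here is a minimal sketch of such a mapping (the index and type names are purely illustrative, and the mapping itself is an assumption based on the parameters listed above rather than an example taken from elsewhere in these docs): + +[source,js] +-------------------------------- +PUT myindex +{ + "mappings": { + "mytype": { + "_all": { + "store": true, <1> + "term_vector": "with_positions_offsets" <2> + } + } + } +} +-------------------------------- +// AUTOSENSE + +<1> Storing the `_all` field makes its original text retrievable for highlighting. +<2> `with_positions_offsets` term vectors allow the fast vector highlighter to be used on the `_all` field.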
+ +[[all-highlight-fields]] +===== Highlight original fields + +You can query the `_all` field, but use the original fields for highlighting as follows: + +[source,js] +-------------------------------- +PUT myindex +{ + "mappings": { + "mytype": { + "_all": {} + } + } +} + +PUT myindex/mytype/1 +{ + "first_name": "John", + "last_name": "Smith" +} + +GET _search +{ + "query": { + "match": { + "_all": "John Smith" <1> + } + }, + "highlight": { + "fields": { + "*_name": { <2> + "require_field_match": "false" <3> + } + } + } +} +-------------------------------- +// AUTOSENSE + +<1> The query inspects the `_all` field to find matching documents. +<2> Highlighting is performed on the two name fields, which are available from the `_source`. +<3> The query wasn't run against the name fields, so set `require_field_match` to `false`. -For any field to allow -<> it has -to be either stored or part of the `_source` field. By default the `_all` -field does not qualify for either, so highlighting for it does not yield -any data. -Although it is possible to `store` the `_all` field, it is basically an -aggregation of all fields, which means more data will be stored, and -highlighting it might produce strange results. diff --git a/docs/reference/mapping/fields/field-names-field.asciidoc b/docs/reference/mapping/fields/field-names-field.asciidoc index 4f3e4a2b3fb..2c40f72bbea 100644 --- a/docs/reference/mapping/fields/field-names-field.asciidoc +++ b/docs/reference/mapping/fields/field-names-field.asciidoc @@ -1,6 +1,55 @@ [[mapping-field-names-field]] -=== `_field_names` +=== `_field_names` field + +The `_field_names` field indexes the names of every field in a document that +contains any value other than `null`. This field is used by the +<> and <> +queries to find documents that either have or don't have any non-+null+ value +for a particular field. + +The value of the `_field_names` field is accessible in queries, aggregations, and +scripts: + +[source,js] +-------------------------- +# Example documents +PUT my_index/my_type/1 +{ + "title": "This is a document" +} + +PUT my_index/my_type/2 +{ + "title": "This is another document", + "body": "This document has a body" +} + +GET my_index/_search +{ + "query": { + "terms": { + "_field_names": [ "title" ] <1> + } + }, + "aggs": { + "Field names": { + "terms": { + "field": "_field_names", <2> + "size": 10 + } + } + }, + "script_fields": { + "Field names": { + "script": "doc['_field_names']" <3> + } + } +} + +-------------------------- +// AUTOSENSE + +<1> Querying on the `_field_names` field (also see the <> and <> queries) +<2> Aggregating on the `_field_names` field +<3> Accessing the `_field_names` field in scripts (inline scripts must be <> for this example to work) -The `_field_names` field indexes the field names of a document, which can later -be used to search for documents based on the fields that they contain typically -using the `exists` and `missing` filters. diff --git a/docs/reference/mapping/fields/id-field.asciidoc b/docs/reference/mapping/fields/id-field.asciidoc index 8150a13afe5..3aa6b927128 100644 --- a/docs/reference/mapping/fields/id-field.asciidoc +++ b/docs/reference/mapping/fields/id-field.asciidoc @@ -1,11 +1,44 @@ [[mapping-id-field]] -=== `_id` +=== `_id` field -Each document indexed is associated with an id and a type. The `_id` -field allows accessing only the id of a document. +Each document indexed is associated with a <> (see +<>) and an <>. 
The +`_id` field is not indexed as its value can be derived automatically from the +<> field. -Note, even though the `_id` is not indexed, all the APIs still work -(since they work with the `_uid` field), as well as fetching by ids -using `term`, `terms` or `prefix` queries/filters (including the -specific `ids` query/filter). +The value of the `_id` field is accessible in queries and scripts, but _not_ +in aggregations or when sorting, where the <> field +should be used instead: + +[source,js] +-------------------------- +# Example documents +PUT my_index/my_type/1 +{ + "text": "Document with ID 1" +} + +PUT my_index/my_type/2 +{ + "text": "Document with ID 2" +} + +GET my_index/_search +{ + "query": { + "terms": { + "_id": [ "1", "2" ] <1> + } + }, + "script_fields": { + "UID": { + "script": "doc['_id']" <2> + } + } +} +-------------------------- +// AUTOSENSE + +<1> Querying on the `_id` field (also see the <>) +<2> Accessing the `_id` field in scripts (inline scripts must be <> for this example to work) diff --git a/docs/reference/mapping/fields/index-field.asciidoc b/docs/reference/mapping/fields/index-field.asciidoc index 96a320b9fa5..c5db81ba834 100644 --- a/docs/reference/mapping/fields/index-field.asciidoc +++ b/docs/reference/mapping/fields/index-field.asciidoc @@ -1,15 +1,56 @@ [[mapping-index-field]] -=== `_index` +=== `_index` field -The ability to store in a document the index it belongs to. By default -it is disabled, in order to enable it, the following mapping should be -defined: +The name of the index that contains the document. This field is not indexed +but can be automatically derived from the index itself. + +Its value is accessible in queries, aggregations, scripts, and when sorting: [source,js] --------------------------------------------------- +-------------------------- +# Example documents +PUT index_1/my_type/1 { - "tweet" : { - "_index" : { "enabled" : true } - } + "text": "Document in index 1" } --------------------------------------------------- + +PUT index_2/my_type/2 +{ + "text": "Document in index 2" +} + +GET index_1,index_2/_search +{ + "query": { + "terms": { + "_index": ["index_1", "index_2"] <1> + } + }, + "aggs": { + "indices": { + "terms": { + "field": "_index", <2> + "size": 10 + } + } + }, + "sort": [ + { + "_index": { <3> + "order": "asc" + } + } + ], + "script_fields": { + "index_name": { + "script": "doc['_index']" <4> + } + } +} +-------------------------- +// AUTOSENSE + +<1> Querying on the `_index` field +<2> Aggregating on the `_index` field +<3> Sorting on the `_index` field +<4> Accessing the `_index` field in scripts (inline scripts must be <> for this example to work) diff --git a/docs/reference/mapping/fields/parent-field.asciidoc b/docs/reference/mapping/fields/parent-field.asciidoc index 3980df222c4..48fa567e217 100644 --- a/docs/reference/mapping/fields/parent-field.asciidoc +++ b/docs/reference/mapping/fields/parent-field.asciidoc @@ -1,54 +1,165 @@ [[mapping-parent-field]] -=== `_parent` +=== `_parent` field -TIP: It is highly recommend to reindex all indices with `_parent` field created before version 2.x. - The reason for this is to gain from all the optimizations added with the 2.0 release. +added[2.0.0,The parent-child implementation has been completely rewritten. It is advisable to reindex any 1.x indices which use parent-child to take advantage of the new optimizations] -The parent field mapping is defined on a child mapping, and points to -the parent type this child relates to. 
For example, in case of a `blog` -type and a `blog_tag` type child document, the mapping for `blog_tag` -should be: +A parent-child relationship can be established between documents in the same +index by making one mapping type the parent of another: [source,js] -------------------------------------------------- +PUT my_index { - "blog_tag" : { - "_parent" : { - "type" : "blog" - } + "mappings": { + "my_parent": {}, + "my_child": { + "_parent": { + "type": "my_parent" <1> + } } + } +} + +PUT my_index/my_parent/1 <2> +{ + "text": "This is a parent document" +} + +PUT my_index/my_child/2?parent=1 <3> +{ + "text": "This is a child document" +} + +PUT my_index/my_child/3?parent=1 <3> +{ + "text": "This is another child document" +} + +GET my_index/my_parent/_search +{ + "query": { + "has_child": { <4> + "type": "my_child", + "query": { + "match": { + "text": "child document" + } + } + } + } } -------------------------------------------------- +// AUTOSENSE +<1> The `my_parent` type is parent to the `my_child` type. +<2> Index a parent document. +<3> Index two child documents, specifying the parent document's ID. +<4> Find all parent documents that have children which match the query. -The mapping is automatically stored and indexed (meaning it can be -searched on using the `_parent` field notation). -==== Limitations +See the <> and +<> queries, +the <> aggregation, +and <> for more information. -The `_parent.type` setting can only point to a type that doesn't exist yet. -This means that a type can't become a parent type after is has been created. +The value of the `_parent` field is accessible in queries, aggregations, scripts, +and when sorting: -The `parent.type` setting can't point to itself. This means self referential -parent/child isn't supported. +[source,js] +-------------------------- +GET my_index/_search +{ + "query": { + "terms": { + "_parent": [ "1" ] <1> + } + }, + "aggs": { + "parents": { + "terms": { + "field": "_parent", <2> + "size": 10 + } + } + }, + "sort": [ + { + "_parent": { <3> + "order": "desc" + } + } + ], + "script_fields": { + "parent": { + "script": "doc['_parent']" <4> + } + } +} +-------------------------- +// AUTOSENSE + +<1> Querying on the `_parent` field (also see the <> and the <>) +<2> Aggregating on the `_parent` field (also see the <> aggregation) +<3> Sorting on the `_parent` field +<4> Accessing the `_parent` field in scripts (inline scripts must be <> for this example to work) + + +==== Parent-child restrictions + +* The parent and child types must be different -- parent-child relationships + cannot be established between documents of the same type. + +* The `_parent.type` setting can only point to a type that doesn't exist yet. + This means that a type cannot become a parent type after it has been + created. + +* Parent and child documents must be indexed on the same shard. The `parent` + ID is used as the <> value for the child, + to ensure that the child is indexed on the same shard as the parent. + This means that the same `parent` value needs to be provided when + <>, <>, or <> + a child document. ==== Global ordinals -Parent-child uses <> to speed up joins and global ordinals need to be rebuilt after any change to a shard. -The more parent id values are stored in a shard, the longer it takes to rebuild global ordinals for the `_parent` field. +Parent-child uses <> to speed up joins. +Global ordinals need to be rebuilt after any change to a shard. 
The more +parent id values are stored in a shard, the longer it takes to rebuild the +global ordinals for the `_parent` field. -Global ordinals, by default, are built lazily: the first parent-child query or aggregation after a refresh will trigger building of global ordinals. -This can introduce a significant latency spike for your users. You can use <> to shift the cost of building global ordinals -from query time to refresh time, by mapping the _parent field as follows: - -==== Memory usage - -The only on heap memory used by parent/child is the global ordinals for the `_parent` field. - -How much memory is used for the global ordianls for the `_parent` field in the fielddata cache -can be checked via the <> or <> -APIS, eg: +Global ordinals, by default, are built lazily: the first parent-child query or +aggregation after a refresh will trigger building of global ordinals. This can +introduce a significant latency spike for your users. You can use +<> to shift the cost of building global +ordinals from query time to refresh time, by mapping the `_parent` field as follows: [source,js] -------------------------------------------------- -curl -XGET "http://localhost:9200/_stats/fielddata?pretty&human&fielddata_fields=_parent" +PUT my_index +{ + "mappings": { + "my_parent": {}, + "my_child": { + "_parent": { + "type": "my_parent", + "fielddata": { + "loading": "eager_global_ordinals" + } + } + } + } +} -------------------------------------------------- +// AUTOSENSE + +The amount of heap used by global ordinals can be checked as follows: + +[source,sh] +-------------------------------------------------- +# Per-index +GET _stats/fielddata?human&fields=_parent + +# Per-node per-index +GET _nodes/stats/indices/fielddata?human&fields=_parent +-------------------------------------------------- +// AUTOSENSE + diff --git a/docs/reference/mapping/fields/routing-field.asciidoc b/docs/reference/mapping/fields/routing-field.asciidoc index 37b173e5aa3..f71bbbf0cc0 100644 --- a/docs/reference/mapping/fields/routing-field.asciidoc +++ b/docs/reference/mapping/fields/routing-field.asciidoc @@ -1,22 +1,134 @@ [[mapping-routing-field]] -=== `_routing` +=== `_routing` field -The routing field allows to control the `_routing` aspect when indexing -data and explicit routing control is required. It is stored and indexed. +A document is routed to a particular shard in an index using the following +formula: -[float] -==== required + shard_num = hash(_routing) % num_primary_shards -Another aspect of the `_routing` mapping is the ability to define it as -required by setting `required` to `true`. This is very important to set -when using routing features, as it allows different APIs to make use of -it. For example, an index operation will be rejected if no routing value -has been provided. +The default value used for `_routing` is the document's <> +or the document's <> ID, if present. -[float] -==== id uniqueness +Custom routing patterns can be implemented by specifying a custom `routing` +value per document. For instance: -When indexing documents specifying a custom `_routing`, the uniqueness -of the `_id` is not guaranteed throughout all the shards that the index -is composed of. In fact, documents with the same `_id` might end up in -different shards if indexed with different `_routing` values. 
+[source,js] +------------------------------ +PUT my_index/my_type/1?routing=user1 <1> +{ + "title": "This is a document" +} + +GET my_index/my_type/1?routing=user1 <2> +------------------------------ +// AUTOSENSE + +<1> This document uses `user1` as its routing value, instead of its ID. +<2> The same `routing` value needs to be provided when + <>, <>, or <> + the document. + +The value of the `_routing` field is accessible in queries, aggregations, scripts, +and when sorting: + +[source,js] +-------------------------- +GET my_index/_search +{ + "query": { + "terms": { + "_routing": [ "user1" ] <1> + } + }, + "aggs": { + "Routing values": { + "terms": { + "field": "_routing", <2> + "size": 10 + } + } + }, + "sort": [ + { + "_routing": { <3> + "order": "desc" + } + } + ], + "script_fields": { + "Routing value": { + "script": "doc['_routing']" <4> + } + } +} +-------------------------- +// AUTOSENSE + +<1> Querying on the `_routing` field (also see the <>) +<2> Aggregating on the `_routing` field +<3> Sorting on the `_routing` field +<4> Accessing the `_routing` field in scripts (inline scripts must be <> for this example to work) + + +==== Searching with custom routing + +Custom routing can reduce the impact of searches. Instead of having to fan +out a search request to all the shards in an index, the request can be sent to +just the shard that matches the specific routing value (or values): + +[source,js] +------------------------------ +GET my_index/_search?routing=user1,user2 <1> +{ + "query": { + "match": { + "title": "document" + } + } +} +------------------------------ +// AUTOSENSE + +<1> This search request will only be executed on the shards associated with the `user1` and `user2` routing values. + + +==== Making a routing value required + +When using custom routing, it is important to provide the routing value +whenever <>, <>, +<>, or <> a document. + +Forgetting the routing value can lead to a document being indexed on more than +one shard. As a safeguard, the `_routing` field can be configured to make a +custom `routing` value required for all CRUD operations: + +[source,js] +------------------------------ +PUT my_index +{ + "mappings": { + "my_type": { + "_routing": { + "required": true <1> + } + } + } +} + +PUT my_index/my_type/1 <2> +{ + "text": "No routing value provided" +} +------------------------------ +// AUTOSENSE +<1> Routing is required for `my_type` documents. +<2> This index request throws a `routing_missing_exception`. + +==== Unique IDs with custom routing + +When indexing documents specifying a custom `_routing`, the uniqueness of the +`_id` is not guaranteed across all of the shards in the index. In fact, +documents with the same `_id` might end up on different shards if indexed with +different `_routing` values. + +It is up to the user to ensure that IDs are unique across the index. diff --git a/docs/reference/mapping/fields/size-field.asciidoc b/docs/reference/mapping/fields/size-field.asciidoc index 9b35693bf15..aa87f90538a 100644 --- a/docs/reference/mapping/fields/size-field.asciidoc +++ b/docs/reference/mapping/fields/size-field.asciidoc @@ -1,15 +1,76 @@ [[mapping-size-field]] -=== `_size` +=== `_size` field -The `_size` field allows to automatically index the size of the original -`_source` indexed. By default, it's disabled. In order to enable it, set -the mapping to: +The `_size` field, when enabled, indexes the size in bytes of the original +<>. 
In order to enable it, set +the mapping as follows: [source,js] --------------------------------------------------- +-------------------------- +PUT my_index { - "tweet" : { - "_size" : {"enabled" : true} + "mappings": { + "my_type": { + "_size": { + "enabled": true + } } + } } --------------------------------------------------- +-------------------------- +// AUTOSENSE + +The value of the `_size` field is accessible in queries, aggregations, scripts, +and when sorting: + +[source,js] +-------------------------- +# Example documents +PUT my_index/my_type/1 +{ + "text": "This is a document" +} + +PUT my_index/my_type/2 +{ + "text": "This is another document" +} + +GET my_index/_search +{ + "query": { + "range": { + "_size": { <1> + "gt": 10 + } + } + }, + "aggs": { + "Sizes": { + "terms": { + "field": "_size", <2> + "size": 10 + } + } + }, + "sort": [ + { + "_size": { <3> + "order": "desc" + } + } + ], + "script_fields": { + "Size": { + "script": "doc['_size']" <4> + } + } +} +-------------------------- +// AUTOSENSE + +<1> Querying on the `_size` field +<2> Aggregating on the `_size` field +<3> Sorting on the `_size` field +<4> Accessing the `_size` field in scripts (inline scripts must be <> for this example to work) + diff --git a/docs/reference/mapping/fields/source-field.asciidoc b/docs/reference/mapping/fields/source-field.asciidoc index 1f51793b97b..f33b4a72c82 100644 --- a/docs/reference/mapping/fields/source-field.asciidoc +++ b/docs/reference/mapping/fields/source-field.asciidoc @@ -1,13 +1,136 @@ [[mapping-source-field]] -=== `_source` +=== `_source` field -The `_source` field is an automatically generated field that stores the -actual JSON that was used as the indexed document. It is not indexed -(searchable), just stored. When executing "fetch" requests, like -<> or -<>, the `_source` field is -returned by default. +The `_source` field contains the original JSON document body that was passed +at index time. The `_source` field itself is not indexed (and thus is not +searchable), but it is stored so that it can be returned when executing +_fetch_ requests, like <> or <>. + +==== Disabling the `_source` field + +Though very handy to have around, the source field does incur storage overhead +within the index. For this reason, it can be disabled as follows: + +[source,js] +-------------------------------------------------- +PUT tweets +{ + "mappings": { + "tweet": { + "_source": { + "enabled": false + } + } + } +} +-------------------------------------------------- +// AUTOSENSE + +[WARNING] +.Think before disabling the `_source` field +================================================== + +Users often disable the `_source` field without thinking about the +consequences, and then live to regret it. If the `_source` field isn't +available then a number of features are not supported: + +* The <>. + +* On the fly <>. + +* The ability to reindex from one Elasticsearch index to another, either + to change mappings or analysis, or to upgrade an index to a new major + version. + +* The ability to debug queries or aggregations by viewing the original + document used at index time. + +* Potentially in the future, the ability to repair index corruption + automatically. +================================================== + +TIP: If disk space is a concern, rather increase the +<> than disable the `_source`. 
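+ +As a minimal sketch (the index name `my_index` is just an illustration), the compression level can be raised at index creation time instead of disabling `_source`: + +[source,js] +-------------------------------------------------- +PUT my_index +{ + "settings": { + "index.codec": "best_compression" <1> + } +} +-------------------------------------------------- +// AUTOSENSE + +<1> `index.codec` is a static per-index setting; `best_compression` trades some stored-field read speed for a smaller index on disk.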
+ +.The metrics use case +************************************************** + +The _metrics_ use case is distinct from other time-based or logging use cases +in that there are many small documents which consist only of numbers, dates, +or keywords. There are no updates, no highlighting requests, and the data +ages quickly so there is no need to reindex. Search requests typically use +simple queries to filter the dataset by date or tags, and the results are +returned as aggregations. + +In this case, disabling the `_source` field will save space and reduce I/O. +It is also advisable to disable the <> in the +metrics case. + +************************************************** + + +[[include-exclude]] +==== Including / Excluding fields from `_source` + +An expert-only feature is the ability to prune the contents of the `_source` +field after the document has been indexed, but before the `_source` field is +stored. + +WARNING: Removing fields from the `_source` has similar downsides to disabling +`_source`, especially the fact that you cannot reindex documents from one +Elasticsearch index to another. Consider using +<> or a +<> instead. + +The `includes`/`excludes` parameters (which also accept wildcards) can be used +as follows: + +[source,js] +-------------------------------------------------- +PUT logs +{ + "mappings": { + "event": { + "_source": { + "includes": [ + "*.count", + "meta.*" + ], + "excludes": [ + "meta.description", + "meta.other.*" + ] + } + } + } +} + +PUT logs/event/1 +{ + "requests": { + "count": 10, + "foo": "bar" <1> + }, + "meta": { + "name": "Some metric", + "description": "Some metric description", <1> + "other": { + "foo": "one", <1> + "baz": "two" <1> + } + } +} + +GET logs/event/_search +{ + "query": { + "match": { + "meta.other.foo": "one" <2> + } + } +} +-------------------------------------------------- +// AUTOSENSE + +<1> These fields will be removed from the stored `_source` field. +<2> We can still search on this field, even though it is not in the stored `_source`. -Many APIs may use the `_source` field. For example, the -<>. To minimize the storage cost of -`_source`, set `index.codec: best_compression` in index settings. diff --git a/docs/reference/mapping/fields/timestamp-field.asciidoc b/docs/reference/mapping/fields/timestamp-field.asciidoc index 27988b45b50..5971a02c771 100644 --- a/docs/reference/mapping/fields/timestamp-field.asciidoc +++ b/docs/reference/mapping/fields/timestamp-field.asciidoc @@ -1,90 +1,94 @@ [[mapping-timestamp-field]] -=== `_timestamp` +=== `_timestamp` field -The `_timestamp` field allows to automatically index the timestamp of a -document. If it is not provided it will be automatically set -to a <>. - -[float] -==== enabled - -By default it is disabled. In order to enable it, the following mapping -should be defined: +The `_timestamp` field, when enabled, allows a timestamp to be indexed and +stored with a document. The timestamp may be specified manually, generated +automatically, or set to a default value: [source,js] --------------------------------------------------- +------------------------------------ +PUT my_index { - "tweet" : { - "_timestamp" : { "enabled" : true } + "mappings": { + "my_type": { + "_timestamp": { <1> + "enabled": true + } } + } } --------------------------------------------------- -[float] -[[mapping-timestamp-field-format]] -==== format +PUT my_index/my_type/1?timestamp=2015-01-01 <2> +{ "text": "Timestamp as a formatted date" } -You can define the <> used to parse the provided timestamp value. 
For example: +PUT my_index/my_type/2?timestamp=1420070400000 <3> +{ "text": "Timestamp as milliseconds since the epoch" } + +PUT my_index/my_type/3 <4> +{ "text": "Autogenerated timestamp set to now()" } + +------------------------------------ +// AUTOSENSE + +<1> Enable the `_timestamp` field with default settings. +<2> Set the timestamp manually with a formatted date. +<3> Set the timestamp with milliseconds since the epoch. +<4> Auto-generates a timestamp with <>. + +The behaviour of the `_timestamp` field can be configured with the following parameters: + +`default`:: + + A default value to be used if none is provided. Defaults to <>. + +`format`:: + + The <> (or formats) to use when parsing timestamps. Defaults to `epoch_millis||strictDateOptionalTime`. + +`ignore_missing`:: + + If `true` (default), replace missing timestamps with the `default` value. If `false`, throw an exception. + + +The value of the `_timestamp` field is accessible in queries, aggregations, scripts, +and when sorting: [source,js] --------------------------------------------------- +-------------------------- +GET my_index/_search { - "tweet" : { - "_timestamp" : { - "enabled" : true, - "path" : "post_date", - "format" : "YYYY-MM-dd" - } + "query": { + "range": { + "_timestamp": { <1> + "gte": "2015-01-01" + } } -} --------------------------------------------------- - -Note, the default format is `epoch_millis||strictDateOptionalTime`. The timestamp value will -first be parsed as a number and if it fails the format will be tried. - -[float] -[[mapping-timestamp-field-default]] -==== default - -You can define a default value for when timestamp is not provided -within the index request or in the `_source` document. - -By default, the default value is `now` which means the date the document was processed by the indexing chain. - -You can reject documents which do not provide a `timestamp` value by setting `ignore_missing` to false (default to `true`): - -[source,js] --------------------------------------------------- -{ - "tweet" : { - "_timestamp" : { - "enabled" : true, - "ignore_missing" : false - } + }, + "aggs": { + "Timestamps": { + "terms": { + "field": "_timestamp", <2> + "size": 10 + } } -} --------------------------------------------------- - -You can also set the default value to any date respecting <>: - -[source,js] --------------------------------------------------- -{ - "tweet" : { - "_timestamp" : { - "enabled" : true, - "format" : "YYYY-MM-dd", - "default" : "1970-01-01" - } + }, + "sort": [ + { + "_timestamp": { <3> + "order": "desc" + } } + ], + "script_fields": { + "Timestamp": { + "script": "doc['_timestamp']" <4> + } + } } --------------------------------------------------- - -If you don't provide any timestamp value, _timestamp will be set to this default value. - -In elasticsearch 1.4, we allowed setting explicitly `"default":null` which is not possible anymore -as we added a new `ignore_missing` setting. 
-When reading an index created with elasticsearch 1.4 and using this, we automatically update it by
-removing `"default": null` and setting `"ignore_missing": false`
+--------------------------
+// AUTOSENSE
+<1> Querying on the `_timestamp` field
+<2> Aggregating on the `_timestamp` field
+<3> Sorting on the `_timestamp` field
+<4> Accessing the `_timestamp` field in scripts (inline scripts must be <> for this example to work)
diff --git a/docs/reference/mapping/fields/ttl-field.asciidoc b/docs/reference/mapping/fields/ttl-field.asciidoc
index 3a288ae6826..446b1d96502 100644
--- a/docs/reference/mapping/fields/ttl-field.asciidoc
+++ b/docs/reference/mapping/fields/ttl-field.asciidoc
@@ -1,67 +1,106 @@
[[mapping-ttl-field]]
=== `_ttl`
-A lot of documents naturally come with an expiration date. Documents can
-therefore have a `_ttl` (time to live), which will cause the expired
-documents to be deleted automatically.
+Some types of documents, such as session data or special offers, come with an
+expiration date. The `_ttl` field allows you to specify the minimum time a
+document should live, after which time the document is deleted automatically.
-`_ttl` accepts two parameters which are described below, every other setting will be silently ignored.
+[TIP]
+.Prefer index-per-timeframe to TTL
+======================================================
-[float]
-==== enabled
+With TTL, expired documents first have to be marked as deleted, then later
+purged from the index when segments are merged. For append-only time-based
+data such as log events, it is much more efficient to use an index-per-day /
+week / month instead of TTLs. Old log data can be removed by simply deleting
+old indices.
-By default it is disabled, in order to enable it, the following mapping
-should be defined:
+======================================================
+
+The `_ttl` field may be enabled as follows:
 
[source,js]
---------------------------------------------------
+-------------------------------
+PUT my_index
{
-  "tweet" : {
-    "_ttl" : { "enabled" : true }
+  "mappings": {
+    "my_type": {
+      "_ttl": {
+        "enabled": true
+      }
    }
+  }
}
---------------------------------------------------
-`_ttl` can only be enabled once and never be disabled again.
+PUT my_index/my_type/1?ttl=10m <1>
+{
+  "text": "Will expire in 10 minutes"
+}
-[float]
-==== default
+PUT my_index/my_type/2 <2>
+{
+  "text": "Will not expire"
+}
+-------------------------------
+// AUTOSENSE
+<1> This document will expire 10 minutes after being indexed.
+<2> This document has no TTL set and will not expire.
-You can provide a per index/type default `_ttl` value as follows:
+The expiry time is calculated as the value of the
+<> field (or `now()` if the `_timestamp`
+is not enabled) plus the `ttl` specified in the indexing request.
+
+==== Default TTL
+
+You can provide a default `_ttl`, which will be applied to indexing requests where the `ttl` is not specified:
 
[source,js]
---------------------------------------------------
+-------------------------------
+PUT my_index
{
-  "tweet" : {
-    "_ttl" : { "enabled" : true, "default" : "1d" }
+  "mappings": {
+    "my_type": {
+      "_ttl": {
+        "enabled": true,
+        "default": "5m"
+      }
    }
+  }
}
---------------------------------------------------
-In this case, if you don't provide a `_ttl` value in your query or in
-the `_source` all tweets will have a `_ttl` of one day.
+PUT my_index/my_type/1?ttl=10m <1>
+{
+  "text": "Will expire in 10 minutes"
+}
-In case you do not specify a time unit like `d` (days), `m` (minutes),
-`h` (hours), `ms` (milliseconds) or `w` (weeks), milliseconds is used as
-default unit.
+PUT my_index/my_type/2 <2>
+{
+  "text": "Will expire in 5 minutes"
+}
+-------------------------------
+// AUTOSENSE
+<1> This document will expire 10 minutes after being indexed.
+<2> This document has no TTL set and so will expire after the default 5 minutes.
-If no `default` is set and no `_ttl` value is given then the document
-has an infinite `_ttl` and will not expire.
+The `default` value can use <> like `d` for days, and
+will use `ms` as the default unit if no time unit is provided.
 
You can dynamically update the `default` value using the put mapping
API. It won't change the `_ttl` of already indexed documents but will be
used for future documents.
 
-[float]
==== Note on documents expiration
-Expired documents will be automatically deleted regularly. You can
-dynamically set the `indices.ttl.interval` to fit your needs. The
-default value is `60s`.
+Expired documents will be automatically deleted periodically. The following
+settings control the expiry process:
-The deletion orders are processed by bulk. You can set
-`indices.ttl.bulk_size` to fit your needs. The default value is `10000`.
+`indices.ttl.interval`::
+
+How often the purge process should run. Defaults to `60s`. Expired documents
+may still be retrieved before they are purged.
+
+`indices.ttl.bulk_size`::
+
+How many deletions are handled by a single <> request. The
+default value is `10000`.
-Note that the expiration procedure handle versioning properly so if a
-document is updated between the collection of documents to expire and
-the delete order, the document won't be deleted.
diff --git a/docs/reference/mapping/fields/type-field.asciidoc b/docs/reference/mapping/fields/type-field.asciidoc
index 52521d4c25e..bc6c578922d 100644
--- a/docs/reference/mapping/fields/type-field.asciidoc
+++ b/docs/reference/mapping/fields/type-field.asciidoc
@@ -1,7 +1,60 @@
[[mapping-type-field]]
-=== `_type`
+=== `_type` field
+
+Each document indexed is associated with a <> (see
+<>) and an <>. The
+`_type` field is indexed in order to make searching by type name fast.
+
+The value of the `_type` field is accessible in queries, aggregations,
+scripts, and when sorting:
+
+[source,js]
+--------------------------
+# Example documents
+PUT my_index/type_1/1
+{
+  "text": "Document with type 1"
+}
+
+PUT my_index/type_2/2
+{
+  "text": "Document with type 2"
+}
+
+GET my_index/type_*/_search
+{
+  "query": {
+    "terms": {
+      "_type": [ "type_1", "type_2" ] <1>
+    }
+  },
+  "aggs": {
+    "types": {
+      "terms": {
+        "field": "_type", <2>
+        "size": 10
+      }
+    }
+  },
+  "sort": [
+    {
+      "_type": { <3>
+        "order": "desc"
+      }
+    }
+  ],
+  "script_fields": {
+    "type": {
+      "script": "doc['_type']" <4>
+    }
+  }
+}
+
+--------------------------
+// AUTOSENSE
+
+<1> Querying on the `_type` field
+<2> Aggregating on the `_type` field
+<3> Sorting on the `_type` field
+<4> Accessing the `_type` field in scripts (inline scripts must be <> for this example to work)
-Each document indexed is associated with an id and a type. The `_type`
-field allows accessing only the type of a document. It is indexed
-to allow quickly filtering on type, for example, when performing
-a search request on a single or multiple types.
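+
+As a rough sketch of how searching specific types in the URL (as in the
+example above) relates to the `_type` field (reusing the hypothetical
+`my_index` and `type_1` names), a search restricted to one type behaves like
+a query filtered on `_type`:
+
+[source,js]
+--------------------------
+GET my_index/type_1/_search <1>
+
+GET my_index/_search
+{
+  "query": {
+    "bool": {
+      "must": { "match": { "text": "document" } },
+      "filter": { "term": { "_type": "type_1" } } <2>
+    }
+  }
+}
+--------------------------
+// AUTOSENSE
+
+<1> Restricting the search to `type_1` in the URL...
+<2> ...behaves like filtering on the `_type` field explicitly.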
diff --git a/docs/reference/mapping/fields/uid-field.asciidoc b/docs/reference/mapping/fields/uid-field.asciidoc index 2bd0acd5486..a6dc6a9a27e 100644 --- a/docs/reference/mapping/fields/uid-field.asciidoc +++ b/docs/reference/mapping/fields/uid-field.asciidoc @@ -1,10 +1,59 @@ [[mapping-uid-field]] -=== `_uid` +=== `_uid` field -Each document indexed is associated with an id and a type, the internal -`_uid` field is the unique identifier of a document within an index and -is composed of the type and the id (meaning that different types can -have the same id and still maintain uniqueness). +Each document indexed is associated with a <> (see +<>) and an <>. These +values are combined as `{type}#{id}` and indexed as the `_uid` field. + +The value of the `_uid` field is accessible in queries, aggregations, scripts, +and when sorting: + +[source,js] +-------------------------- +# Example documents +PUT my_index/my_type/1 +{ + "text": "Document with ID 1" +} + +PUT my_index/my_type/2 +{ + "text": "Document with ID 2" +} + +GET my_index/_search +{ + "query": { + "terms": { + "_uid": [ "my_type#1", "my_type#2" ] <1> + } + }, + "aggs": { + "UIDs": { + "terms": { + "field": "_uid", <2> + "size": 10 + } + } + }, + "sort": [ + { + "_uid": { <3> + "order": "desc" + } + } + ], + "script_fields": { + "UID": { + "script": "doc['_uid']" <4> + } + } +} +-------------------------- +// AUTOSENSE + +<1> Querying on the `_uid` field (also see the <>) +<2> Aggregating on the `_uid` field +<3> Sorting on the `_uid` field +<4> Accessing the `_uid` field in scripts (inline scripts must be <> for this example to work) -The `_uid` field is for type based filtering, as well as for -lookups of `_id` and `_type`. diff --git a/docs/reference/mapping/transform.asciidoc b/docs/reference/mapping/transform.asciidoc index 9377336518a..0fc8aab3204 100644 --- a/docs/reference/mapping/transform.asciidoc +++ b/docs/reference/mapping/transform.asciidoc @@ -47,7 +47,7 @@ source filtering. It can be highlighted if it is marked as stored. The get endpoint will retransform the source if the `_source_transform` parameter is set. Example: -[source,bash] +[source,sh] -------------------------------------------------- curl -XGET "http://localhost:9200/test/example/3?pretty&_source_transform" -------------------------------------------------- diff --git a/docs/reference/mapping/types/core-types.asciidoc b/docs/reference/mapping/types/core-types.asciidoc index 945a5c4e708..f848fbd290e 100644 --- a/docs/reference/mapping/types/core-types.asciidoc +++ b/docs/reference/mapping/types/core-types.asciidoc @@ -143,6 +143,12 @@ defaults to `true` or to the parent `object` type setting. |`ignore_above` |The analyzer will ignore strings larger than this size. Useful for generic `not_analyzed` fields that should ignore long text. +This option is also useful for protecting against Lucene's term byte-length +limit of `32766`. Note: the value for `ignore_above` is the _character count_, +but Lucene counts bytes, so if you have UTF-8 text, you may want to set the +limit to `32766 / 3 = 10922` since UTF-8 characters may occupy at most 3 +bytes. + |`position_offset_gap` |Position increment gap between field instances with the same field name. Defaults to 0. |======================================================================= @@ -183,7 +189,7 @@ for filtering or aggregations. 
In case you would like to disable norms after the fact, it is possible to do so by using the <>, like this: -[source,json] +[source,js] ------------ PUT my_index/_mapping/my_type { diff --git a/docs/reference/migration/migrate_1_0.asciidoc b/docs/reference/migration/migrate_1_0.asciidoc index aa156676e85..d77d12fc00c 100644 --- a/docs/reference/migration/migrate_1_0.asciidoc +++ b/docs/reference/migration/migrate_1_0.asciidoc @@ -126,7 +126,7 @@ rules: * The response format always has the index name, then the section, then the element name, for instance: + -[source,json] +[source,js] --------------- { "my_index": { @@ -151,7 +151,7 @@ mapping`>>, <>, Previously a document could be indexed as itself, or wrapped in an outer object which specified the `type` name: -[source,json] +[source,js] --------------- PUT /my_index/my_type/1 { @@ -173,7 +173,7 @@ While the `search` API takes a top-level `query` parameter, the <> requests expected the whole body to be a query. These now _require_ a top-level `query` parameter: -[source,json] +[source,js] --------------- GET /_count { @@ -194,7 +194,7 @@ results AFTER aggregations have been calculated. This example counts the top colors in all matching docs, but only returns docs with color `red`: -[source,json] +[source,js] --------------- GET /_search { @@ -221,7 +221,7 @@ Multi-fields are dead! Long live multi-fields! Well, the field type (excluding `object` and `nested`) now accept a `fields` parameter. It's the same thing, but nicer. Instead of: -[source,json] +[source,js] --------------- "title": { "type": "multi_field", @@ -234,7 +234,7 @@ same thing, but nicer. Instead of: you can now write: -[source,json] +[source,js] --------------- "title": { "type": "string", @@ -322,7 +322,7 @@ parameters instead. * Settings, like `index.analysis.analyzer.default` are now returned as proper nested JSON objects, which makes them easier to work with programatically: + -[source,json] +[source,js] --------------- { "index": { diff --git a/docs/reference/migration/migrate_2_0.asciidoc b/docs/reference/migration/migrate_2_0.asciidoc index 1aae0f4d89e..b980d7a8049 100644 --- a/docs/reference/migration/migrate_2_0.asciidoc +++ b/docs/reference/migration/migrate_2_0.asciidoc @@ -90,7 +90,7 @@ Script fields in 1.x were only returned as a single value. So even if the return value of a script used to be list, it would be returned as an array containing a single value that is a list too, such as: -[source,json] +[source,js] --------------- "fields": { "my_field": [ @@ -106,7 +106,7 @@ In elasticsearch 2.x, scripts that return a list of values are considered as multivalued fields. So the same example would return the following response, with values in a single array. -[source,json] +[source,js] --------------- "fields": { "my_field": [ @@ -200,7 +200,7 @@ Types can no longer be specified on fields within queries. Instead, specify typ The following is an example query in 1.x over types `t1` and `t2`: -[source,json] +[source,js] --------------- curl -XGET 'localhost:9200/index/_search' { @@ -217,7 +217,7 @@ curl -XGET 'localhost:9200/index/_search' In 2.0, the query should look like the following: -[source,json] +[source,js] --------------- curl -XGET 'localhost:9200/index/t1,t2/_search' { @@ -240,7 +240,7 @@ The following example illustrates the difference between 1.x and 2.0. 
Given these mappings:
 
-[source,json]
+[source,js]
---------------
curl -XPUT 'localhost:9200/index'
{
@@ -262,7 +262,7 @@ curl -XPUT 'localhost:9200/index'
 
The following query was possible in 1.x:
 
-[source,json]
+[source,js]
---------------
curl -XGET 'localhost:9200/index/type/_search'
{
@@ -274,7 +274,7 @@ curl -XGET 'localhost:9200/index/type/_search'
 
In 2.0, the same query should now be:
 
-[source,json]
+[source,js]
---------------
curl -XGET 'localhost:9200/index/type/_search'
{
@@ -302,7 +302,7 @@ Meta fields can no longer be specified within a document. They should be specifi
via the API. For example, instead of adding a field `_parent` within a document,
use the `parent` url parameter when indexing that document.
 
-==== Default date format now is `strictDateOptionalDate`
+==== Default date format now is `strictDateOptionalTime`
 
Instead of `dateOptionalTime` the new default date format now is `strictDateOptionalTime`,
which is more strict in parsing dates. This means, that dates now need to have a four digit year,
@@ -347,7 +347,7 @@ In addition, terms aggregations use a custom formatter for boolean (like for
dates and ip addresses, which are also backed by numbers) in order to return
the user-friendly representation of boolean fields: `false`/`true`:
 
-[source,json]
+[source,js]
---------------
"buckets": [
  {
@@ -414,9 +414,11 @@ script.indexed: on
 
=== Script parameters
 
-Deprecated script parameters `id`, `file`, and `scriptField` have been removed
-from all scriptable APIs. `script_id`, `script_file` and `script` should be used
-in their place.
+The script parameters `id`, `file`, `scriptField`, `script_id`, `script_file`,
+`script`, `lang` and `params` have been deprecated. The <> should be used in their place.
+
+The deprecated script parameters have been removed from the Java API, so applications using the Java API will
+need to be updated.
 
=== Groovy scripts sandbox
 
@@ -457,6 +459,17 @@ http.cors.enabled: true
http.cors.allow-origin: /https?:\/\/localhost(:[0-9]+)?/
---------------
 
+=== CORS allowed origins
+
+The CORS allowed origins setting, `http.cors.allow-origin`, no longer has a default value. Previously, the default value
+was `*`, which would allow CORS requests from any origin and is considered insecure. The `http.cors.allow-origin` setting
+should be specified with only the origins that should be allowed, like so:
+
+[source,yaml]
+---------------
+http.cors.allow-origin: /https?:\/\/localhost(:[0-9]+)?/
+---------------
+
=== Cluster state REST api
 
The cluster state api doesn't return the `routing_nodes` section anymore when
@@ -473,7 +486,7 @@ The `filtered` query is deprecated. Instead you should use a `bool` query with
a `must` clause for the query and a `filter` clause for the filter. For instance
the below query:
 
-[source,json]
+[source,js]
---------------
{
  "filtered": {
@@ -487,7 +500,7 @@ the below query:
  }
}
---------------
can be replaced with
-[source,json]
+[source,js]
---------------
{
  "bool": {
@@ -535,18 +548,27 @@ As a consequence the `query` filter serves no purpose anymore and is deprecated.
 
=== Snapshot and Restore
 
-Locations of file system repositories has to be now registered using `path.repo` setting. The `path.repo`
-setting can contain one or more repository locations:
+Locations of the shared file system repositories and the URL repositories with `file:` URLs must now be registered
+using the `path.repo` setting.
The `path.repo` setting can contain one or more repository locations:
 
[source,yaml]
---------------
path.repo: ["/mnt/daily", "/mnt/weekly"]
---------------
 
-If the file system repository location is specified as an absolute path it has to start with one of the locations
+If the repository location is specified as an absolute path it has to start with one of the locations
specified in `path.repo`. If the location is specified as a relative path, it will be resolved against the first
location specified in the `path.repo` setting.
 
+URL repositories with `http:`, `https:`, and `ftp:` URLs have to be whitelisted by specifying allowed URLs in the
+`repositories.url.allowed_urls` setting. This setting supports wildcards in the place of host, path, query, and
+fragment. For example:
+
+[source,yaml]
+-----------------------------------
+repositories.url.allowed_urls: ["http://www.example.org/root/*", "https://*.mydomain.com/*?*#*"]
+-----------------------------------
+
The obsolete parameters `expand_wildcards_open` and `expand_wildcards_close` are no longer
supported by the snapshot and restore operations. These parameters have been replaced by
a single `expand_wildcards` parameter. See <> for more.
@@ -694,7 +716,7 @@ put under `fields` like regular stored fields.
 
curl -XGET 'localhost:9200/test/_search?fields=_timestamp,foo'
---------------
 
-[source,json]
+[source,js]
---------------
{
   [...]
@@ -770,3 +792,8 @@ For the record, official plugins which can use this new simplified form are:
 
* elasticsearch-lang-javascript
* elasticsearch-lang-python
 
+=== Aliases
+
+Fields used in alias filters no longer have to exist in the mapping at alias creation time. Alias filters are now
+parsed at request time and then the fields in filters are resolved from the mapping, whereas before alias filters were
+parsed at alias creation time and the parsed form was kept around in memory.
diff --git a/docs/reference/modules/cluster/allocation_filtering.asciidoc b/docs/reference/modules/cluster/allocation_filtering.asciidoc
index 6fa0343ee4c..d465de04b76 100644
--- a/docs/reference/modules/cluster/allocation_filtering.asciidoc
+++ b/docs/reference/modules/cluster/allocation_filtering.asciidoc
@@ -12,7 +12,7 @@ node to other nodes in the cluster before shutting it down.
 
For instance, we could decomission a node using its IP address as follows:
 
-[source,json]
+[source,js]
--------------------------------------------------
PUT /_cluster/settings
{
@@ -57,7 +57,7 @@ These special attributes are also supported:
 
All attribute values can be specified with wildcards, eg:
 
-[source,json]
+[source,js]
------------------------
PUT _cluster/settings
{
diff --git a/docs/reference/modules/cluster/misc.asciidoc b/docs/reference/modules/cluster/misc.asciidoc
index 554324df97e..b6c50f36f92 100644
--- a/docs/reference/modules/cluster/misc.asciidoc
+++ b/docs/reference/modules/cluster/misc.asciidoc
@@ -24,7 +24,7 @@ The settings which control logging can be updated dynamically with the
`logger.` prefix.
For instance, to increase the logging level of the
`indices.recovery` module to `DEBUG`, issue this request:
 
-[source,json]
+[source,js]
-------------------------------
PUT /_cluster/settings
{
diff --git a/docs/reference/modules/discovery/zen.asciidoc b/docs/reference/modules/discovery/zen.asciidoc
index 0da0620a59d..a0d3be9e1f9 100644
--- a/docs/reference/modules/discovery/zen.asciidoc
+++ b/docs/reference/modules/discovery/zen.asciidoc
@@ -88,10 +88,14 @@ Nodes can be excluded from becoming a master by setting `node.master` to
automatically set to `false`).
 
The `discovery.zen.minimum_master_nodes` sets the minimum
-number of master eligible nodes a node should "see" in order to win a master election.
-It must be set to a quorum of your master eligible nodes. It is recommended to avoid
+number of master eligible nodes that need to join a newly elected master in order for an election to
+complete and for the elected node to accept its mastership. The same setting controls the minimum number of
+active master eligible nodes that should be a part of any active cluster. If this requirement is not met, the
+active master node will step down and a new master election will begin.
+
+This setting must be set to a quorum of your master eligible nodes. It is recommended to avoid
having only two master eligible nodes, since a quorum of two is two. Therefore, a loss
-of either master node will result in an inoperable cluster
+of either master node will result in an inoperable cluster.
 
[float]
[[fault-detection]]
diff --git a/docs/reference/modules/http.asciidoc b/docs/reference/modules/http.asciidoc
index 8baa8376444..3255361bf86 100644
--- a/docs/reference/modules/http.asciidoc
+++ b/docs/reference/modules/http.asciidoc
@@ -55,11 +55,13 @@ Defaults to `6`.
i.e. whether a browser on another origin can do requests to
Elasticsearch. Defaults to `false`.
 
-|`http.cors.allow-origin` |Which origins to allow. Defaults to `*`,
-i.e. any origin. If you prepend and append a `/` to the value, this will
+|`http.cors.allow-origin` |Which origins to allow. Defaults to no origins
+allowed. If you prepend and append a `/` to the value, this will
be treated as a regular expression, allowing you to support HTTP and
HTTPs. for example using `/https?:\/\/localhost(:[0-9]+)?/` would return the
-request header appropriately in both cases.
+request header appropriately in both cases. `*` is a valid value but is
+considered a *security risk* as your Elasticsearch instance is open to cross-origin
+requests from *anywhere*.
 
|`http.cors.max-age` |Browsers send a "preflight" OPTIONS-request to
determine CORS settings. `max-age` defines how long the result should
diff --git a/docs/reference/modules/indices/request_cache.asciidoc b/docs/reference/modules/indices/request_cache.asciidoc
index 1976e470ad7..ee68d71fa9c 100644
--- a/docs/reference/modules/indices/request_cache.asciidoc
+++ b/docs/reference/modules/indices/request_cache.asciidoc
@@ -40,7 +40,7 @@ evicted.
The cache can be expired manually with the <>: -[source,json] +[source,js] ------------------------ curl -XPOST 'localhost:9200/kimchy,elasticsearch/_cache/clear?request_cache=true' ------------------------ @@ -51,7 +51,7 @@ curl -XPOST 'localhost:9200/kimchy,elasticsearch/_cache/clear?request_cache=true The cache is not enabled by default, but can be enabled when creating a new index as follows: -[source,json] +[source,js] ----------------------------- curl -XPUT localhost:9200/my_index -d' { @@ -65,7 +65,7 @@ curl -XPUT localhost:9200/my_index -d' It can also be enabled or disabled dynamically on an existing index with the <> API: -[source,json] +[source,js] ----------------------------- curl -XPUT localhost:9200/my_index/_settings -d' { "index.requests.cache.enable": true } @@ -78,7 +78,7 @@ curl -XPUT localhost:9200/my_index/_settings -d' The `query_cache` query-string parameter can be used to enable or disable caching on a *per-request* basis. If set, it overrides the index-level setting: -[source,json] +[source,js] ----------------------------- curl 'localhost:9200/my_index/_search?request_cache=true' -d' { @@ -131,14 +131,14 @@ setting is provided for completeness' sake only. The size of the cache (in bytes) and the number of evictions can be viewed by index, with the <> API: -[source,json] +[source,js] ------------------------ curl 'localhost:9200/_stats/request_cache?pretty&human' ------------------------ or by node with the <> API: -[source,json] +[source,js] ------------------------ curl 'localhost:9200/_nodes/stats/indices/request_cache?pretty&human' ------------------------ diff --git a/docs/reference/modules/plugins.asciidoc b/docs/reference/modules/plugins.asciidoc index 52ed8fe7abe..1798225cace 100644 --- a/docs/reference/modules/plugins.asciidoc +++ b/docs/reference/modules/plugins.asciidoc @@ -18,7 +18,7 @@ Installing plugins can either be done manually by placing them under the Installing plugins typically take the following form: -[source,shell] +[source,sh] ----------------------------------- bin/plugin --install plugin_name ----------------------------------- @@ -28,7 +28,7 @@ same version as your elasticsearch version. For older version of elasticsearch (prior to 2.0.0) or community plugins, you would use the following form: -[source,shell] +[source,sh] ----------------------------------- bin/plugin --install // ----------------------------------- @@ -43,7 +43,7 @@ the `artifactId`. A plugin can also be installed directly by specifying the URL for it, for example: -[source,shell] +[source,sh] ----------------------------------- bin/plugin --url file:///path/to/plugin --install plugin-name ----------------------------------- @@ -106,7 +106,7 @@ Removing plugins can either be done manually by removing them under the Removing plugins typically take the following form: -[source,shell] +[source,sh] ----------------------------------- plugin --remove ----------------------------------- @@ -124,7 +124,7 @@ Note that exit codes could be: * `74`: IO error * `70`: other errors -[source,shell] +[source,sh] ----------------------------------- bin/plugin --install mobz/elasticsearch-head --verbose plugin --remove head --silent @@ -137,7 +137,7 @@ By default, the `plugin` script will wait indefinitely when downloading before f The timeout parameter can be used to explicitly specify how long it waits. 
Here is some examples of setting it to different values:
 
-[source,shell]
+[source,sh]
-----------------------------------
# Wait for 30 seconds before failing
bin/plugin --install mobz/elasticsearch-head --timeout 30s
@@ -156,14 +156,14 @@ To install a plugin via a proxy, you can pass the proxy details using the enviro
 
On Linux and Mac, here is an example of setting it:
 
-[source,shell]
+[source,sh]
-----------------------------------
bin/plugin -DproxyHost=host_name -DproxyPort=port_number --install mobz/elasticsearch-head
-----------------------------------
 
On Windows, here is an example of setting it:
 
-[source,shell]
+[source,sh]
-----------------------------------
set JAVA_OPTS="-DproxyHost=host_name -DproxyPort=port_number"
bin/plugin --install mobz/elasticsearch-head
diff --git a/docs/reference/modules/scripting.asciidoc b/docs/reference/modules/scripting.asciidoc
index 86920d30215..69748f09ed1 100644
--- a/docs/reference/modules/scripting.asciidoc
+++ b/docs/reference/modules/scripting.asciidoc
@@ -23,7 +23,7 @@ in the `config/scripts/` directory on every node.
 
To convert an inline script to a file, take this simple script
as an example:
 
-[source,json]
+[source,js]
-----------------------------------
GET /_search
{
@@ -48,7 +48,7 @@ on every data node in the cluster:
 
Now you can access the script by file name (without the extension):
 
-[source,json]
+[source,js]
-----------------------------------
GET /_search
{
diff --git a/docs/reference/modules/snapshots.asciidoc b/docs/reference/modules/snapshots.asciidoc
index ea10ae19878..2f75decfe89 100644
--- a/docs/reference/modules/snapshots.asciidoc
+++ b/docs/reference/modules/snapshots.asciidoc
@@ -118,12 +118,24 @@ The following settings are supported:
===== Read-only URL Repository
 
The URL repository (`"type": "url"`) can be used as an alternative read-only way to access data created by the shared file
-system repository. The URL specified in the `url` parameter should
-point to the root of the shared filesystem repository. The following settings are supported:
+system repository. The URL specified in the `url` parameter should point to the root of the shared filesystem repository.
+The following settings are supported:
[horizontal]
`url`:: Location of the snapshots. Mandatory.
 
+The URL repository supports the following protocols: "http", "https", "ftp", "file" and "jar". URL repositories with `http:`,
+`https:`, and `ftp:` URLs have to be whitelisted by specifying allowed URLs in the `repositories.url.allowed_urls` setting.
+This setting supports wildcards in the place of host, path, query, and fragment. For example:
+
+[source,yaml]
+-----------------------------------
+repositories.url.allowed_urls: ["http://www.example.org/root/*", "https://*.mydomain.com/*?*#*"]
+-----------------------------------
+
+URL repositories with `file:` URLs can only point to locations registered in the `path.repo` setting, similar to the
+shared file system repository.
+
[float]
===== Repository plugins
@@ -224,7 +236,7 @@ filtering settings and rebalancing algorithm) once the snapshot is finished.
Once a snapshot is created information about this snapshot can be obtained using the following command: -[source,shell] +[source,sh] ----------------------------------- GET /_snapshot/my_backup/snapshot_1 ----------------------------------- @@ -232,7 +244,7 @@ GET /_snapshot/my_backup/snapshot_1 All snapshots currently stored in the repository can be listed using the following command: -[source,shell] +[source,sh] ----------------------------------- GET /_snapshot/my_backup/_all ----------------------------------- @@ -240,14 +252,14 @@ GET /_snapshot/my_backup/_all coming[2.0] A currently running snapshot can be retrieved using the following command: -[source,shell] +[source,sh] ----------------------------------- $ curl -XGET "localhost:9200/_snapshot/my_backup/_current" ----------------------------------- A snapshot can be deleted from the repository using the following command: -[source,shell] +[source,sh] ----------------------------------- DELETE /_snapshot/my_backup/snapshot_1 ----------------------------------- @@ -261,7 +273,7 @@ started by mistake. A repository can be deleted using the following command: -[source,shell] +[source,sh] ----------------------------------- DELETE /_snapshot/my_backup ----------------------------------- @@ -275,7 +287,7 @@ the snapshots. The snapshots themselves are left untouched and in place. A snapshot can be restored using the following command: -[source,shell] +[source,sh] ----------------------------------- POST /_snapshot/my_backup/snapshot_1/_restore ----------------------------------- @@ -368,7 +380,7 @@ index will not be successfully restored unless these index allocation settings a A list of currently running snapshots with their detailed status information can be obtained using the following command: -[source,shell] +[source,sh] ----------------------------------- GET /_snapshot/_status ----------------------------------- @@ -377,7 +389,7 @@ GET /_snapshot/_status In this format, the command will return information about all currently running snapshots. By specifying a repository name, it's possible to limit the results to a particular repository: -[source,shell] +[source,sh] ----------------------------------- GET /_snapshot/my_backup/_status ----------------------------------- @@ -386,7 +398,7 @@ GET /_snapshot/my_backup/_status If both repository name and snapshot id are specified, this command will return detailed status information for the given snapshot even if it's not currently running: -[source,shell] +[source,sh] ----------------------------------- GET /_snapshot/my_backup/snapshot_1/_status ----------------------------------- @@ -394,7 +406,7 @@ GET /_snapshot/my_backup/snapshot_1/_status Multiple ids are also supported: -[source,shell] +[source,sh] ----------------------------------- GET /_snapshot/my_backup/snapshot_1,snapshot_2/_status ----------------------------------- @@ -409,7 +421,7 @@ the simplest method that can be used to get notified about operation completion. The snapshot operation can be also monitored by periodic calls to the snapshot info: -[source,shell] +[source,sh] ----------------------------------- GET /_snapshot/my_backup/snapshot_1 ----------------------------------- @@ -421,7 +433,7 @@ for available resources before returning the result. 
On very large shards the wa To get more immediate and complete information about snapshots the snapshot status command can be used instead: -[source,shell] +[source,sh] ----------------------------------- GET /_snapshot/my_backup/snapshot_1/_status ----------------------------------- diff --git a/docs/reference/modules/threadpool.asciidoc b/docs/reference/modules/threadpool.asciidoc index 77fe73feb4b..cde176f3425 100644 --- a/docs/reference/modules/threadpool.asciidoc +++ b/docs/reference/modules/threadpool.asciidoc @@ -16,7 +16,7 @@ There are several thread pools, but the important ones include: `search`:: For count/search operations. Defaults to `fixed` - with a size of `3x # of available processors`, + with a size of `int((# of available_processors * 3) / 2) + 1`, queue_size of `1000`. `suggest`:: @@ -40,16 +40,16 @@ There are several thread pools, but the important ones include: queue_size of `1000`. `snapshot`:: - For snapshot/restore operations. Defaults to `scaling`, - keep-alive `5m` with a size of `(# of available processors)/2`. + For snapshot/restore operations. Defaults to `scaling` with a + keep-alive of `5m` and a size of `min(5, (# of available processors)/2)`. `warmer`:: - For segment warm-up operations. Defaults to `scaling` - with a `5m` keep-alive. + For segment warm-up operations. Defaults to `scaling` with a + keep-alive of `5m` and a size of `min(5, (# of available processors)/2)`. `refresh`:: - For refresh operations. Defaults to `scaling` - with a `5m` keep-alive. + For refresh operations. Defaults to `scaling` with a + keep-alive of `5m` and a size of `min(10, (# of available processors)/2)`. `listener`:: Mainly for java client executing of action when listener threaded is set to true. @@ -116,6 +116,25 @@ threadpool: queue_size: 1000 -------------------------------------------------- +[float] +==== `scaling` + +The `scaling` thread pool holds a dynamic number of threads. This number is +proportional to the workload and varies between 1 and the value of the +`size` parameter. + +The `keep_alive` parameter determines how long a thread should be kept +around in the thread pool without it doing any work. + +[source,js] +-------------------------------------------------- +threadpool: + warmer: + type: scaling + size: 8 + keep_alive: 2m +-------------------------------------------------- + [float] [[processors]] === Processors setting diff --git a/docs/reference/query-dsl/filtered-query.asciidoc b/docs/reference/query-dsl/filtered-query.asciidoc index 41825b8dbba..0129e6ad1a2 100644 --- a/docs/reference/query-dsl/filtered-query.asciidoc +++ b/docs/reference/query-dsl/filtered-query.asciidoc @@ -18,7 +18,7 @@ documents that remain. 
"match": { "tweet": "full text search" } }, "filter": { - "range": { "created": { "gte": "now - 1d / d" }} + "range": { "created": { "gte": "now-1d/d" }} } } } @@ -37,7 +37,7 @@ curl -XGET localhost:9200/_search -d ' "match": { "tweet": "full text search" } }, "filter": { - "range": { "created": { "gte": "now - 1d / d" }} + "range": { "created": { "gte": "now-1d/d" }} } } } @@ -61,7 +61,7 @@ curl -XGET localhost:9200/_search -d ' "query": { "filtered": { <1> "filter": { - "range": { "created": { "gte": "now - 1d / d" }} + "range": { "created": { "gte": "now-1d/d" }} } } } @@ -83,7 +83,7 @@ Multiple filters can be applied by wrapping them in a "query": { "match": { "tweet": "full text search" }}, "filter": { "bool": { - "must": { "range": { "created": { "gte": "now - 1d / d" }}}, + "must": { "range": { "created": { "gte": "now-1d/d" }}}, "should": [ { "term": { "featured": true }}, { "term": { "starred": true }} diff --git a/docs/reference/query-dsl/match-query.asciidoc b/docs/reference/query-dsl/match-query.asciidoc index e58542818d9..7e1766c0574 100644 --- a/docs/reference/query-dsl/match-query.asciidoc +++ b/docs/reference/query-dsl/match-query.asciidoc @@ -161,7 +161,7 @@ A phrase query matches terms up to a configurable `slop` (which defaults to 0) in any order. Transposed terms have a slop of 2. The `analyzer` can be set to control which analyzer will perform the -analysis process on the text. It default to the field explicit mapping +analysis process on the text. It defaults to the field explicit mapping definition, or the default search analyzer, for example: [source,js] diff --git a/docs/reference/query-dsl/query_filter_context.asciidoc b/docs/reference/query-dsl/query_filter_context.asciidoc index 7ed16959056..6fadc36aa73 100644 --- a/docs/reference/query-dsl/query_filter_context.asciidoc +++ b/docs/reference/query-dsl/query_filter_context.asciidoc @@ -45,7 +45,7 @@ conditions are met: * The `status` field contains the exact word `published`. * The `publish_date` field contains a date from 1 Jan 2015 onwards. -[source,json] +[source,js] ------------------------------------ GET _search { diff --git a/docs/reference/search/field-stats.asciidoc b/docs/reference/search/field-stats.asciidoc index bef6c45f9ef..fb29903ebeb 100644 --- a/docs/reference/search/field-stats.asciidoc +++ b/docs/reference/search/field-stats.asciidoc @@ -100,7 +100,7 @@ curl -XGET "http://localhost:9200/_field_stats?fields=rating,answer_count,creati Response: -[source,json] +[source,js] -------------------------------------------------- { "_shards": { diff --git a/docs/reference/search/request/highlighting.asciidoc b/docs/reference/search/request/highlighting.asciidoc index 7c6c80ebdc6..7a466405789 100644 --- a/docs/reference/search/request/highlighting.asciidoc +++ b/docs/reference/search/request/highlighting.asciidoc @@ -37,6 +37,18 @@ be used for highlighting if it mapped to have `store` set to `true`. The field name supports wildcard notation. For example, using `comment_*` will cause all fields that match the expression to be highlighted. +[[plain-highlighter]] +==== Plain highlighter + +The default choice of highlighter is of type `plain` and uses the Lucene highlighter. +It tries hard to reflect the query matching logic in terms of understanding word importance and any word positioning criteria in phrase queries. + +[WARNING] +If you want to highlight a lot of fields in a lot of documents with complex queries this highlighter will not be fast. 
+In its efforts to accurately reflect query logic it creates a tiny in-memory index and re-runs the original query criteria through +Lucene's query execution planner to get access to low-level match information on the current document. +This is repeated for every field and every document that needs highlighting. If this presents a performance issue in your system consider using an alternative highlighter. + [[postings-highlighter]] ==== Postings highlighter diff --git a/docs/reference/search/request/post-filter.asciidoc b/docs/reference/search/request/post-filter.asciidoc index 5a937941e18..274d14bd698 100644 --- a/docs/reference/search/request/post-filter.asciidoc +++ b/docs/reference/search/request/post-filter.asciidoc @@ -10,7 +10,7 @@ Imagine that you are selling shirts, and the user has specified two filters: Gucci in the search results. Normally you would do this with a <>: -[source,json] +[source,js] -------------------------------------------------- curl -XGET localhost:9200/shirts/_search -d ' { @@ -38,7 +38,7 @@ that would allow the user to limit their search results to red Gucci This can be done with a <>: -[source,json] +[source,js] -------------------------------------------------- curl -XGET localhost:9200/shirts/_search -d ' { @@ -73,7 +73,7 @@ Instead, you want to include shirts of all colors during aggregation, then apply the `colors` filter only to the search results. This is the purpose of the `post_filter`: -[source,json] +[source,js] -------------------------------------------------- curl -XGET localhost:9200/shirts/_search -d ' { diff --git a/docs/reference/search/request/preference.asciidoc b/docs/reference/search/request/preference.asciidoc index 28ec3bd96b8..0d07f29475e 100644 --- a/docs/reference/search/request/preference.asciidoc +++ b/docs/reference/search/request/preference.asciidoc @@ -16,6 +16,13 @@ The `preference` is a query string parameter which can be set to: The operation will go and be executed on the primary shard, and if not available (failover), will execute on other shards. +`_replica`:: + The operation will go and be executed only on a replica shard. + +`_replica_first`:: + The operation will go and be executed only on a replica shard, and if + not available (failover), will execute on other shards. + `_local`:: The operation will prefer to be executed on a local allocated shard if possible. diff --git a/docs/reference/search/request/source-filtering.asciidoc b/docs/reference/search/request/source-filtering.asciidoc index b7770db4f08..8458d37806c 100644 --- a/docs/reference/search/request/source-filtering.asciidoc +++ b/docs/reference/search/request/source-filtering.asciidoc @@ -54,8 +54,8 @@ Finally, for complete control, you can specify both include and exclude patterns { "_source": { "include": [ "obj1.*", "obj2.*" ], - "exclude": [ "*.description" ], - } + "exclude": [ "*.description" ] + }, "query" : { "term" : { "user" : "kimchy" } } diff --git a/docs/reference/search/search-template.asciidoc b/docs/reference/search/search-template.asciidoc index 1b95f376f87..bce95289e8e 100644 --- a/docs/reference/search/search-template.asciidoc +++ b/docs/reference/search/search-template.asciidoc @@ -210,7 +210,7 @@ _section_ markers like `{{#line_no}}`. 
For this reason, the template should either be stored in a file (see <>) or, when used via the REST API, should be written as a string: -[source,json] +[source,js] -------------------- "inline": "{\"query\":{\"filtered\":{\"query\":{\"match\":{\"line\":\"{{text}}\"}},\"filter\":{{{#line_no}}\"range\":{\"line_no\":{{{#start}}\"gte\":\"{{start}}\"{{#end}},{{/end}}{{/start}}{{#end}}\"lte\":\"{{end}}\"{{/end}}}}{{/line_no}}}}}}" -------------------- diff --git a/docs/reference/search/suggesters/completion-suggest.asciidoc b/docs/reference/search/suggesters/completion-suggest.asciidoc index 6a9220f5016..456dc8ba940 100644 --- a/docs/reference/search/suggesters/completion-suggest.asciidoc +++ b/docs/reference/search/suggesters/completion-suggest.asciidoc @@ -219,8 +219,8 @@ The following parameters are supported: See <> for allowed settings. `transpositions`:: - Sets if transpositions should be counted - as one or two changes, defaults to `true` + if set to `true`, transpositions are counted + as one change instead of two, defaults to `true` `min_length`:: Minimum length of the input before fuzzy diff --git a/docs/reference/setup/backup.asciidoc b/docs/reference/setup/backup.asciidoc index 11bb7d3f395..44a0a118bc0 100644 --- a/docs/reference/setup/backup.asciidoc +++ b/docs/reference/setup/backup.asciidoc @@ -29,7 +29,7 @@ To back up a running 0.90.x system: This will prevent indices from being flushed to disk while the backup is in process: -[source,json] +[source,js] ----------------------------------- PUT /_all/_settings { @@ -45,7 +45,7 @@ PUT /_all/_settings This will prevent the cluster from moving data files from one node to another while the backup is in process: -[source,json] +[source,js] ----------------------------------- PUT /_cluster/settings { @@ -67,7 +67,7 @@ array snapshots, backup software). When the backup is complete and data no longer needs to be read from the Elasticsearch data path, allocation and index flushing must be re-enabled: -[source,json] +[source,js] ----------------------------------- PUT /_all/_settings { diff --git a/docs/reference/setup/cluster_restart.asciidoc b/docs/reference/setup/cluster_restart.asciidoc index 03c34121ee4..5e1a55eed1c 100644 --- a/docs/reference/setup/cluster_restart.asciidoc +++ b/docs/reference/setup/cluster_restart.asciidoc @@ -14,7 +14,7 @@ replicate the shards that were on that node to other nodes in the cluster, causing a lot of wasted I/O. This can be avoided by disabling allocation before shutting down a node: -[source,json] +[source,js] -------------------------------------------------- PUT /_cluster/settings { @@ -27,7 +27,7 @@ PUT /_cluster/settings If upgrading from 0.90.x to 1.x, then use these settings instead: -[source,json] +[source,js] -------------------------------------------------- PUT /_cluster/settings { @@ -103,7 +103,7 @@ allows the master to allocate replicas to nodes which already have local shard copies. 
At this point, with all the nodes in the cluster, it is safe
to reenable shard allocation:
 
-[source,json]
+[source,js]
------------------------------------------------------
PUT /_cluster/settings
{
@@ -116,7 +116,7 @@ PUT /_cluster/settings
 
If upgrading from 0.90.x to 1.x, then use these settings instead:
 
-[source,json]
+[source,js]
--------------------------------------------------
PUT /_cluster/settings
{
diff --git a/docs/reference/setup/configuration.asciidoc b/docs/reference/setup/configuration.asciidoc
index 0e08fdf83f0..0719b1c60d3 100644
--- a/docs/reference/setup/configuration.asciidoc
+++ b/docs/reference/setup/configuration.asciidoc
@@ -56,7 +56,7 @@ operating system limits on mmap counts is likely to be too low, which may
result in out of memory exceptions. On Linux, you can increase the limits by
running the following command as `root`:
 
-[source,bash]
+[source,sh]
-------------------------------------
sysctl -w vm.max_map_count=262144
-------------------------------------
diff --git a/docs/reference/setup/rolling_upgrade.asciidoc b/docs/reference/setup/rolling_upgrade.asciidoc
index a3e35da0ae3..5792c34cdbc 100644
--- a/docs/reference/setup/rolling_upgrade.asciidoc
+++ b/docs/reference/setup/rolling_upgrade.asciidoc
@@ -19,7 +19,7 @@ replicate the shards that were on that node to other nodes in the cluster,
causing a lot of wasted I/O. This can be avoided by disabling allocation
before shutting down a node:
 
-[source,json]
+[source,js]
--------------------------------------------------
PUT /_cluster/settings
{
@@ -38,7 +38,7 @@ You may happily continue indexing during the upgrade. However, shard recovery
will be much faster if you temporarily stop non-essential indexing and issue a
<> request:
 
-[source,json]
+[source,js]
--------------------------------------------------
POST /_flush/synced
--------------------------------------------------
@@ -105,7 +105,7 @@ GET _cat/nodes
 
Once the node has joined the cluster, reenable shard allocation to start using
the node:
 
-[source,json]
+[source,js]
--------------------------------------------------
PUT /_cluster/settings
{
diff --git a/docs/reference/setup/upgrade.asciidoc b/docs/reference/setup/upgrade.asciidoc
index 72bdec2a927..15cd90f98dd 100644
--- a/docs/reference/setup/upgrade.asciidoc
+++ b/docs/reference/setup/upgrade.asciidoc
@@ -29,6 +29,8 @@ consult this table:
|1.x |2.x |<>
|=======================================================================
 
+TIP: Also take plugins into consideration when upgrading. Most plugins will have to be upgraded alongside Elasticsearch, although some plugins accessed primarily through the browser (`_site` plugins) may continue to work, provided that the API changes are compatible.
+
include::backup.asciidoc[]
 
include::rolling_upgrade.asciidoc[]
diff --git a/docs/resiliency/index.asciidoc b/docs/resiliency/index.asciidoc
index 8497a02567c..2a055611935 100644
--- a/docs/resiliency/index.asciidoc
+++ b/docs/resiliency/index.asciidoc
@@ -88,6 +88,19 @@ Further issues remain with the retry mechanism:
 
See {GIT}9967[#9967]. (STATUS: ONGOING)
 
+[float]
+=== Wait on incoming joins before electing local node as master (STATUS: ONGOING)
+
+During master election each node pings in order to discover other nodes and validate the liveness of existing
+nodes.
Based on this information, the node either discovers an existing master or, if enough nodes are found
+(see https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-discovery-zen.html#master-election[`discovery.zen.minimum_master_nodes`]), a new master will be elected. Currently, the node that is
+elected as master will update the cluster state to indicate the result of the election. Other nodes will submit
+a join request to the newly elected master node. Instead of immediately processing the election result, the elected master
+node should wait for the incoming joins from other nodes, thus validating that the result of the election is properly applied. As soon as enough
+nodes have sent their join requests (based on the `minimum_master_nodes` setting), the cluster state is updated.
+{GIT}12161[#12161]
+
+
[float]
=== Write index metadata on data nodes where shards allocated (STATUS: ONGOING)
@@ -127,7 +140,7 @@ will operate with its local data type but, if the shard is relocated, the
data type from the cluster state will be applied to the new shard, which
can result in a corrupt shard. To prevent this, new fields should not be added to a shard's
mapping until confirmed by the master.
-{GIT}8688[#8688] (STATUS: ONGOING)
+{GIT}8688[#8688] (STATUS: DONE)
 
[float]
=== Loss of documents during network partition (STATUS: ONGOING)
@@ -161,7 +174,7 @@ Make write calls return the number of total/successful/missing shards in the sam
 
[float]
=== Jepsen Test Failures (STATUS: ONGOING)
 
-We have increased our test coverage to include scenarios tested by Jepsen. We make heavy use of randomization to expand on the scenarios that can be tested and to introduce new error conditions. You can follow the work on the master branch of the https://github.com/elasticsearch/elasticsearch/blob/master/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptions.java[`DiscoveryWithServiceDisruptions` class], where we will add more tests as time progresses.
+We have increased our test coverage to include scenarios tested by Jepsen. We make heavy use of randomization to expand on the scenarios that can be tested and to introduce new error conditions. You can follow the work on the master branch of the https://github.com/elastic/elasticsearch/blob/master/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsTests.java[`DiscoveryWithServiceDisruptions` class], where we will add more tests as time progresses.
[float] === Document guarantees and handling of failure (STATUS: ONGOING) diff --git a/plugins/analysis-icu/pom.xml b/plugins/analysis-icu/pom.xml index 42b3f437c88..f7103c3a979 100644 --- a/plugins/analysis-icu/pom.xml +++ b/plugins/analysis-icu/pom.xml @@ -7,7 +7,7 @@ org.elasticsearch.plugin elasticsearch-plugin - 2.0.0-SNAPSHOT + 2.0.0-beta1-SNAPSHOT elasticsearch-analysis-icu diff --git a/plugins/analysis-kuromoji/pom.xml b/plugins/analysis-kuromoji/pom.xml index 66d4cbd5baf..a8014bf6f08 100644 --- a/plugins/analysis-kuromoji/pom.xml +++ b/plugins/analysis-kuromoji/pom.xml @@ -7,7 +7,7 @@ org.elasticsearch.plugin elasticsearch-plugin - 2.0.0-SNAPSHOT + 2.0.0-beta1-SNAPSHOT elasticsearch-analysis-kuromoji diff --git a/plugins/analysis-phonetic/pom.xml b/plugins/analysis-phonetic/pom.xml index c783a91a174..97db6fa2412 100644 --- a/plugins/analysis-phonetic/pom.xml +++ b/plugins/analysis-phonetic/pom.xml @@ -7,7 +7,7 @@ org.elasticsearch.plugin elasticsearch-plugin - 2.0.0-SNAPSHOT + 2.0.0-beta1-SNAPSHOT elasticsearch-analysis-phonetic diff --git a/plugins/analysis-smartcn/pom.xml b/plugins/analysis-smartcn/pom.xml index 0c279084f8e..8145316eb69 100644 --- a/plugins/analysis-smartcn/pom.xml +++ b/plugins/analysis-smartcn/pom.xml @@ -7,7 +7,7 @@ org.elasticsearch.plugin elasticsearch-plugin - 2.0.0-SNAPSHOT + 2.0.0-beta1-SNAPSHOT elasticsearch-analysis-smartcn diff --git a/plugins/analysis-stempel/pom.xml b/plugins/analysis-stempel/pom.xml index 29bdcafc38b..78f4de95f64 100644 --- a/plugins/analysis-stempel/pom.xml +++ b/plugins/analysis-stempel/pom.xml @@ -7,7 +7,7 @@ org.elasticsearch.plugin elasticsearch-plugin - 2.0.0-SNAPSHOT + 2.0.0-beta1-SNAPSHOT elasticsearch-analysis-stempel diff --git a/plugins/cloud-aws/pom.xml b/plugins/cloud-aws/pom.xml index 9035a9bfb7b..f990a565580 100644 --- a/plugins/cloud-aws/pom.xml +++ b/plugins/cloud-aws/pom.xml @@ -7,7 +7,7 @@ org.elasticsearch.plugin elasticsearch-plugin - 2.0.0-SNAPSHOT + 2.0.0-beta1-SNAPSHOT elasticsearch-cloud-aws diff --git a/plugins/cloud-azure/pom.xml b/plugins/cloud-azure/pom.xml index 86cfd96af45..22d1c860a32 100644 --- a/plugins/cloud-azure/pom.xml +++ b/plugins/cloud-azure/pom.xml @@ -18,7 +18,7 @@ governing permissions and limitations under the License. --> org.elasticsearch.plugin elasticsearch-plugin - 2.0.0-SNAPSHOT + 2.0.0-beta1-SNAPSHOT elasticsearch-cloud-azure diff --git a/plugins/cloud-gce/pom.xml b/plugins/cloud-gce/pom.xml index a42ccc3bba5..65ca80cc506 100644 --- a/plugins/cloud-gce/pom.xml +++ b/plugins/cloud-gce/pom.xml @@ -18,7 +18,7 @@ governing permissions and limitations under the License. --> org.elasticsearch.plugin elasticsearch-plugin - 2.0.0-SNAPSHOT + 2.0.0-beta1-SNAPSHOT elasticsearch-cloud-gce diff --git a/plugins/delete-by-query/pom.xml b/plugins/delete-by-query/pom.xml index c7dfe7227a3..02ef5206940 100644 --- a/plugins/delete-by-query/pom.xml +++ b/plugins/delete-by-query/pom.xml @@ -18,7 +18,7 @@ governing permissions and limitations under the License. 
--> org.elasticsearch.plugin elasticsearch-plugin - 2.0.0-SNAPSHOT + 2.0.0-beta1-SNAPSHOT elasticsearch-delete-by-query diff --git a/plugins/delete-by-query/src/main/java/org/elasticsearch/action/deletebyquery/TransportDeleteByQueryAction.java b/plugins/delete-by-query/src/main/java/org/elasticsearch/action/deletebyquery/TransportDeleteByQueryAction.java index 0a5ac78e2d1..602b0a4cabe 100644 --- a/plugins/delete-by-query/src/main/java/org/elasticsearch/action/deletebyquery/TransportDeleteByQueryAction.java +++ b/plugins/delete-by-query/src/main/java/org/elasticsearch/action/deletebyquery/TransportDeleteByQueryAction.java @@ -31,6 +31,7 @@ import org.elasticsearch.action.search.*; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -59,8 +60,9 @@ public class TransportDeleteByQueryAction extends HandledTransportAction org.elasticsearch.plugin elasticsearch-plugin - 2.0.0-SNAPSHOT + 2.0.0-beta1-SNAPSHOT elasticsearch-lang-javascript diff --git a/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineService.java b/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineService.java index 7aa8b2bbf3f..71d519717c9 100644 --- a/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineService.java +++ b/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineService.java @@ -105,7 +105,7 @@ public class JavaScriptScriptEngineService extends AbstractComponent implements } @Override - public ExecutableScript executable(Object compiledScript, Map vars) { + public ExecutableScript executable(CompiledScript compiledScript, Map vars) { Context ctx = Context.enter(); try { ctx.setWrapFactory(wrapFactory); @@ -117,14 +117,14 @@ public class JavaScriptScriptEngineService extends AbstractComponent implements ScriptableObject.putProperty(scope, entry.getKey(), entry.getValue()); } - return new JavaScriptExecutableScript((Script) compiledScript, scope); + return new JavaScriptExecutableScript((Script) compiledScript.compiled(), scope); } finally { Context.exit(); } } @Override - public SearchScript search(final Object compiledScript, final SearchLookup lookup, @Nullable final Map vars) { + public SearchScript search(final CompiledScript compiledScript, final SearchLookup lookup, @Nullable final Map vars) { Context ctx = Context.enter(); try { ctx.setWrapFactory(wrapFactory); @@ -148,7 +148,7 @@ public class JavaScriptScriptEngineService extends AbstractComponent implements } } - return new JavaScriptSearchScript((Script) compiledScript, scope, leafLookup); + return new JavaScriptSearchScript((Script) compiledScript.compiled(), scope, leafLookup); } }; } finally { @@ -157,11 +157,11 @@ public class JavaScriptScriptEngineService extends AbstractComponent implements } @Override - public Object execute(Object compiledScript, Map vars) { + public Object execute(CompiledScript compiledScript, Map vars) { Context ctx = Context.enter(); ctx.setWrapFactory(wrapFactory); try { - Script script = (Script) compiledScript; + Script script = (Script) compiledScript.compiled(); Scriptable scope = ctx.newObject(globalScope); 
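// A minimal sketch of the calling convention this patch introduces: callers wrap
// the engine-specific compiled artifact in a CompiledScript, and engines unwrap
// it again via compiled(). Illustrative only (the class name and the "example"
// script name are made up); it mirrors the SimpleBench and test changes
// elsewhere in this patch.
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.script.CompiledScript;
import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.script.javascript.JavaScriptScriptEngineService;

import java.util.HashMap;
import java.util.Map;

public class CompiledScriptConventionSketch {
    public static void main(String[] args) {
        JavaScriptScriptEngineService se = new JavaScriptScriptEngineService(Settings.Builder.EMPTY_SETTINGS);

        // The engine still produces a raw, engine-specific artifact...
        Object compiled = se.compile("x + y");

        // ...but it now travels inside CompiledScript, which also carries the
        // script type, a name used for error reporting, and the language.
        CompiledScript compiledScript = new CompiledScript(
                ScriptService.ScriptType.INLINE, "example", "js", compiled);

        Map<String, Object> vars = new HashMap<>();
        vars.put("x", 1);
        vars.put("y", 2);

        // Engines unwrap the artifact themselves via compiledScript.compiled().
        ExecutableScript script = se.executable(compiledScript, vars);
        System.out.println(((Number) script.run()).intValue()); // 3
    }
}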
scope.setPrototype(globalScope); scope.setParentScope(null); diff --git a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineTests.java b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineTests.java index 9bb3543b012..e596bf8b3ae 100644 --- a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineTests.java +++ b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineTests.java @@ -21,7 +21,9 @@ package org.elasticsearch.script.javascript; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.ExecutableScript; +import org.elasticsearch.script.ScriptService; import org.elasticsearch.test.ElasticsearchTestCase; import org.junit.After; import org.junit.Before; @@ -55,7 +57,7 @@ public class JavaScriptScriptEngineTests extends ElasticsearchTestCase { @Test public void testSimpleEquation() { Map vars = new HashMap(); - Object o = se.execute(se.compile("1 + 2"), vars); + Object o = se.execute(new CompiledScript(ScriptService.ScriptType.INLINE, "testSimpleEquation", "js", se.compile("1 + 2")), vars); assertThat(((Number) o).intValue(), equalTo(3)); } @@ -66,20 +68,21 @@ public class JavaScriptScriptEngineTests extends ElasticsearchTestCase { Map obj2 = MapBuilder.newMapBuilder().put("prop2", "value2").map(); Map obj1 = MapBuilder.newMapBuilder().put("prop1", "value1").put("obj2", obj2).put("l", Arrays.asList("2", "1")).map(); vars.put("obj1", obj1); - Object o = se.execute(se.compile("obj1"), vars); + Object o = se.execute(new CompiledScript(ScriptService.ScriptType.INLINE, "testMapAccess", "js", se.compile("obj1")), vars); assertThat(o, instanceOf(Map.class)); obj1 = (Map) o; assertThat((String) obj1.get("prop1"), equalTo("value1")); assertThat((String) ((Map) obj1.get("obj2")).get("prop2"), equalTo("value2")); - o = se.execute(se.compile("obj1.l[0]"), vars); + o = se.execute(new CompiledScript(ScriptService.ScriptType.INLINE, "testMapAccess", "js", se.compile("obj1.l[0]")), vars); assertThat(((String) o), equalTo("2")); } @Test public void testJavaScriptObjectToMap() { Map vars = new HashMap(); - Object o = se.execute(se.compile("var obj1 = {}; obj1.prop1 = 'value1'; obj1.obj2 = {}; obj1.obj2.prop2 = 'value2'; obj1"), vars); + Object o = se.execute(new CompiledScript(ScriptService.ScriptType.INLINE, "testJavaScriptObjectToMap", "js", + se.compile("var obj1 = {}; obj1.prop1 = 'value1'; obj1.obj2 = {}; obj1.obj2.prop2 = 'value2'; obj1")), vars); Map obj1 = (Map) o; assertThat((String) obj1.get("prop1"), equalTo("value1")); assertThat((String) ((Map) obj1.get("obj2")).get("prop2"), equalTo("value2")); @@ -94,7 +97,8 @@ public class JavaScriptScriptEngineTests extends ElasticsearchTestCase { ctx.put("obj1", obj1); vars.put("ctx", ctx); - se.execute(se.compile("ctx.obj2 = {}; ctx.obj2.prop2 = 'value2'; ctx.obj1.prop1 = 'uvalue1'"), vars); + se.execute(new CompiledScript(ScriptService.ScriptType.INLINE, "testJavaScriptObjectMapInter", "js", + se.compile("ctx.obj2 = {}; ctx.obj2.prop2 = 'value2'; ctx.obj1.prop1 = 'uvalue1'")), vars); ctx = (Map) se.unwrap(vars.get("ctx")); assertThat(ctx.containsKey("obj1"), equalTo(true)); assertThat((String) ((Map) ctx.get("obj1")).get("prop1"), equalTo("uvalue1")); @@ -108,8 +112,9 @@ public class JavaScriptScriptEngineTests extends ElasticsearchTestCase { Map doc = 
new HashMap(); ctx.put("doc", doc); - Object complied = se.compile("ctx.doc.field1 = ['value1', 'value2']"); - ExecutableScript script = se.executable(complied, new HashMap()); + Object compiled = se.compile("ctx.doc.field1 = ['value1', 'value2']"); + ExecutableScript script = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testJavaScriptInnerArrayCreation", "js", + compiled), new HashMap()); script.setNextVar("ctx", ctx); script.run(); @@ -125,18 +130,22 @@ public class JavaScriptScriptEngineTests extends ElasticsearchTestCase { Map obj1 = MapBuilder.newMapBuilder().put("prop1", "value1").put("obj2", obj2).map(); vars.put("l", Arrays.asList("1", "2", "3", obj1)); - Object o = se.execute(se.compile("l.length"), vars); + Object o = se.execute(new CompiledScript(ScriptService.ScriptType.INLINE, "testAccessInScript", "js", + se.compile("l.length")), vars); assertThat(((Number) o).intValue(), equalTo(4)); - o = se.execute(se.compile("l[0]"), vars); + o = se.execute(new CompiledScript(ScriptService.ScriptType.INLINE, "testAccessInScript", "js", + se.compile("l[0]")), vars); assertThat(((String) o), equalTo("1")); - o = se.execute(se.compile("l[3]"), vars); + o = se.execute(new CompiledScript(ScriptService.ScriptType.INLINE, "testAccessInScript", "js", + se.compile("l[3]")), vars); obj1 = (Map) o; assertThat((String) obj1.get("prop1"), equalTo("value1")); assertThat((String) ((Map) obj1.get("obj2")).get("prop2"), equalTo("value2")); - o = se.execute(se.compile("l[3].prop1"), vars); + o = se.execute(new CompiledScript(ScriptService.ScriptType.INLINE, "testAccessInScript", "js", + se.compile("l[3].prop1")), vars); assertThat(((String) o), equalTo("value1")); } @@ -147,7 +156,8 @@ public class JavaScriptScriptEngineTests extends ElasticsearchTestCase { vars.put("ctx", ctx); Object compiledScript = se.compile("ctx.value"); - ExecutableScript script = se.executable(compiledScript, vars); + ExecutableScript script = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testChangingVarsCrossExecution1", "js", + compiledScript), vars); ctx.put("value", 1); Object o = script.run(); assertThat(((Number) o).intValue(), equalTo(1)); @@ -162,7 +172,8 @@ public class JavaScriptScriptEngineTests extends ElasticsearchTestCase { Map vars = new HashMap(); Object compiledScript = se.compile("value"); - ExecutableScript script = se.executable(compiledScript, vars); + ExecutableScript script = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testChangingVarsCrossExecution2", "js", + compiledScript), vars); script.setNextVar("value", 1); Object o = script.run(); assertThat(((Number) o).intValue(), equalTo(1)); diff --git a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptMultiThreadedTest.java b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptMultiThreadedTest.java index c235128e83f..baf10625cdf 100644 --- a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptMultiThreadedTest.java +++ b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptMultiThreadedTest.java @@ -20,7 +20,9 @@ package org.elasticsearch.script.javascript; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.ExecutableScript; +import org.elasticsearch.script.ScriptService; import org.elasticsearch.test.ElasticsearchTestCase; import org.junit.Test; @@ -59,7 +61,7 @@ 
public class JavaScriptScriptMultiThreadedTest extends ElasticsearchTestCase { Map vars = new HashMap(); vars.put("x", x); vars.put("y", y); - ExecutableScript script = se.executable(compiled, vars); + ExecutableScript script = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testExecutableNoRuntimeParams", "js", compiled), vars); for (int i = 0; i < 100000; i++) { long result = ((Number) script.run()).longValue(); assertThat(result, equalTo(addition)); @@ -100,7 +102,7 @@ public class JavaScriptScriptMultiThreadedTest extends ElasticsearchTestCase { long x = ThreadLocalRandom.current().nextInt(); Map vars = new HashMap(); vars.put("x", x); - ExecutableScript script = se.executable(compiled, vars); + ExecutableScript script = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testExecutableNoRuntimeParams", "js", compiled), vars); for (int i = 0; i < 100000; i++) { long y = ThreadLocalRandom.current().nextInt(); long addition = x + y; @@ -147,7 +149,7 @@ public class JavaScriptScriptMultiThreadedTest extends ElasticsearchTestCase { long addition = x + y; runtimeVars.put("x", x); runtimeVars.put("y", y); - long result = ((Number) se.execute(compiled, runtimeVars)).longValue(); + long result = ((Number) se.execute(new CompiledScript(ScriptService.ScriptType.INLINE, "testExecutableNoRuntimeParams", "js", compiled), runtimeVars)).longValue(); assertThat(result, equalTo(addition)); } } catch (Throwable t) { diff --git a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/SimpleBench.java b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/SimpleBench.java index e0b47c8a919..9cb44ef43e4 100644 --- a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/SimpleBench.java +++ b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/SimpleBench.java @@ -21,7 +21,9 @@ package org.elasticsearch.script.javascript; import org.elasticsearch.common.StopWatch; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.ExecutableScript; +import org.elasticsearch.script.ScriptService; import java.util.HashMap; import java.util.Map; @@ -34,32 +36,33 @@ public class SimpleBench { public static void main(String[] args) { JavaScriptScriptEngineService se = new JavaScriptScriptEngineService(Settings.Builder.EMPTY_SETTINGS); Object compiled = se.compile("x + y"); + CompiledScript compiledScript = new CompiledScript(ScriptService.ScriptType.INLINE, "testExecutableNoRuntimeParams", "js", compiled); Map vars = new HashMap(); // warm up for (int i = 0; i < 1000; i++) { vars.put("x", i); vars.put("y", i + 1); - se.execute(compiled, vars); + se.execute(compiledScript, vars); } final long ITER = 100000; StopWatch stopWatch = new StopWatch().start(); for (long i = 0; i < ITER; i++) { - se.execute(compiled, vars); + se.execute(compiledScript, vars); } System.out.println("Execute Took: " + stopWatch.stop().lastTaskTime()); stopWatch = new StopWatch().start(); - ExecutableScript executableScript = se.executable(compiled, vars); + ExecutableScript executableScript = se.executable(compiledScript, vars); for (long i = 0; i < ITER; i++) { executableScript.run(); } System.out.println("Executable Took: " + stopWatch.stop().lastTaskTime()); stopWatch = new StopWatch().start(); - executableScript = se.executable(compiled, vars); + executableScript = se.executable(compiledScript, vars); for (long i = 0; i < ITER; i++) { for (Map.Entry entry : 
vars.entrySet()) { executableScript.setNextVar(entry.getKey(), entry.getValue()); diff --git a/plugins/lang-python/pom.xml b/plugins/lang-python/pom.xml index 86458279318..fb4529571b3 100644 --- a/plugins/lang-python/pom.xml +++ b/plugins/lang-python/pom.xml @@ -7,7 +7,7 @@ org.elasticsearch.plugin elasticsearch-plugin - 2.0.0-SNAPSHOT + 2.0.0-beta1-SNAPSHOT elasticsearch-lang-python diff --git a/plugins/lang-python/src/main/java/org/elasticsearch/script/python/PythonScriptEngineService.java b/plugins/lang-python/src/main/java/org/elasticsearch/script/python/PythonScriptEngineService.java index 6138453925e..3e3fb7f542e 100644 --- a/plugins/lang-python/src/main/java/org/elasticsearch/script/python/PythonScriptEngineService.java +++ b/plugins/lang-python/src/main/java/org/elasticsearch/script/python/PythonScriptEngineService.java @@ -78,26 +78,26 @@ public class PythonScriptEngineService extends AbstractComponent implements Scri } @Override - public ExecutableScript executable(Object compiledScript, Map vars) { - return new PythonExecutableScript((PyCode) compiledScript, vars); + public ExecutableScript executable(CompiledScript compiledScript, Map vars) { + return new PythonExecutableScript((PyCode) compiledScript.compiled(), vars); } @Override - public SearchScript search(final Object compiledScript, final SearchLookup lookup, @Nullable final Map vars) { + public SearchScript search(final CompiledScript compiledScript, final SearchLookup lookup, @Nullable final Map vars) { return new SearchScript() { @Override public LeafSearchScript getLeafSearchScript(LeafReaderContext context) throws IOException { final LeafSearchLookup leafLookup = lookup.getLeafSearchLookup(context); - return new PythonSearchScript((PyCode) compiledScript, vars, leafLookup); + return new PythonSearchScript((PyCode) compiledScript.compiled(), vars, leafLookup); } }; } @Override - public Object execute(Object compiledScript, Map vars) { + public Object execute(CompiledScript compiledScript, Map vars) { PyObject pyVars = Py.java2py(vars); interp.setLocals(pyVars); - PyObject ret = interp.eval((PyCode) compiledScript); + PyObject ret = interp.eval((PyCode) compiledScript.compiled()); if (ret == null) { return null; } diff --git a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptEngineTests.java b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptEngineTests.java index 1621d22ac01..94528f2b0e1 100644 --- a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptEngineTests.java +++ b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptEngineTests.java @@ -21,7 +21,9 @@ package org.elasticsearch.script.python; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.ExecutableScript; +import org.elasticsearch.script.ScriptService; import org.elasticsearch.test.ElasticsearchTestCase; import org.junit.After; import org.junit.Before; @@ -57,7 +59,7 @@ public class PythonScriptEngineTests extends ElasticsearchTestCase { @Test public void testSimpleEquation() { Map vars = new HashMap(); - Object o = se.execute(se.compile("1 + 2"), vars); + Object o = se.execute(new CompiledScript(ScriptService.ScriptType.INLINE, "testSimpleEquation", "python", se.compile("1 + 2")), vars); assertThat(((Number) o).intValue(), equalTo(3)); } @@ -68,13 +70,13 @@ public class PythonScriptEngineTests extends 
ElasticsearchTestCase { Map obj2 = MapBuilder.newMapBuilder().put("prop2", "value2").map(); Map obj1 = MapBuilder.newMapBuilder().put("prop1", "value1").put("obj2", obj2).put("l", Arrays.asList("2", "1")).map(); vars.put("obj1", obj1); - Object o = se.execute(se.compile("obj1"), vars); + Object o = se.execute(new CompiledScript(ScriptService.ScriptType.INLINE, "testMapAccess", "python", se.compile("obj1")), vars); assertThat(o, instanceOf(Map.class)); obj1 = (Map) o; assertThat((String) obj1.get("prop1"), equalTo("value1")); assertThat((String) ((Map) obj1.get("obj2")).get("prop2"), equalTo("value2")); - o = se.execute(se.compile("obj1['l'][0]"), vars); + o = se.execute(new CompiledScript(ScriptService.ScriptType.INLINE, "testMapAccess", "python", se.compile("obj1['l'][0]")), vars); assertThat(((String) o), equalTo("2")); } @@ -87,7 +89,8 @@ public class PythonScriptEngineTests extends ElasticsearchTestCase { ctx.put("obj1", obj1); vars.put("ctx", ctx); - se.execute(se.compile("ctx['obj2'] = { 'prop2' : 'value2' }; ctx['obj1']['prop1'] = 'uvalue1'"), vars); + se.execute(new CompiledScript(ScriptService.ScriptType.INLINE, "testObjectInterMap", "python", + se.compile("ctx['obj2'] = { 'prop2' : 'value2' }; ctx['obj1']['prop1'] = 'uvalue1'")), vars); ctx = (Map) se.unwrap(vars.get("ctx")); assertThat(ctx.containsKey("obj1"), equalTo(true)); assertThat((String) ((Map) ctx.get("obj1")).get("prop1"), equalTo("uvalue1")); @@ -106,15 +109,15 @@ public class PythonScriptEngineTests extends ElasticsearchTestCase { // Object o = se.execute(se.compile("l.length"), vars); // assertThat(((Number) o).intValue(), equalTo(4)); - Object o = se.execute(se.compile("l[0]"), vars); + Object o = se.execute(new CompiledScript(ScriptService.ScriptType.INLINE, "testAccessListInScript", "python", se.compile("l[0]")), vars); assertThat(((String) o), equalTo("1")); - o = se.execute(se.compile("l[3]"), vars); + o = se.execute(new CompiledScript(ScriptService.ScriptType.INLINE, "testAccessListInScript", "python", se.compile("l[3]")), vars); obj1 = (Map) o; assertThat((String) obj1.get("prop1"), equalTo("value1")); assertThat((String) ((Map) obj1.get("obj2")).get("prop2"), equalTo("value2")); - o = se.execute(se.compile("l[3]['prop1']"), vars); + o = se.execute(new CompiledScript(ScriptService.ScriptType.INLINE, "testAccessListInScript", "python", se.compile("l[3]['prop1']")), vars); assertThat(((String) o), equalTo("value1")); } @@ -125,7 +128,7 @@ public class PythonScriptEngineTests extends ElasticsearchTestCase { vars.put("ctx", ctx); Object compiledScript = se.compile("ctx['value']"); - ExecutableScript script = se.executable(compiledScript, vars); + ExecutableScript script = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testChangingVarsCrossExecution1", "python", compiledScript), vars); ctx.put("value", 1); Object o = script.run(); assertThat(((Number) o).intValue(), equalTo(1)); @@ -141,7 +144,7 @@ public class PythonScriptEngineTests extends ElasticsearchTestCase { Map ctx = new HashMap(); Object compiledScript = se.compile("value"); - ExecutableScript script = se.executable(compiledScript, vars); + ExecutableScript script = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testChangingVarsCrossExecution2", "python", compiledScript), vars); script.setNextVar("value", 1); Object o = script.run(); assertThat(((Number) o).intValue(), equalTo(1)); diff --git a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptMultiThreadedTest.java 
b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptMultiThreadedTest.java index 9d53507388b..13d1d2b7a05 100644 --- a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptMultiThreadedTest.java +++ b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptMultiThreadedTest.java @@ -20,7 +20,9 @@ package org.elasticsearch.script.python; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.ExecutableScript; +import org.elasticsearch.script.ScriptService; import org.elasticsearch.test.ElasticsearchTestCase; import org.junit.After; import org.junit.Test; @@ -50,6 +52,7 @@ public class PythonScriptMultiThreadedTest extends ElasticsearchTestCase { public void testExecutableNoRuntimeParams() throws Exception { final PythonScriptEngineService se = new PythonScriptEngineService(Settings.Builder.EMPTY_SETTINGS); final Object compiled = se.compile("x + y"); + final CompiledScript compiledScript = new CompiledScript(ScriptService.ScriptType.INLINE, "testExecutableNoRuntimeParams", "python", compiled); final AtomicBoolean failed = new AtomicBoolean(); Thread[] threads = new Thread[4]; @@ -67,7 +70,7 @@ public class PythonScriptMultiThreadedTest extends ElasticsearchTestCase { Map vars = new HashMap(); vars.put("x", x); vars.put("y", y); - ExecutableScript script = se.executable(compiled, vars); + ExecutableScript script = se.executable(compiledScript, vars); for (int i = 0; i < 10000; i++) { long result = ((Number) script.run()).longValue(); assertThat(result, equalTo(addition)); @@ -136,6 +139,7 @@ public class PythonScriptMultiThreadedTest extends ElasticsearchTestCase { public void testExecute() throws Exception { final PythonScriptEngineService se = new PythonScriptEngineService(Settings.Builder.EMPTY_SETTINGS); final Object compiled = se.compile("x + y"); + final CompiledScript compiledScript = new CompiledScript(ScriptService.ScriptType.INLINE, "testExecute", "python", compiled); final AtomicBoolean failed = new AtomicBoolean(); Thread[] threads = new Thread[4]; @@ -154,7 +158,7 @@ public class PythonScriptMultiThreadedTest extends ElasticsearchTestCase { long addition = x + y; runtimeVars.put("x", x); runtimeVars.put("y", y); - long result = ((Number) se.execute(compiled, runtimeVars)).longValue(); + long result = ((Number) se.execute(compiledScript, runtimeVars)).longValue(); assertThat(result, equalTo(addition)); } } catch (Throwable t) { diff --git a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/SimpleBench.java b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/SimpleBench.java index 583bab163fa..4fab7dd8fb9 100644 --- a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/SimpleBench.java +++ b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/SimpleBench.java @@ -21,7 +21,9 @@ package org.elasticsearch.script.python; import org.elasticsearch.common.StopWatch; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.ExecutableScript; +import org.elasticsearch.script.ScriptService; import java.util.HashMap; import java.util.Map; @@ -34,32 +36,34 @@ public class SimpleBench { public static void main(String[] args) { PythonScriptEngineService se = new PythonScriptEngineService(Settings.Builder.EMPTY_SETTINGS); Object compiled = se.compile("x + y"); + CompiledScript compiledScript = new 
CompiledScript(ScriptService.ScriptType.INLINE, "SimpleBench", "python", compiled); + Map vars = new HashMap(); // warm up for (int i = 0; i < 1000; i++) { vars.put("x", i); vars.put("y", i + 1); - se.execute(compiled, vars); + se.execute(compiledScript, vars); } final long ITER = 100000; StopWatch stopWatch = new StopWatch().start(); for (long i = 0; i < ITER; i++) { - se.execute(compiled, vars); + se.execute(compiledScript, vars); } System.out.println("Execute Took: " + stopWatch.stop().lastTaskTime()); stopWatch = new StopWatch().start(); - ExecutableScript executableScript = se.executable(compiled, vars); + ExecutableScript executableScript = se.executable(compiledScript, vars); for (long i = 0; i < ITER; i++) { executableScript.run(); } System.out.println("Executable Took: " + stopWatch.stop().lastTaskTime()); stopWatch = new StopWatch().start(); - executableScript = se.executable(compiled, vars); + executableScript = se.executable(compiledScript, vars); for (long i = 0; i < ITER; i++) { for (Map.Entry entry : vars.entrySet()) { executableScript.setNextVar(entry.getKey(), entry.getValue()); diff --git a/plugins/pom.xml b/plugins/pom.xml index f1ec7d66ef8..78bca543737 100644 --- a/plugins/pom.xml +++ b/plugins/pom.xml @@ -7,7 +7,7 @@ org.elasticsearch.plugin elasticsearch-plugin - 2.0.0-SNAPSHOT + 2.0.0-beta1-SNAPSHOT pom Elasticsearch Plugin POM 2009 @@ -15,14 +15,12 @@ org.elasticsearch elasticsearch-parent - 2.0.0-SNAPSHOT + 2.0.0-beta1-SNAPSHOT ${basedir}/src/main/assemblies/plugin.xml false - ${elasticsearch.tools.directory}/ant/integration-tests.xml - ${elasticsearch.integ.antfile.default} @@ -43,12 +41,6 @@ test-jar test - - org.elasticsearch - elasticsearch - zip - test - @@ -315,8 +307,7 @@ - + @@ -329,22 +320,14 @@ - + + + - - org.apache.maven.plugins - maven-failsafe-plugin - - - 127.0.0.1:9300 - - - diff --git a/pom.xml b/pom.xml index 97a55f618fd..f6ba66bbe65 100644 --- a/pom.xml +++ b/pom.xml @@ -6,7 +6,7 @@ org.elasticsearch elasticsearch-parent - 2.0.0-SNAPSHOT + 2.0.0-beta1-SNAPSHOT pom Elasticsearch Parent POM Elasticsearch Parent POM @@ -35,7 +35,7 @@ UTF-8 - 2.0.0-SNAPSHOT + ${project.version} ${java.home}${file.separator}bin${file.separator}java 1.7 1.7 @@ -43,7 +43,7 @@ 5.2.1 5.2.1 - 2.1.14 + 2.1.15 2.5.3 1.6.2 1.2.17 @@ -53,6 +53,8 @@ unshaded ${elasticsearch.tools.directory}/license-check/elasticsearch_license_header.txt ${elasticsearch.tools.directory}/license-check/license_header_definition.xml + ${elasticsearch.tools.directory}/ant/integration-tests.xml + ${elasticsearch.integ.antfile.default} auto @@ -105,7 +107,9 @@ ${skipTests} ${skipTests} ${project.build.directory}/integ-tests + ${project.build.directory}/integ-deps ${integ.scratch}/temp + \bno(n|)commit\b @@ -375,7 +379,7 @@ io.netty netty - 3.10.0.Final + 3.10.3.Final @@ -407,7 +411,7 @@ org.codehaus.groovy groovy-all - 2.4.0 + 2.4.4 indy @@ -455,10 +459,18 @@ + + org.apache.maven.plugins + maven-enforcer-plugin + org.apache.maven.plugins maven-compiler-plugin + + org.apache.maven.plugins + maven-dependency-plugin + org.codehaus.mojo buildnumber-maven-plugin @@ -467,6 +479,11 @@ com.carrotsearch.randomizedtesting junit4-maven-plugin + + + org.apache.maven.plugins + maven-failsafe-plugin + org.apache.maven.plugins maven-source-plugin @@ -501,11 +518,6 @@ maven-deploy-plugin 2.8.2 - - org.apache.maven.plugins - maven-install-plugin - 2.5.2 - org.apache.maven.plugins maven-gpg-plugin @@ -529,6 +541,13 @@ + + print-versions + validate + + display-info + + @@ -554,6 +573,116 @@ 
com.carrotsearch.randomizedtesting junit4-maven-plugin ${testframework.version} + + + ${jvm.executable} + ${tests.jvm.argline} + 10 + warn + true + ${tests.jvms} + ${tests.ifNoTests} + + + + + + + + + + + + -Xmx${tests.heap.size} + -Xms${tests.heap.size} + ${java.permGenSpace} + -XX:MaxDirectMemorySize=512m + -Des.logger.prefix= + -XX:+HeapDumpOnOutOfMemoryError + -XX:HeapDumpPath=${tests.heapdump.path} + + ${tests.shuffle} + ${tests.verbose} + ${tests.seed} + + true + + + ./temp + + ${tests.bwc} + ${tests.bwc.path} + ${tests.bwc.version} + ${tests.jvm.argline} + ${tests.appendseed} + ${tests.cluster} + ${tests.iters} + ${tests.maxfailures} + ${tests.failfast} + ${tests.class} + ${tests.method} + ${tests.nightly} + ${tests.verbose} + ${tests.badapples} + ${tests.weekly} + ${tests.failfast} + ${tests.awaitsfix} + ${tests.slow} + ${tests.timeoutSuite} + ${tests.showSuccess} + ${tests.integration} + ${tests.thirdparty} + ${tests.config} + ${tests.client.ratio} + ${tests.enable_mock_modules} + ${tests.assertion.disabled} + ${tests.rest} + ${tests.rest.suite} + ${tests.rest.blacklist} + ${tests.rest.spec} + ${tests.network} + ${tests.heap.size} + ${tests.filter} + ${elasticsearch.version} + ${tests.locale} + ${tests.rest.load_packaged} + ${tests.timezone} + ${env.ES_TEST_LOCAL} + ${es.node.mode} + ${es.logger.level} + ${tests.security.manager} + ${tests.compatibility} + true + + true + + + + + + + + + + + + + + + + + + tests @@ -562,51 +691,11 @@ junit4 - ${jvm.executable} - ${tests.jvm.argline} ${skip.unit.tests} - 10 - warn - true - ${tests.ifNoTests} - - - - - - - - - - - - - - - - - - - - ${tests.jvms} - - - - - **/*Tests.class **/*Test.class @@ -615,72 +704,35 @@ **/Abstract*.class **/*StressTest.class - - -Xmx${tests.heap.size} - -Xms${tests.heap.size} - ${java.permGenSpace} - -XX:MaxDirectMemorySize=512m - -Des.logger.prefix= - -XX:+HeapDumpOnOutOfMemoryError - -XX:HeapDumpPath=${tests.heapdump.path} - - ${tests.shuffle} - ${tests.verbose} - ${tests.seed} - ${tests.failfast} - - true + + + + integ-tests + integration-test + + junit4 + + + false + ${skip.integ.tests} + + + + + 1 + + **/*IT.class + - - ./temp - - ${tests.bwc} - ${tests.bwc.path} - ${tests.bwc.version} - ${tests.jvm.argline} - ${tests.appendseed} - ${tests.iters} - ${tests.maxfailures} - ${tests.failfast} - ${tests.class} - ${tests.method} - ${tests.nightly} - ${tests.verbose} - ${tests.badapples} - ${tests.weekly} - ${tests.awaitsfix} - ${tests.slow} - ${tests.timeoutSuite} - ${tests.showSuccess} - ${tests.integration} - ${tests.thirdparty} - ${tests.config} - ${tests.client.ratio} - ${tests.enable_mock_modules} - ${tests.assertion.disabled} - ${tests.rest} - ${tests.rest.suite} - ${tests.rest.blacklist} - ${tests.rest.spec} - ${tests.network} - ${tests.cluster} - ${tests.heap.size} - ${tests.filter} - ${elasticsearch.version} - ${tests.locale} - ${tests.rest.load_packaged} - ${tests.timezone} - ${project.basedir} - ${settings.localRepository} - ${env.ES_TEST_LOCAL} - ${es.node.mode} - ${es.logger.level} - ${tests.security.manager} - ${tests.compatibility} - true - - true + + true + + 127.0.0.1:9300 + + true @@ -700,23 +752,9 @@ maven-failsafe-plugin 2.18.1 - ${skip.integ.tests} - - ${es.logger.level} - ${tests.rest.suite} - ${tests.rest.blacklist} - ${tests.rest.spec} - ${tests.rest.load_packaged} - ${integ.temp} - + ${skip.integ.tests} - - integration-test - - integration-test - - verify @@ -858,10 +896,31 @@ - org.apache.maven.plugins maven-dependency-plugin 2.10 + + + integ-setup-dependencies + pre-integration-test + + 
copy + + + + + org.elasticsearch + elasticsearch + ${elasticsearch.version} + zip + true + + + true + ${integ.deps} + + + @@ -929,115 +988,6 @@ - - - - org.eclipse.m2e - lifecycle-mapping - 1.0.0 - - - - - - - org.apache.maven.plugins - maven-dependency-plugin - [1.0.0,) - - copy-dependencies - - - - - - - - - - de.thetaphi - forbiddenapis - [1.0.0,) - - testCheck - check - - - - - - - - - - exec-maven-plugin - org.codehaus.mojo - [1.0.0,) - - exec - - - - - - - - - - org.apache.maven.plugins - maven-enforcer-plugin - [1.0.0,) - - enforce - - - - - - - - - org.apache.maven.plugins - maven-antrun-plugin - [1.0.0,) - - run - - - - - - - - - org.apache.maven.plugins - maven-resources-plugin - [1.0.0,) - - copy-resources - - - - - - - - - com.mycila - license-maven-plugin - [1.0.0,) - - check - - - - - - - - - - org.apache.maven.plugins maven-eclipse-plugin @@ -1185,18 +1135,6 @@ org.eclipse.jdt.ui.text.custom_code_templates= - - print-jvm - validate - - run - - - - Using ${java.runtime.name} ${java.runtime.version} ${java.vendor} - - - set-permgen validate @@ -1235,8 +1173,8 @@ org.eclipse.jdt.ui.text.custom_code_templates= - + @@ -1268,7 +1206,7 @@ org.eclipse.jdt.ui.text.custom_code_templates= - + @@ -1308,8 +1246,23 @@ org.eclipse.jdt.ui.text.custom_code_templates=maven-checkstyle-plugin 2.15 + + org.apache.maven.plugins + maven-install-plugin + 2.5.2 + + true + + + + + org.springframework.build + aws-maven + 5.0.0.RELEASE + + @@ -1326,6 +1279,50 @@ org.eclipse.jdt.ui.text.custom_code_templates= + + release + + true + \bno(n|)(release|commit)\b + org.apache.lucene.util.LuceneTestCase$AwaitsFix @ Please fix all bugs before release or mark them as ignored + + + + + + org.apache.maven.plugins + maven-gpg-plugin + 1.6 + + + sign-artifacts + verify + + sign + + + ${gpg.keyname} + ${gpg.passphrase} + ${gpg.keyring} + + + + + + + + + aws-release + AWS Release Repository + s3://download.elasticsearch.org/elasticsearch/release + + + aws-snapshot + AWS Snapshot Repository + s3://download.elasticsearch.org/elasticsearch/snapshot + + + license @@ -1492,7 +1489,6 @@ org.eclipse.jdt.ui.text.custom_code_templates= - securemock dev-tools rest-api-spec plugins diff --git a/rest-api-spec/pom.xml b/rest-api-spec/pom.xml index 46f2e6e7213..3b14e1722cd 100644 --- a/rest-api-spec/pom.xml +++ b/rest-api-spec/pom.xml @@ -2,7 +2,7 @@ 4.0.0 org.elasticsearch elasticsearch-rest-api-spec - 2.0.0-SNAPSHOT + 2.0.0-beta1-SNAPSHOT Elasticsearch Rest API Spec org.sonatype.oss diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.shard_stores.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.shard_stores.json new file mode 100644 index 00000000000..0c179c6c7ac --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.shard_stores.json @@ -0,0 +1,41 @@ +{ + "indices.shard_stores": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-shard-stores.html", + "methods": ["GET"], + "url": { + "path": "/_shard_stores", + "paths": ["/_shard_stores", "/{index}/_shard_stores"], + "parts": { + "index": { + "type" : "list", + "description" : "A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices" + } + }, + "params": { + "status" : { + "type" : "list", + "options" : ["green", "yellow", "red", "all"], + "description" : "A comma-separated list of statuses used to filter on shards to get store information for" + }, + "ignore_unavailable": { + "type" : "boolean", + "description" : "Whether 
specified concrete indices should be ignored when unavailable (missing or closed)" + }, + "allow_no_indices": { + "type" : "boolean", + "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)" + }, + "expand_wildcards": { + "type" : "enum", + "options" : ["open","closed","none","all"], + "default" : "open", + "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both." + }, + "operation_threading": { + "description" : "TODO: ?" + } + } + }, + "body": null + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodes/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodes/10_basic.yaml index 5b2ddb7fc38..12545f0a64b 100755 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodes/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodes/10_basic.yaml @@ -8,7 +8,7 @@ - match: $body: | / #host ip heap.percent ram.percent load node.role master name - ^ (\S+ \s+ (\d{1,3}\.){3}\d{1,3} \s+ \d+ \s+ \d* \s+ \d*(\.\d+)? \s+ [-dc] \s+ [-*mx] \s+ (\S+\s?)+ \s+ \n)+ $/ + ^ (\S+ \s+ (\d{1,3}\.){3}\d{1,3} \s+ \d+ \s+ \d* \s+ (-)?\d*(\.\d+)? \s+ [-dc] \s+ [-*mx] \s+ (\S+\s?)+ \s+ \n)+ $/ - do: cat.nodes: @@ -17,7 +17,7 @@ - match: $body: | /^ host \s+ ip \s+ heap\.percent \s+ ram\.percent \s+ load \s+ node\.role \s+ master \s+ name \s+ \n - (\S+ \s+ (\d{1,3}\.){3}\d{1,3} \s+ \d+ \s+ \d* \s+ \d*(\.\d+)? \s+ [-dc] \s+ [-*mx] \s+ (\S+\s?)+ \s+ \n)+ $/ + (\S+ \s+ (\d{1,3}\.){3}\d{1,3} \s+ \d+ \s+ \d* \s+ (-)?\d*(\.\d+)? \s+ [-dc] \s+ [-*mx] \s+ (\S+\s?)+ \s+ \n)+ $/ - do: cat.nodes: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml index 0efb307e133..2838dcb1a55 100755 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml @@ -60,6 +60,9 @@ search.query_current .+ \n search.query_time .+ \n search.query_total .+ \n + search.scroll_current .+ \n + search.scroll_time .+ \n + search.scroll_total .+ \n segments.count .+ \n segments.memory .+ \n segments.index_writer_memory .+ \n diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shard_stores/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shard_stores/10_basic.yaml new file mode 100644 index 00000000000..2826dd85371 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shard_stores/10_basic.yaml @@ -0,0 +1,86 @@ +--- +"no indices test": + - do: + indices.shard_stores: + allow_no_indices: true + + - match: { indices: {}} + + - do: + catch: missing + indices.shard_stores: + allow_no_indices: false + +--- +"basic index test": + + - do: + indices.create: + index: index1 + body: + settings: + number_of_shards: "1" + number_of_replicas: "0" + - do: + index: + index: index1 + type: type + body: { foo: bar } + refresh: true + + - do: + cluster.health: + wait_for_status: green + + - do: + indices.shard_stores: + index: index1 + status: "green" + + - match: { indices.index1.shards.0.stores.0.allocation: "primary" } + - gte: { indices.index1.shards.0.stores.0.version: 0 } + +--- +"multiple indices test": + + - do: + indices.create: + index: index1 + body: + settings: + number_of_shards: "1" + number_of_replicas: "0" + - do: + 
indices.create: + index: index2 + body: + settings: + number_of_shards: "2" + number_of_replicas: "0" + - do: + index: + index: index1 + type: type + body: { foo: bar } + refresh: true + - do: + index: + index: index2 + type: type + body: { foo: bar } + refresh: true + - do: + cluster.health: + wait_for_status: green + + - do: + indices.shard_stores: + status: "green" + + - match: { indices.index1.shards.0.stores.0.allocation: "primary" } + - gte: { indices.index1.shards.0.stores.0.version: 0 } + - match: { indices.index2.shards.0.stores.0.allocation: "primary" } + - gte: { indices.index2.shards.0.stores.0.version: 0 } + - match: { indices.index2.shards.1.stores.0.allocation: "primary" } + - gte: { indices.index2.shards.1.stores.0.version: 0 } + diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mpercolate/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/mpercolate/10_basic.yaml index 9f949c21cd6..fef50208749 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mpercolate/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mpercolate/10_basic.yaml @@ -37,5 +37,7 @@ foo: bar - match: { responses.0.total: 1 } - - match: { responses.1.error: "/IndexMissingException.no.such.index./" } + - match: { responses.1.error.root_cause.0.type: index_not_found_exception } + - match: { responses.1.error.root_cause.0.reason: "/no.such.index/" } + - match: { responses.1.error.root_cause.0.index: percolator_index1 } - match: { responses.2.total: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yaml index 76ef7c42303..8b736b860bd 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yaml @@ -39,7 +39,7 @@ match: {foo: bar} - match: { responses.0.hits.total: 3 } - - match: { responses.1.error.root_cause.0.type: index_missing_exception } + - match: { responses.1.error.root_cause.0.type: index_not_found_exception } - match: { responses.1.error.root_cause.0.reason: "/no.such.index/" } - match: { responses.1.error.root_cause.0.index: test_2 } - match: { responses.2.hits.total: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.restore/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.restore/10_basic.yaml index 41944a13342..d471853a95b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.restore/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.restore/10_basic.yaml @@ -15,11 +15,11 @@ setup: body: settings: number_of_shards: 1 - number_of_replicas: 1 + number_of_replicas: 0 - do: cluster.health: - wait_for_status: yellow + wait_for_status: green --- "Create a snapshot and then restore it": diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/template/20_search.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/template/20_search.yaml index d8e7364d545..5153f6cde1f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/template/20_search.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/template/20_search.yaml @@ -37,7 +37,7 @@ - match: { hits.total: 1 } - do: - catch: /Unable.to.find.on.disk.script.simple1/ + catch: /Unable.to.find.on.disk.file.script.\[simple1\].using.lang.\[mustache\]/ search_template: body: { "template" : "simple1" } diff --git a/securemock/pom.xml 
b/securemock/pom.xml deleted file mode 100644 index 8c2faa704bc..00000000000 --- a/securemock/pom.xml +++ /dev/null @@ -1,107 +0,0 @@ - - 4.0.0 - org.elasticsearch - elasticsearch-securemock - 1.0-SNAPSHOT - jar - Elasticsearch Securemock - Allows creating mocks in tests without having to grant dangerous permissions to all of your code. - 2015 - - - org.sonatype.oss - oss-parent - 7 - - - - - The Apache Software License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - repo - - - - - scm:git:git@github.com:elastic/elasticsearch.git - scm:git:git@github.com:elastic/elasticsearch.git - http://github.com/elastic/elasticsearch/securemock - - - - UTF-8 - 1.7 - 1.7 - - - - - oss-snapshots - Sonatype OSS Snapshots - https://oss.sonatype.org/content/repositories/snapshots/ - - - - - - org.mockito - mockito-core - 1.9.5 - - - org.objenesis - objenesis - 2.1 - - - junit - junit - 4.11 - test - - - - - - - org.apache.maven.plugins - maven-shade-plugin - 2.4 - - - package - - shade - - - - - junit:junit - org.hamcrest:hamcrest-core - - - - - - org.mockito - org.elasticsearch.mock.orig - - org.mockito.Mockito - - - - org.elasticsearch.mock - org.mockito - - org.elasticsearch.mock.Mockito* - - - - - - - - - - diff --git a/securemock/src/main/java/org/elasticsearch/mock/Mockito.java b/securemock/src/main/java/org/elasticsearch/mock/Mockito.java deleted file mode 100644 index b5abe867d16..00000000000 --- a/securemock/src/main/java/org/elasticsearch/mock/Mockito.java +++ /dev/null @@ -1,367 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.mock; - -import org.mockito.InOrder; -import org.mockito.Matchers; -import org.mockito.MockSettings; -import org.mockito.MockingDetails; -import org.mockito.ReturnValues; -import org.mockito.stubbing.Answer; -import org.mockito.stubbing.DeprecatedOngoingStubbing; -import org.mockito.stubbing.OngoingStubbing; -import org.mockito.stubbing.Stubber; -import org.mockito.stubbing.VoidMethodStubbable; -import org.mockito.verification.VerificationMode; -import org.mockito.verification.VerificationWithTimeout; - -import java.security.AccessController; -import java.security.PrivilegedAction; - -/** - * Wraps Mockito API with calls to AccessController. - *

- * This is useful if you want to mock in a securitymanager environment,
- * but contain the permissions to only mocking test libraries.
- *
- * Instead of:
- *
- * grant {
- *   permission java.lang.RuntimePermission "reflectionFactoryAccess";
- * };
- *
- * You can just change maven dependencies to use securemock.jar, and then:
- *
- * grant codeBase "/url/to/securemock.jar" {
- *   permission java.lang.RuntimePermission "reflectionFactoryAccess";
- * };
- *
- */ -public class Mockito extends Matchers { - - public static final Answer RETURNS_DEFAULTS = org.mockito.Mockito.RETURNS_DEFAULTS; - public static final Answer RETURNS_SMART_NULLS = org.mockito.Mockito.RETURNS_SMART_NULLS; - public static final Answer RETURNS_MOCKS = org.mockito.Mockito.RETURNS_MOCKS; - public static final Answer RETURNS_DEEP_STUBS = org.mockito.Mockito.RETURNS_DEEP_STUBS; - public static final Answer CALLS_REAL_METHODS = org.mockito.Mockito.CALLS_REAL_METHODS; - - public static T mock(final Class classToMock) { - return AccessController.doPrivileged(new PrivilegedAction() { - @Override - public T run() { - return org.mockito.Mockito.mock(classToMock); - } - }); - } - - public static T mock(final Class classToMock, final String name) { - return AccessController.doPrivileged(new PrivilegedAction() { - @Override - public T run() { - return org.mockito.Mockito.mock(classToMock, name); - } - }); - } - - public static MockingDetails mockingDetails(final Object toInspect) { - return AccessController.doPrivileged(new PrivilegedAction() { - @Override - public MockingDetails run() { - return org.mockito.Mockito.mockingDetails(toInspect); - } - }); - } - - @Deprecated - public static T mock(final Class classToMock, final ReturnValues returnValues) { - return AccessController.doPrivileged(new PrivilegedAction() { - @Override - public T run() { - return org.mockito.Mockito.mock(classToMock, returnValues); - } - }); - } - - public static T mock(final Class classToMock, final Answer defaultAnswer) { - return AccessController.doPrivileged(new PrivilegedAction() { - @Override - public T run() { - return org.mockito.Mockito.mock(classToMock, defaultAnswer); - } - }); - } - - public static T mock(final Class classToMock, final MockSettings mockSettings) { - return AccessController.doPrivileged(new PrivilegedAction() { - @Override - public T run() { - return org.mockito.Mockito.mock(classToMock, mockSettings); - } - }); - } - - public static T spy(final T object) { - return AccessController.doPrivileged(new PrivilegedAction() { - @Override - public T run() { - return org.mockito.Mockito.spy(object); - } - }); - } - - public static DeprecatedOngoingStubbing stub(final T methodCall) { - return AccessController.doPrivileged(new PrivilegedAction>() { - @Override - public DeprecatedOngoingStubbing run() { - return org.mockito.Mockito.stub(methodCall); - } - }); - } - - public static OngoingStubbing when(final T methodCall) { - return AccessController.doPrivileged(new PrivilegedAction>() { - @Override - public OngoingStubbing run() { - return org.mockito.Mockito.when(methodCall); - } - }); - } - - public static T verify(final T mock) { - return AccessController.doPrivileged(new PrivilegedAction() { - @Override - public T run() { - return org.mockito.Mockito.verify(mock); - } - }); - } - - public static T verify(final T mock, final VerificationMode mode) { - return AccessController.doPrivileged(new PrivilegedAction() { - @Override - public T run() { - return org.mockito.Mockito.verify(mock, mode); - } - }); - } - - public static void reset(final T ... mocks) { - AccessController.doPrivileged(new PrivilegedAction() { - @Override - public Void run() { - org.mockito.Mockito.reset(mocks); - return null; - } - }); - } - - public static void verifyNoMoreInteractions(final Object... 
mocks) { - AccessController.doPrivileged(new PrivilegedAction() { - @Override - public Void run() { - org.mockito.Mockito.verifyNoMoreInteractions(mocks); - return null; - } - }); - } - - public static void verifyZeroInteractions(final Object... mocks) { - AccessController.doPrivileged(new PrivilegedAction() { - @Override - public Void run() { - org.mockito.Mockito.verifyZeroInteractions(mocks); - return null; - } - }); - } - - @Deprecated - public static VoidMethodStubbable stubVoid(final T mock) { - return AccessController.doPrivileged(new PrivilegedAction>() { - @Override - public VoidMethodStubbable run() { - return org.mockito.Mockito.stubVoid(mock); - } - }); - } - - public static Stubber doThrow(final Throwable toBeThrown) { - return AccessController.doPrivileged(new PrivilegedAction() { - @Override - public Stubber run() { - return org.mockito.Mockito.doThrow(toBeThrown); - } - }); - } - - public static Stubber doThrow(final Class toBeThrown) { - return AccessController.doPrivileged(new PrivilegedAction() { - @Override - public Stubber run() { - return org.mockito.Mockito.doThrow(toBeThrown); - } - }); - } - - public static Stubber doCallRealMethod() { - return AccessController.doPrivileged(new PrivilegedAction() { - @Override - public Stubber run() { - return org.mockito.Mockito.doCallRealMethod(); - } - }); - } - - public static Stubber doAnswer(final Answer answer) { - return AccessController.doPrivileged(new PrivilegedAction() { - @Override - public Stubber run() { - return org.mockito.Mockito.doAnswer(answer); - } - }); - } - - public static Stubber doNothing() { - return AccessController.doPrivileged(new PrivilegedAction() { - @Override - public Stubber run() { - return org.mockito.Mockito.doNothing(); - } - }); - } - - public static Stubber doReturn(final Object toBeReturned) { - return AccessController.doPrivileged(new PrivilegedAction() { - @Override - public Stubber run() { - return org.mockito.Mockito.doReturn(toBeReturned); - } - }); - } - - public static InOrder inOrder(final Object... mocks) { - return AccessController.doPrivileged(new PrivilegedAction() { - @Override - public InOrder run() { - return org.mockito.Mockito.inOrder(mocks); - } - }); - } - - public static Object[] ignoreStubs(final Object... 
mocks) { - return AccessController.doPrivileged(new PrivilegedAction() { - @Override - public Object[] run() { - return org.mockito.Mockito.ignoreStubs(mocks); - } - }); - } - - public static VerificationMode times(final int wantedNumberOfInvocations) { - return AccessController.doPrivileged(new PrivilegedAction() { - @Override - public VerificationMode run() { - return org.mockito.Mockito.times(wantedNumberOfInvocations); - } - }); - } - - public static VerificationMode never() { - return AccessController.doPrivileged(new PrivilegedAction() { - @Override - public VerificationMode run() { - return org.mockito.Mockito.never(); - } - }); - } - - public static VerificationMode atLeastOnce() { - return AccessController.doPrivileged(new PrivilegedAction() { - @Override - public VerificationMode run() { - return org.mockito.Mockito.atLeastOnce(); - } - }); - } - - public static VerificationMode atLeast(final int minNumberOfInvocations) { - return AccessController.doPrivileged(new PrivilegedAction() { - @Override - public VerificationMode run() { - return org.mockito.Mockito.atLeast(minNumberOfInvocations); - } - }); - } - - public static VerificationMode atMost(final int maxNumberOfInvocations) { - return AccessController.doPrivileged(new PrivilegedAction() { - @Override - public VerificationMode run() { - return org.mockito.Mockito.atMost(maxNumberOfInvocations); - } - }); - } - - public static VerificationMode calls(final int wantedNumberOfInvocations) { - return AccessController.doPrivileged(new PrivilegedAction() { - @Override - public VerificationMode run() { - return org.mockito.Mockito.calls(wantedNumberOfInvocations); - } - }); - } - - public static VerificationMode only() { - return AccessController.doPrivileged(new PrivilegedAction() { - @Override - public VerificationMode run() { - return org.mockito.Mockito.only(); - } - }); - } - - public static VerificationWithTimeout timeout(final int millis) { - return AccessController.doPrivileged(new PrivilegedAction() { - @Override - public VerificationWithTimeout run() { - return org.mockito.Mockito.timeout(millis); - } - }); - } - - public static void validateMockitoUsage() { - AccessController.doPrivileged(new PrivilegedAction() { - @Override - public Void run() { - org.mockito.Mockito.validateMockitoUsage(); - return null; - } - }); - } - - public static MockSettings withSettings() { - return AccessController.doPrivileged(new PrivilegedAction() { - @Override - public MockSettings run() { - return org.mockito.Mockito.withSettings(); - } - }); - } -}
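The module deleted above wrapped each Mockito entry point in `AccessController.doPrivileged`, so that only securemock's own jar needed the sensitive runtime permission rather than every caller. A minimal sketch of that wrapping pattern, reduced to one hypothetical helper (names are made up; this is not the removed class itself):

[source,java]
--------------------------------------------------
import java.security.AccessController;
import java.security.PrivilegedAction;

public final class PrivilegedWrapper {

    private PrivilegedWrapper() {}

    /**
     * Runs a single library call with this jar's own privileges, following the
     * same pattern as the deleted Mockito wrapper above. Only the code base
     * containing this class needs to be granted the sensitive permission.
     */
    public static <T> T call(PrivilegedAction<T> action) {
        return AccessController.doPrivileged(action);
    }

    public static void main(String[] args) {
        // Example: read a system property under this jar's privileges.
        String vendor = call(new PrivilegedAction<String>() {
            @Override
            public String run() {
                return System.getProperty("java.vendor");
            }
        });
        System.out.println(vendor);
    }
}
--------------------------------------------------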