diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index 05ca294a9f0..d56bdeb537f 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -482,7 +482,6 @@ - diff --git a/buildSrc/src/main/resources/forbidden/es-all-signatures.txt b/buildSrc/src/main/resources/forbidden/es-all-signatures.txt index 0e5ce884d9d..e31a7020282 100644 --- a/buildSrc/src/main/resources/forbidden/es-all-signatures.txt +++ b/buildSrc/src/main/resources/forbidden/es-all-signatures.txt @@ -31,5 +31,3 @@ org.apache.lucene.index.IndexReader#getCombinedCoreAndDeletesKey() @defaultMessage Soon to be removed org.apache.lucene.document.FieldType#numericType() - -org.apache.lucene.document.InetAddressPoint#newPrefixQuery(java.lang.String, java.net.InetAddress, int) @LUCENE-7232 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index d9e3908df22..f757eb8eef6 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ elasticsearch = 5.0.0 -lucene = 6.0.1 +lucene = 6.1.0-snapshot-3a57bea # optional dependencies spatial4j = 0.6 diff --git a/core/src/main/java/org/apache/lucene/document/XInetAddressPoint.java b/core/src/main/java/org/apache/lucene/document/XInetAddressPoint.java deleted file mode 100644 index 580b875ce2c..00000000000 --- a/core/src/main/java/org/apache/lucene/document/XInetAddressPoint.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.lucene.document; - -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.util.Arrays; - -import org.apache.lucene.search.Query; -import org.apache.lucene.util.NumericUtils; -import org.elasticsearch.common.SuppressForbidden; - -/** - * Forked utility methods from Lucene's InetAddressPoint until LUCENE-7232 and - * LUCENE-7234 are released. - */ -// TODO: remove me when we upgrade to Lucene 6.1 -@SuppressForbidden(reason="uses InetAddress.getHostAddress") -public final class XInetAddressPoint { - - private XInetAddressPoint() {} - - /** The minimum value that an ip address can hold. */ - public static final InetAddress MIN_VALUE; - /** The maximum value that an ip address can hold. */ - public static final InetAddress MAX_VALUE; - static { - MIN_VALUE = InetAddressPoint.decode(new byte[InetAddressPoint.BYTES]); - byte[] maxValueBytes = new byte[InetAddressPoint.BYTES]; - Arrays.fill(maxValueBytes, (byte) 0xFF); - MAX_VALUE = InetAddressPoint.decode(maxValueBytes); - } - - /** - * Return the {@link InetAddress} that compares immediately greater than - * {@code address}. 
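(An aside on the MIN_VALUE/MAX_VALUE constants defined just above: they are simply the all-zero and all-0xFF 16-byte addresses. A JDK-only sketch, with a hypothetical class name, reconstructs them without Lucene:)

```java
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Arrays;

// JDK-only sketch (no Lucene): the deleted MIN_VALUE/MAX_VALUE constants are
// the all-zero and all-0xFF 16-byte (IPv6-width) addresses.
public class IpBoundsSketch {
    public static void main(String[] args) throws UnknownHostException {
        byte[] min = new byte[16];                 // sixteen zero bytes
        byte[] max = new byte[16];
        Arrays.fill(max, (byte) 0xFF);             // sixteen 0xFF bytes
        System.out.println(InetAddress.getByAddress(min).getHostAddress()); // 0:0:0:0:0:0:0:0
        System.out.println(InetAddress.getByAddress(max).getHostAddress()); // ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
    }
}
```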
- * @throws ArithmeticException if the provided address is the - * {@link #MAX_VALUE maximum ip address} - */ - public static InetAddress nextUp(InetAddress address) { - if (address.equals(MAX_VALUE)) { - throw new ArithmeticException("Overflow: there is no greater InetAddress than " - + address.getHostAddress()); - } - byte[] delta = new byte[InetAddressPoint.BYTES]; - delta[InetAddressPoint.BYTES-1] = 1; - byte[] nextUpBytes = new byte[InetAddressPoint.BYTES]; - NumericUtils.add(InetAddressPoint.BYTES, 0, InetAddressPoint.encode(address), delta, nextUpBytes); - return InetAddressPoint.decode(nextUpBytes); - } - - /** - * Return the {@link InetAddress} that compares immediately less than - * {@code address}. - * @throws ArithmeticException if the provided address is the - * {@link #MIN_VALUE minimum ip address} - */ - public static InetAddress nextDown(InetAddress address) { - if (address.equals(MIN_VALUE)) { - throw new ArithmeticException("Underflow: there is no smaller InetAddress than " - + address.getHostAddress()); - } - byte[] delta = new byte[InetAddressPoint.BYTES]; - delta[InetAddressPoint.BYTES-1] = 1; - byte[] nextDownBytes = new byte[InetAddressPoint.BYTES]; - NumericUtils.subtract(InetAddressPoint.BYTES, 0, InetAddressPoint.encode(address), delta, nextDownBytes); - return InetAddressPoint.decode(nextDownBytes); - } - - /** - * Create a prefix query for matching a CIDR network range. - * - * @param field field name. must not be {@code null}. - * @param value any host address - * @param prefixLength the network prefix length for this address. This is also known as the subnet mask in the context of IPv4 - * addresses. - * @throws IllegalArgumentException if {@code field} is null, or prefixLength is invalid. - * @return a query matching documents with addresses contained within this network - */ - // TODO: remove me when we upgrade to Lucene 6.0.1 - public static Query newPrefixQuery(String field, InetAddress value, int prefixLength) { - if (value == null) { - throw new IllegalArgumentException("InetAddress must not be null"); - } - if (prefixLength < 0 || prefixLength > 8 * value.getAddress().length) { - throw new IllegalArgumentException("illegal prefixLength '" + prefixLength - + "'. Must be 0-32 for IPv4 ranges, 0-128 for IPv6 ranges"); - } - // create the lower value by zeroing out the host portion, upper value by filling it with all ones. 
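That comment is the whole trick. As a standalone illustration of the loop that follows, here is the same masking applied to a concrete /24, JDK-only and with a hypothetical class name:

```java
import java.net.InetAddress;
import java.net.UnknownHostException;

// Same masking as the loop below, demonstrated on 192.168.1.123/24:
// clear the host bits for the lower bound, set them for the upper bound.
public class CidrBoundsSketch {
    public static void main(String[] args) throws UnknownHostException {
        InetAddress value = InetAddress.getByName("192.168.1.123");
        int prefixLength = 24;
        byte[] lower = value.getAddress();         // getAddress() returns a fresh copy
        byte[] upper = value.getAddress();
        for (int i = prefixLength; i < 8 * lower.length; i++) {
            int m = 1 << (7 - (i & 7));            // bit i, counted from the most significant bit
            lower[i >> 3] &= ~m;                   // zero the host bit in the lower bound
            upper[i >> 3] |= m;                    // set the host bit in the upper bound
        }
        System.out.println(InetAddress.getByAddress(lower).getHostAddress()); // 192.168.1.0
        System.out.println(InetAddress.getByAddress(upper).getHostAddress()); // 192.168.1.255
    }
}
```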
- byte lower[] = value.getAddress(); - byte upper[] = value.getAddress(); - for (int i = prefixLength; i < 8 * lower.length; i++) { - int m = 1 << (7 - (i & 7)); - lower[i >> 3] &= ~m; - upper[i >> 3] |= m; - } - try { - return InetAddressPoint.newRangeQuery(field, InetAddress.getByAddress(lower), InetAddress.getByAddress(upper)); - } catch (UnknownHostException e) { - throw new AssertionError(e); // values are coming from InetAddress - } - } -} diff --git a/core/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java b/core/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java index 564f780b8ed..a4b94b007fd 100644 --- a/core/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java +++ b/core/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java @@ -283,7 +283,7 @@ public abstract class BlendedTermQuery extends Query { @Override public boolean equals(Object o) { if (this == o) return true; - if (!super.equals(o)) return false; + if (sameClassAs(o) == false) return false; BlendedTermQuery that = (BlendedTermQuery) o; return Arrays.equals(equalsTerms(), that.equalsTerms()); @@ -291,7 +291,7 @@ public abstract class BlendedTermQuery extends Query { @Override public int hashCode() { - return Objects.hash(super.hashCode(), Arrays.hashCode(equalsTerms())); + return Objects.hash(classHash(), Arrays.hashCode(equalsTerms())); } public static BlendedTermQuery booleanBlendedQuery(Term[] terms, final boolean disableCoord) { diff --git a/core/src/main/java/org/apache/lucene/queries/MinDocQuery.java b/core/src/main/java/org/apache/lucene/queries/MinDocQuery.java index 86982bfc949..a8b7dc9299f 100644 --- a/core/src/main/java/org/apache/lucene/queries/MinDocQuery.java +++ b/core/src/main/java/org/apache/lucene/queries/MinDocQuery.java @@ -44,12 +44,12 @@ public final class MinDocQuery extends Query { @Override public int hashCode() { - return Objects.hash(super.hashCode(), minDoc); + return Objects.hash(classHash(), minDoc); } @Override public boolean equals(Object obj) { - if (super.equals(obj) == false) { + if (sameClassAs(obj) == false) { return false; } MinDocQuery that = (MinDocQuery) obj; diff --git a/core/src/main/java/org/apache/lucene/search/suggest/analyzing/XAnalyzingSuggester.java b/core/src/main/java/org/apache/lucene/search/suggest/analyzing/XAnalyzingSuggester.java index a9327d785e1..6017803b63d 100644 --- a/core/src/main/java/org/apache/lucene/search/suggest/analyzing/XAnalyzingSuggester.java +++ b/core/src/main/java/org/apache/lucene/search/suggest/analyzing/XAnalyzingSuggester.java @@ -63,9 +63,6 @@ import org.elasticsearch.common.io.PathUtils; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -622,8 +619,12 @@ public long ramBytesUsed() { Set seenSurfaceForms = new HashSet<>(); int dedup = 0; - while (reader.read(scratch)) { - input.reset(scratch.bytes(), 0, scratch.length()); + while (true) { + BytesRef bytes = reader.next(); + if (bytes == null) { + break; + } + input.reset(bytes.bytes, bytes.offset, bytes.length); short analyzedLength = input.readShort(); analyzed.grow(analyzedLength+2); input.readBytes(analyzed.bytes(), 0, analyzedLength); @@ -631,13 +632,13 @@ public long ramBytesUsed() { long cost = input.readInt(); - surface.bytes = scratch.bytes(); + surface.bytes = bytes.bytes; if (hasPayloads) { surface.length = input.readShort(); surface.offset = 
input.getPosition(); } else { surface.offset = input.getPosition(); - surface.length = scratch.length() - surface.offset; + surface.length = bytes.length - surface.offset; } if (previousAnalyzed == null) { @@ -679,11 +680,11 @@ public long ramBytesUsed() { builder.add(scratchInts.get(), outputs.newPair(cost, BytesRef.deepCopyOf(surface))); } else { int payloadOffset = input.getPosition() + surface.length; - int payloadLength = scratch.length() - payloadOffset; + int payloadLength = bytes.length - payloadOffset; BytesRef br = new BytesRef(surface.length + 1 + payloadLength); System.arraycopy(surface.bytes, surface.offset, br.bytes, 0, surface.length); br.bytes[surface.length] = (byte) payloadSep; - System.arraycopy(scratch.bytes(), payloadOffset, br.bytes, surface.length+1, payloadLength); + System.arraycopy(bytes.bytes, payloadOffset, br.bytes, surface.length+1, payloadLength); br.length = br.bytes.length; builder.add(scratchInts.get(), outputs.newPair(cost, br)); } diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java index 5bdbf76265d..6e6e82b3fc5 100644 --- a/core/src/main/java/org/elasticsearch/Version.java +++ b/core/src/main/java/org/elasticsearch/Version.java @@ -77,7 +77,7 @@ public class Version { public static final int V_5_0_0_alpha3_ID = 5000003; public static final Version V_5_0_0_alpha3 = new Version(V_5_0_0_alpha3_ID, org.apache.lucene.util.Version.LUCENE_6_0_0); public static final int V_5_0_0_ID = 5000099; - public static final Version V_5_0_0 = new Version(V_5_0_0_ID, org.apache.lucene.util.Version.LUCENE_6_0_1); + public static final Version V_5_0_0 = new Version(V_5_0_0_ID, org.apache.lucene.util.Version.LUCENE_6_1_0); public static final Version CURRENT = V_5_0_0; static { diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/core/src/main/java/org/elasticsearch/action/search/SearchResponse.java index b76e640fa95..8e144537c84 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchResponse.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchResponse.java @@ -36,7 +36,6 @@ import org.elasticsearch.search.profile.ProfileShardResult; import org.elasticsearch.search.suggest.Suggest; import java.io.IOException; -import java.util.List; import java.util.Map; import static org.elasticsearch.action.search.ShardSearchFailure.readShardSearchFailure; @@ -169,7 +168,7 @@ public class SearchResponse extends ActionResponse implements StatusToXContent { * * @return The profile results or an empty map */ - public @Nullable Map> getProfileResults() { + public @Nullable Map getProfileResults() { return internalResponse.profile(); } diff --git a/core/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java b/core/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java index afc2b77e211..4a434ebe6b7 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java @@ -33,6 +33,13 @@ public class JavaVersion implements Comparable { } private JavaVersion(List version) { + if (version.size() >= 2 + && version.get(0).intValue() == 1 + && version.get(1).intValue() == 8) { + // for Java 8 there is ambiguity since both 1.8 and 8 are supported, + // so we rewrite the former to the latter + version = new ArrayList<>(version.subList(1, version.size())); + } this.version = Collections.unmodifiableList(version); } @@ -75,6 +82,19 @@ public class JavaVersion implements Comparable { return 0; } + 
@Override + public boolean equals(Object o) { + if (o == null || o.getClass() != getClass()) { + return false; + } + return compareTo((JavaVersion) o) == 0; + } + + @Override + public int hashCode() { + return version.hashCode(); + } + @Override public String toString() { return version.stream().map(v -> Integer.toString(v)).collect(Collectors.joining(".")); diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java b/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java index d3a42a97ebb..efd525d313b 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java @@ -148,18 +148,11 @@ public class ClusterChangedEvent { * has changed between the previous cluster state and the new cluster state. * Note that this is an object reference equality test, not an equals test. */ - public boolean indexMetaDataChanged(IndexMetaData current) { - MetaData previousMetaData = previousState.metaData(); - if (previousMetaData == null) { - return true; - } - IndexMetaData previousIndexMetaData = previousMetaData.index(current.getIndex()); + public static boolean indexMetaDataChanged(IndexMetaData metaData1, IndexMetaData metaData2) { + assert metaData1 != null && metaData2 != null; // no need to check on version, since disco modules will make sure to use the // same instance if its a version match - if (previousIndexMetaData == current) { - return false; - } - return true; + return metaData1 != metaData2; } /** diff --git a/core/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java b/core/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java index 0645accb42a..b1bf01018c9 100644 --- a/core/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java +++ b/core/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaDataMappingService; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; @@ -58,13 +59,12 @@ public class NodeMappingRefreshAction extends AbstractComponent { transportService.registerRequestHandler(ACTION_NAME, NodeMappingRefreshRequest::new, ThreadPool.Names.SAME, new NodeMappingRefreshTransportHandler()); } - public void nodeMappingRefresh(final ClusterState state, final NodeMappingRefreshRequest request) { - final DiscoveryNodes nodes = state.nodes(); - if (nodes.getMasterNode() == null) { + public void nodeMappingRefresh(final DiscoveryNode masterNode, final NodeMappingRefreshRequest request) { + if (masterNode == null) { logger.warn("can't send mapping refresh for [{}], no master known.", request.index()); return; } - transportService.sendRequest(nodes.getMasterNode(), ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME); + transportService.sendRequest(masterNode, ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME); } private class NodeMappingRefreshTransportHandler implements TransportRequestHandler { diff --git a/core/src/main/java/org/elasticsearch/common/RandomBasedUUIDGenerator.java 
b/core/src/main/java/org/elasticsearch/common/RandomBasedUUIDGenerator.java index 9f5e5f34a1b..970adfc03fb 100644 --- a/core/src/main/java/org/elasticsearch/common/RandomBasedUUIDGenerator.java +++ b/core/src/main/java/org/elasticsearch/common/RandomBasedUUIDGenerator.java @@ -19,8 +19,6 @@ package org.elasticsearch.common; - -import java.io.IOException; import java.util.Base64; import java.util.Random; @@ -32,7 +30,7 @@ class RandomBasedUUIDGenerator implements UUIDGenerator { */ @Override public String getBase64UUID() { - return getBase64UUID(SecureRandomHolder.INSTANCE); + return getBase64UUID(Randomness.getSecure()); } /** @@ -49,12 +47,13 @@ class RandomBasedUUIDGenerator implements UUIDGenerator { * stamp (bits 4 through 7 of the time_hi_and_version field).*/ randomBytes[6] &= 0x0f; /* clear the 4 most significant bits for the version */ randomBytes[6] |= 0x40; /* set the version to 0100 / 0x40 */ - - /* Set the variant: + + /* Set the variant: * The high field of th clock sequence multiplexed with the variant. * We set only the MSB of the variant*/ randomBytes[8] &= 0x3f; /* clear the 2 most significant bits */ randomBytes[8] |= 0x80; /* set the variant (MSB is set)*/ return Base64.getUrlEncoder().withoutPadding().encodeToString(randomBytes); } + } diff --git a/core/src/main/java/org/elasticsearch/common/Randomness.java b/core/src/main/java/org/elasticsearch/common/Randomness.java index ddcd3ea90f7..bb700455be5 100644 --- a/core/src/main/java/org/elasticsearch/common/Randomness.java +++ b/core/src/main/java/org/elasticsearch/common/Randomness.java @@ -23,6 +23,9 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import java.lang.reflect.Method; +import java.security.NoSuchAlgorithmException; +import java.security.NoSuchProviderException; +import java.security.SecureRandom; import java.util.Collections; import java.util.List; import java.util.Random; @@ -44,6 +47,7 @@ import java.util.concurrent.ThreadLocalRandom; * DiscoveryService#NODE_ID_SEED_SETTING)). */ public final class Randomness { + private static final Method currentMethod; private static final Method getRandomMethod; @@ -72,7 +76,7 @@ public final class Randomness { * @param setting the setting to access the seed * @return a reproducible source of randomness */ - public static Random get(Settings settings, Setting setting) { + public static Random get(final Settings settings, final Setting setting) { if (setting.exists(settings)) { return new Random(setting.get(settings)); } else { @@ -98,7 +102,7 @@ public final class Randomness { public static Random get() { if (currentMethod != null && getRandomMethod != null) { try { - Object randomizedContext = currentMethod.invoke(null); + final Object randomizedContext = currentMethod.invoke(null); return (Random) getRandomMethod.invoke(randomizedContext); } catch (ReflectiveOperationException e) { // unexpected, bail @@ -109,13 +113,42 @@ public final class Randomness { } } + /** + * Provides a source of randomness that is reproducible when + * running under the Elasticsearch test suite, and otherwise + * produces a non-reproducible source of secure randomness. + * Reproducible sources of randomness are created when the system + * property "tests.seed" is set and the security policy allows + * reading this system property. Otherwise, non-reproducible + * sources of secure randomness are created. 
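(Back to the UUID generator for a moment: the two masked bytes above are exactly RFC 4122's version and variant fields. A self-contained sketch of the same stamping, hypothetical class name:)

```java
import java.security.SecureRandom;
import java.util.Base64;

// Self-contained sketch of the bit stamping above: take 16 random bytes,
// mark them as a version-4 (random) UUID with the RFC 4122 variant, and
// emit the URL-safe base64 form the generator returns.
public class UuidSketch {
    public static void main(String[] args) {
        byte[] b = new byte[16];
        new SecureRandom().nextBytes(b);
        b[6] &= 0x0f;   // clear the four version bits...
        b[6] |= 0x40;   // ...then set version 4 (0100)
        b[8] &= 0x3f;   // clear the two variant bits...
        b[8] |= 0x80;   // ...then set the RFC 4122 variant (10xx)
        System.out.println(Base64.getUrlEncoder().withoutPadding().encodeToString(b)); // 22 characters
    }
}
```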
+ * + * @return a source of randomness + * @throws IllegalStateException if running tests but was not able + * to acquire an instance of Random from + * RandomizedContext or tests are + * running but tests.seed is not set + */ + public static Random getSecure() { + if (currentMethod != null && getRandomMethod != null) { + return get(); + } else { + return getSecureRandomWithoutSeed(); + } + } + @SuppressForbidden(reason = "ThreadLocalRandom is okay when not running tests") private static Random getWithoutSeed() { assert currentMethod == null && getRandomMethod == null : "running under tests but tried to create non-reproducible random"; return ThreadLocalRandom.current(); } - public static void shuffle(List list) { + private static SecureRandom getSecureRandomWithoutSeed() { + assert currentMethod == null && getRandomMethod == null : "running under tests but tried to create non-reproducible random"; + return SecureRandomHolder.INSTANCE; + } + + public static void shuffle(final List list) { Collections.shuffle(list, get()); } + } diff --git a/core/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java b/core/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java index 4087704d5cd..9982a08f17f 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java +++ b/core/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.geo; import java.util.ArrayList; import java.util.Collection; -import org.apache.lucene.spatial.util.GeoEncodingUtils; +import org.apache.lucene.spatial.geopoint.document.GeoPointField; import org.apache.lucene.util.BitUtil; /** @@ -39,7 +39,7 @@ public class GeoHashUtils { /** maximum precision for geohash strings */ public static final int PRECISION = 12; - private static final short MORTON_OFFSET = (GeoEncodingUtils.BITS<<1) - (PRECISION*5); + private static final short MORTON_OFFSET = (GeoPointField.BITS<<1) - (PRECISION*5); // No instance: private GeoHashUtils() { @@ -51,7 +51,7 @@ public class GeoHashUtils { public static final long longEncode(final double lon, final double lat, final int level) { // shift to appropriate level final short msf = (short)(((12 - level) * 5) + MORTON_OFFSET); - return ((BitUtil.flipFlop(GeoEncodingUtils.mortonHash(lat, lon)) >>> msf) << 4) | level; + return ((BitUtil.flipFlop(GeoPointField.encodeLatLon(lat, lon)) >>> msf) << 4) | level; } /** @@ -117,7 +117,7 @@ public class GeoHashUtils { */ public static final String stringEncode(final double lon, final double lat, final int level) { // convert to geohashlong - final long ghLong = fromMorton(GeoEncodingUtils.mortonHash(lat, lon), level); + final long ghLong = fromMorton(GeoPointField.encodeLatLon(lat, lon), level); return stringEncode(ghLong); } @@ -138,7 +138,7 @@ public class GeoHashUtils { StringBuilder geoHash = new StringBuilder(); short precision = 0; - final short msf = (GeoEncodingUtils.BITS<<1)-5; + final short msf = (GeoPointField.BITS<<1)-5; long mask = 31L<>>(msf-(precision*5)))]); diff --git a/core/src/main/java/org/elasticsearch/common/geo/GeoPoint.java b/core/src/main/java/org/elasticsearch/common/geo/GeoPoint.java index 5d1250a5148..96fe2826da8 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/GeoPoint.java +++ b/core/src/main/java/org/elasticsearch/common/geo/GeoPoint.java @@ -19,12 +19,11 @@ package org.elasticsearch.common.geo; +import org.apache.lucene.spatial.geopoint.document.GeoPointField; import org.apache.lucene.util.BitUtil; import static 
org.elasticsearch.common.geo.GeoHashUtils.mortonEncode; import static org.elasticsearch.common.geo.GeoHashUtils.stringEncode; -import static org.apache.lucene.spatial.util.GeoEncodingUtils.mortonUnhashLat; -import static org.apache.lucene.spatial.util.GeoEncodingUtils.mortonUnhashLon; /** * @@ -84,14 +83,14 @@ public final class GeoPoint { } public GeoPoint resetFromIndexHash(long hash) { - lon = mortonUnhashLon(hash); - lat = mortonUnhashLat(hash); + lon = GeoPointField.decodeLongitude(hash); + lat = GeoPointField.decodeLatitude(hash); return this; } public GeoPoint resetFromGeoHash(String geohash) { final long hash = mortonEncode(geohash); - return this.reset(mortonUnhashLat(hash), mortonUnhashLon(hash)); + return this.reset(GeoPointField.decodeLatitude(hash), GeoPointField.decodeLongitude(hash)); } public GeoPoint resetFromGeoHash(long geohashLong) { @@ -164,8 +163,4 @@ public final class GeoPoint { public static GeoPoint fromGeohash(long geohashLong) { return new GeoPoint().resetFromGeoHash(geohashLong); } - - public static GeoPoint fromIndexLong(long indexLong) { - return new GeoPoint().resetFromIndexHash(indexLong); - } } diff --git a/core/src/main/java/org/elasticsearch/common/geo/GeoUtils.java b/core/src/main/java/org/elasticsearch/common/geo/GeoUtils.java index d5cc6846865..69ab2059ccf 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/GeoUtils.java +++ b/core/src/main/java/org/elasticsearch/common/geo/GeoUtils.java @@ -28,7 +28,6 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper; -import static org.apache.lucene.spatial.util.GeoDistanceUtils.maxRadialDistanceMeters; import java.io.IOException; @@ -67,6 +66,9 @@ public class GeoUtils { /** Earth ellipsoid polar distance in meters */ public static final double EARTH_POLAR_DISTANCE = Math.PI * EARTH_SEMI_MINOR_AXIS; + /** rounding error for quantized latitude and longitude values */ + public static final double TOLERANCE = 1E-6; + /** Returns the minimum between the provided distance 'initialRadius' and the * maximum distance/radius from the point 'center' before overlapping **/ @@ -468,6 +470,14 @@ public class GeoUtils { } } + /** Returns the maximum distance/radius (in meters) from the point 'center' before overlapping */ + public static double maxRadialDistanceMeters(final double centerLat, final double centerLon) { + if (Math.abs(centerLat) == MAX_LAT) { + return SloppyMath.haversinMeters(centerLat, centerLon, 0, centerLon); + } + return SloppyMath.haversinMeters(centerLat, centerLon, centerLat, (MAX_LON + centerLon) % 360); + } + private GeoUtils() { } } diff --git a/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java b/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java index 9b995f423a3..75f400fdc9d 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java @@ -45,6 +45,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.SmallFloat; import java.io.IOException; +import java.util.Objects; import java.util.Set; /** @@ -63,6 +64,19 @@ public final class AllTermQuery extends Query { this.term = term; } + @Override + public boolean equals(Object obj) { + if (sameClassAs(obj) == false) { + return false; + } + return Objects.equals(term, ((AllTermQuery) obj).term); + } + + @Override + public int hashCode() { + return 31 * 
classHash() + term.hashCode(); + } + @Override public Query rewrite(IndexReader reader) throws IOException { Query rewritten = super.rewrite(reader); diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/MatchNoDocsQuery.java b/core/src/main/java/org/elasticsearch/common/lucene/search/MatchNoDocsQuery.java index a25b4c0aa29..9caf350926c 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/MatchNoDocsQuery.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/MatchNoDocsQuery.java @@ -66,4 +66,14 @@ public class MatchNoDocsQuery extends Query { public String toString(String field) { return "MatchNoDocsQuery[\"" + reason + "\"]"; } + + @Override + public boolean equals(Object obj) { + return sameClassAs(obj); + } + + @Override + public int hashCode() { + return classHash(); + } } diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/MoreLikeThisQuery.java b/core/src/main/java/org/elasticsearch/common/lucene/search/MoreLikeThisQuery.java index fbe0c28e341..06ab2b4a530 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/MoreLikeThisQuery.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/MoreLikeThisQuery.java @@ -84,14 +84,14 @@ public class MoreLikeThisQuery extends Query { @Override public int hashCode() { - return Objects.hash(super.hashCode(), boostTerms, boostTermsFactor, Arrays.hashCode(likeText), + return Objects.hash(classHash(), boostTerms, boostTermsFactor, Arrays.hashCode(likeText), maxDocFreq, maxQueryTerms, maxWordLen, minDocFreq, minTermFrequency, minWordLen, Arrays.hashCode(moreLikeFields), minimumShouldMatch, stopWords); } @Override public boolean equals(Object obj) { - if (super.equals(obj) == false) { + if (sameClassAs(obj) == false) { return false; } MoreLikeThisQuery other = (MoreLikeThisQuery) obj; diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQuery.java b/core/src/main/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQuery.java index 05006ec0db7..87bfdacb1c7 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQuery.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQuery.java @@ -238,7 +238,7 @@ public class MultiPhrasePrefixQuery extends Query { */ @Override public boolean equals(Object o) { - if (super.equals(o) == false) { + if (sameClassAs(o) == false) { return false; } MultiPhrasePrefixQuery other = (MultiPhrasePrefixQuery) o; @@ -252,7 +252,7 @@ public class MultiPhrasePrefixQuery extends Query { */ @Override public int hashCode() { - return super.hashCode() + return classHash() ^ slop ^ termArraysHashCode() ^ positions.hashCode(); diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java b/core/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java index e62f3f6665a..3927dcd518e 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java @@ -355,7 +355,7 @@ public class FiltersFunctionScoreQuery extends Query { if (this == o) { return true; } - if (super.equals(o) == false) { + if (sameClassAs(o) == false) { return false; } FiltersFunctionScoreQuery other = (FiltersFunctionScoreQuery) o; @@ -367,6 +367,6 @@ public class FiltersFunctionScoreQuery extends Query { @Override public int hashCode() { - 
return Objects.hash(super.hashCode(), subQuery, maxBoost, combineFunction, minScore, scoreMode, Arrays.hashCode(filterFunctions)); + return Objects.hash(classHash(), subQuery, maxBoost, combineFunction, minScore, scoreMode, Arrays.hashCode(filterFunctions)); } } diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java b/core/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java index 646076a3a17..be98a07a9c1 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java @@ -210,7 +210,7 @@ public class FunctionScoreQuery extends Query { if (this == o) { return true; } - if (super.equals(o) == false) { + if (sameClassAs(o) == false) { return false; } FunctionScoreQuery other = (FunctionScoreQuery) o; @@ -221,6 +221,6 @@ public class FunctionScoreQuery extends Query { @Override public int hashCode() { - return Objects.hash(super.hashCode(), subQuery.hashCode(), function, combineFunction, minScore, maxBoost); + return Objects.hash(classHash(), subQuery.hashCode(), function, combineFunction, minScore, maxBoost); } } diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 6eb8df68242..8c2d4dc01bf 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -181,6 +181,7 @@ public final class ClusterSettings extends AbstractScopedSettings { IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING, IndicesQueryCache.INDICES_CACHE_QUERY_SIZE_SETTING, IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING, + IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING, IndicesTTLService.INDICES_TTL_INTERVAL_SETTING, MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING, MetaData.SETTING_READ_ONLY_SETTING, diff --git a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index b9d0c6b4c70..fb60453d467 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -116,6 +116,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.ALLOW_UNMAPPED, IndexSettings.INDEX_CHECK_ON_STARTUP, IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD, + IndexSettings.MAX_SLICES_PER_SCROLL, ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING, IndexSettings.INDEX_GC_DELETES_SETTING, IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING, diff --git a/core/src/main/java/org/elasticsearch/index/IndexService.java b/core/src/main/java/org/elasticsearch/index/IndexService.java index 5780dc256a5..1902f24c4b7 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexService.java +++ b/core/src/main/java/org/elasticsearch/index/IndexService.java @@ -67,6 +67,7 @@ import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.AliasFilterParsingException; import org.elasticsearch.indices.InvalidAliasNameException; +import org.elasticsearch.indices.cluster.IndicesClusterStateService; import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; import 
org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.threadpool.ThreadPool; @@ -93,7 +94,7 @@ import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder; /** * */ -public final class IndexService extends AbstractIndexComponent implements IndexComponent, Iterable { +public class IndexService extends AbstractIndexComponent implements IndicesClusterStateService.AllocatedIndex { private final IndexEventListener eventListener; private final AnalysisService analysisService; @@ -184,8 +185,8 @@ public final class IndexService extends AbstractIndexComponent implements IndexC /** * Return the shard with the provided id, or null if there is no such shard. */ - @Nullable - public IndexShard getShardOrNull(int shardId) { + @Override + public @Nullable IndexShard getShardOrNull(int shardId) { return shards.get(shardId); } @@ -359,6 +360,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC return primary == false && IndexMetaData.isIndexUsingShadowReplicas(indexSettings); } + @Override public synchronized void removeShard(int shardId, String reason) { final ShardId sId = new ShardId(index(), shardId); final IndexShard indexShard; @@ -470,6 +472,11 @@ public final class IndexService extends AbstractIndexComponent implements IndexC return searchOperationListeners; } + @Override + public boolean updateMapping(IndexMetaData indexMetaData) throws IOException { + return mapperService().updateMapping(indexMetaData); + } + private class StoreCloseListener implements Store.OnClose { private final ShardId shardId; private final boolean ownsShard; @@ -617,6 +624,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC return indexSettings.getIndexMetaData(); } + @Override public synchronized void updateMetaData(final IndexMetaData metadata) { final Translog.Durability oldTranslogDurability = indexSettings.getTranslogDurability(); if (indexSettings.updateIndexMetaData(metadata)) { diff --git a/core/src/main/java/org/elasticsearch/index/IndexSettings.java b/core/src/main/java/org/elasticsearch/index/IndexSettings.java index 592c1ff1125..2c20697d757 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/core/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -121,6 +121,12 @@ public final class IndexSettings { public static final Setting MAX_REFRESH_LISTENERS_PER_SHARD = Setting.intSetting("index.max_refresh_listeners", 1000, 0, Property.Dynamic, Property.IndexScope); + /** + * The maximum number of slices allowed in a scroll request + */ + public static final Setting MAX_SLICES_PER_SCROLL = Setting.intSetting("index.max_slices_per_scroll", + 1024, 1, Property.Dynamic, Property.IndexScope); + private final Index index; private final Version version; private final ESLogger logger; @@ -154,6 +160,11 @@ public final class IndexSettings { * The maximum number of refresh listeners allows on this shard. */ private volatile int maxRefreshListeners; + /** + * The maximum number of slices allowed in a scroll request. + */ + private volatile int maxSlicesPerScroll; + /** * Returns the default search field for this index. 
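The new index.max_slices_per_scroll setting is dynamic and index-scoped. A hedged sketch of the enforcement a consumer of the accessor (added further down in this file) might perform; the helper class and the requestedSlices value are hypothetical:

```java
import org.elasticsearch.index.IndexSettings;

// Hypothetical enforcement sketch: reject a sliced scroll whose slice count
// exceeds the new per-index limit. Only getMaxSlicesPerScroll() and the
// setting itself come from this diff; everything else is illustrative.
final class SliceLimitCheck {
    static void checkSliceLimit(IndexSettings indexSettings, int requestedSlices) {
        final int maxSlices = indexSettings.getMaxSlicesPerScroll();
        if (requestedSlices > maxSlices) {
            throw new IllegalArgumentException("The number of slices [" + requestedSlices
                + "] is too large. It must be less than [" + maxSlices + "]. This limit can be set"
                + " by changing the [" + IndexSettings.MAX_SLICES_PER_SCROLL.getKey()
                + "] index level setting.");
        }
    }
}
```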
@@ -239,6 +250,7 @@ public final class IndexSettings { maxRescoreWindow = scopedSettings.get(MAX_RESCORE_WINDOW_SETTING); TTLPurgeDisabled = scopedSettings.get(INDEX_TTL_DISABLE_PURGE_SETTING); maxRefreshListeners = scopedSettings.get(MAX_REFRESH_LISTENERS_PER_SHARD); + maxSlicesPerScroll = scopedSettings.get(MAX_SLICES_PER_SCROLL); this.mergePolicyConfig = new MergePolicyConfig(logger, this); assert indexNameMatcher.test(indexMetaData.getIndex().getName()); @@ -262,6 +274,7 @@ public final class IndexSettings { scopedSettings.addSettingsUpdateConsumer(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING, this::setTranslogFlushThresholdSize); scopedSettings.addSettingsUpdateConsumer(INDEX_REFRESH_INTERVAL_SETTING, this::setRefreshInterval); scopedSettings.addSettingsUpdateConsumer(MAX_REFRESH_LISTENERS_PER_SHARD, this::setMaxRefreshListeners); + scopedSettings.addSettingsUpdateConsumer(MAX_SLICES_PER_SCROLL, this::setMaxSlicesPerScroll); } private void setTranslogFlushThresholdSize(ByteSizeValue byteSizeValue) { @@ -391,7 +404,7 @@ public final class IndexSettings { * * @return true iff any setting has been updated otherwise false. */ - synchronized boolean updateIndexMetaData(IndexMetaData indexMetaData) { + public synchronized boolean updateIndexMetaData(IndexMetaData indexMetaData) { final Settings newSettings = indexMetaData.getSettings(); if (version.equals(Version.indexCreated(newSettings)) == false) { throw new IllegalArgumentException("version mismatch on settings update expected: " + version + " but was: " + Version.indexCreated(newSettings)); @@ -521,5 +534,16 @@ public final class IndexSettings { this.maxRefreshListeners = maxRefreshListeners; } + /** + * The maximum number of slices allowed in a scroll request. + */ + public int getMaxSlicesPerScroll() { + return maxSlicesPerScroll; + } + + private void setMaxSlicesPerScroll(int value) { + this.maxSlicesPerScroll = value; + } + IndexScopedSettings getScopedSettings() { return scopedSettings;} } diff --git a/core/src/main/java/org/elasticsearch/index/analysis/PatternReplaceCharFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/PatternReplaceCharFilterFactory.java index 949e1ef0fb4..db428db153d 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/PatternReplaceCharFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/PatternReplaceCharFilterFactory.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.pattern.PatternReplaceCharFilter; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; @@ -35,10 +36,11 @@ public class PatternReplaceCharFilterFactory extends AbstractCharFilterFactory { public PatternReplaceCharFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name); - if (!Strings.hasLength(settings.get("pattern"))) { + String sPattern = settings.get("pattern"); + if (!Strings.hasLength(sPattern)) { throw new IllegalArgumentException("pattern is missing for [" + name + "] char filter of type 'pattern_replace'"); } - pattern = Pattern.compile(settings.get("pattern")); + pattern = Regex.compile(sPattern, settings.get("flags")); replacement = settings.get("replacement", ""); // when not set or set to "", use "". 
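The practical effect of switching from Pattern.compile to Regex.compile is that the char filter now honors a "flags" setting such as "CASE_INSENSITIVE|DOTALL". A JDK-only sketch of that style of flag parsing, with a hypothetical class name; the real parser in org.elasticsearch.common.regex.Regex covers the complete flag set:

```java
import java.util.regex.Pattern;

// JDK-only sketch: turn a pipe-separated list of java.util.regex.Pattern
// flag names into the int bitmask Pattern.compile expects.
public class FlagsSketch {
    static int parseFlags(String flags) {
        int result = 0;
        if (flags == null || flags.isEmpty()) {
            return result;
        }
        for (String s : flags.split("\\|")) {
            switch (s) {
                case "CASE_INSENSITIVE": result |= Pattern.CASE_INSENSITIVE; break;
                case "DOTALL":           result |= Pattern.DOTALL; break;
                case "COMMENTS":         result |= Pattern.COMMENTS; break;
                // ...the real parser handles every Pattern flag
                default: throw new IllegalArgumentException("unknown regex flag [" + s + "]");
            }
        }
        return result;
    }

    public static void main(String[] args) {
        Pattern p = Pattern.compile("foo", parseFlags("CASE_INSENSITIVE|DOTALL"));
        System.out.println(p.matcher("FOO").matches()); // true
    }
}
```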
} diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/OrdinalsBuilder.java b/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/OrdinalsBuilder.java index e3dc84a3477..967d07174b9 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/OrdinalsBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/OrdinalsBuilder.java @@ -24,7 +24,6 @@ import org.apache.lucene.index.PostingsEnum; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.spatial.geopoint.document.GeoPointField; -import org.apache.lucene.spatial.util.GeoEncodingUtils; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BitSet; import org.apache.lucene.util.BytesRef; @@ -426,7 +425,7 @@ public final class OrdinalsBuilder implements Closeable { protected AcceptStatus accept(BytesRef term) throws IOException { // accept only the max resolution terms // todo is this necessary? - return GeoEncodingUtils.getPrefixCodedShift(term) == GeoPointField.PRECISION_STEP * 4 ? + return GeoPointField.getPrefixCodedShift(term) == GeoPointField.PRECISION_STEP * 4 ? AcceptStatus.YES : AcceptStatus.END; } }; diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexGeoPointFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexGeoPointFieldData.java index c18f96c06b0..90554bd1308 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexGeoPointFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexGeoPointFieldData.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.fielddata.plain; import org.apache.lucene.spatial.geopoint.document.GeoPointField; -import org.apache.lucene.spatial.util.GeoEncodingUtils; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; import org.apache.lucene.util.CharsRefBuilder; @@ -58,7 +57,7 @@ abstract class AbstractIndexGeoPointFieldData extends AbstractIndexFieldData cursor : indexMetaData.getMappings().values()) { + MappingMetaData mappingMd = cursor.value; + String mappingType = mappingMd.type(); + CompressedXContent mappingSource = mappingMd.source(); + // refresh mapping can happen when the parsing/merging of the mapping from the metadata doesn't result in the same + // mapping, in this case, we send to the master to refresh its own version of the mappings (to conform with the + // merge version of it, which it does when refreshing the mappings), and warn log it. + try { + DocumentMapper existingMapper = documentMapper(mappingType); + + if (existingMapper == null || mappingSource.equals(existingMapper.mappingSource()) == false) { + String op = existingMapper == null ? 
"adding" : "updating"; + if (logger.isDebugEnabled() && mappingSource.compressed().length < 512) { + logger.debug("[{}] {} mapping [{}], source [{}]", index(), op, mappingType, mappingSource.string()); + } else if (logger.isTraceEnabled()) { + logger.trace("[{}] {} mapping [{}], source [{}]", index(), op, mappingType, mappingSource.string()); + } else { + logger.debug("[{}] {} mapping [{}] (source suppressed due to length, use TRACE level if needed)", index(), op, + mappingType); + } + merge(mappingType, mappingSource, MergeReason.MAPPING_RECOVERY, true); + if (!documentMapper(mappingType).mappingSource().equals(mappingSource)) { + logger.debug("[{}] parsed mapping [{}], and got different sources\noriginal:\n{}\nparsed:\n{}", index(), + mappingType, mappingSource, documentMapper(mappingType).mappingSource()); + requireRefresh = true; + } + } + } catch (Throwable e) { + logger.warn("[{}] failed to add mapping [{}], source [{}]", e, index(), mappingType, mappingSource); + throw e; + } + } + return requireRefresh; + } + //TODO: make this atomic public void merge(Map> mappings, boolean updateAllTypes) throws MapperParsingException { // first, add the default mapping diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java index a79631481d2..66cb7255fd6 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java @@ -204,7 +204,7 @@ public class DateFieldMapper extends FieldMapper implements AllFieldMapper.Inclu @Override public boolean equals(Object o) { if (this == o) return true; - if (!super.equals(o)) return false; + if (sameClassAs(o) == false) return false; LateParsingQuery that = (LateParsingQuery) o; if (includeLower != that.includeLower) return false; @@ -218,7 +218,7 @@ public class DateFieldMapper extends FieldMapper implements AllFieldMapper.Inclu @Override public int hashCode() { - return Objects.hash(super.hashCode(), lowerTerm, upperTerm, includeLower, includeUpper, timeZone); + return Objects.hash(classHash(), lowerTerm, upperTerm, includeLower, includeUpper, timeZone); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/LegacyDateFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/LegacyDateFieldMapper.java index a7e44ba3654..a3374153955 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/LegacyDateFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/LegacyDateFieldMapper.java @@ -213,7 +213,7 @@ public class LegacyDateFieldMapper extends LegacyNumberFieldMapper { @Override public boolean equals(Object o) { if (this == o) return true; - if (!super.equals(o)) return false; + if (sameClassAs(o) == false) return false; LateParsingQuery that = (LateParsingQuery) o; if (includeLower != that.includeLower) return false; @@ -227,7 +227,7 @@ public class LegacyDateFieldMapper extends LegacyNumberFieldMapper { @Override public int hashCode() { - return Objects.hash(super.hashCode(), lowerTerm, upperTerm, includeLower, includeUpper, timeZone); + return Objects.hash(classHash(), lowerTerm, upperTerm, includeLower, includeUpper, timeZone); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java b/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java index c72d746158b..8e6b538a2ae 100644 --- 
a/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java @@ -239,6 +239,13 @@ public class TypeParsers { Map.Entry entry = iterator.next(); final String propName = entry.getKey(); final Object propNode = entry.getValue(); + if (false == propName.equals("null_value") && propNode == null) { + /* + * No properties *except* null_value are allowed to have null. So we catch it here and tell the user something useful rather + * than send them a null pointer exception later. + */ + throw new MapperParsingException("[" + propName + "] must not have a [null] value"); + } if (propName.equals("store")) { builder.store(parseStore(name, propNode.toString(), parserContext)); iterator.remove(); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java index f960ecaa977..d882be8e9d7 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java @@ -195,7 +195,7 @@ public class TypeFieldMapper extends MetadataFieldMapper { @Override public boolean equals(Object obj) { - if (super.equals(obj) == false) { + if (sameClassAs(obj) == false) { return false; } TypeQuery that = (TypeQuery) obj; @@ -204,7 +204,7 @@ public class TypeFieldMapper extends MetadataFieldMapper { @Override public int hashCode() { - return 31 * super.hashCode() + type.hashCode(); + return 31 * classHash() + type.hashCode(); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java index 1e2a078925f..a123f64c4d6 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java @@ -23,7 +23,6 @@ import org.apache.lucene.document.Field; import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.document.StoredField; -import org.apache.lucene.document.XInetAddressPoint; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.XPointValues; @@ -176,7 +175,7 @@ public class IpFieldMapper extends FieldMapper implements AllFieldMapper.Include if (fields.length == 2) { InetAddress address = InetAddresses.forString(fields[0]); int prefixLength = Integer.parseInt(fields[1]); - return XInetAddressPoint.newPrefixQuery(name(), address, prefixLength); + return InetAddressPoint.newPrefixQuery(name(), address, prefixLength); } else { throw new IllegalArgumentException("Expected [ip/prefix] but was [" + term + "]"); } @@ -191,27 +190,27 @@ public class IpFieldMapper extends FieldMapper implements AllFieldMapper.Include failIfNotIndexed(); InetAddress lower; if (lowerTerm == null) { - lower = XInetAddressPoint.MIN_VALUE; + lower = InetAddressPoint.MIN_VALUE; } else { lower = parse(lowerTerm); if (includeLower == false) { - if (lower.equals(XInetAddressPoint.MAX_VALUE)) { + if (lower.equals(InetAddressPoint.MAX_VALUE)) { return new MatchNoDocsQuery(); } - lower = XInetAddressPoint.nextUp(lower); + lower = InetAddressPoint.nextUp(lower); } } InetAddress upper; if (upperTerm == null) { - upper = XInetAddressPoint.MAX_VALUE; + upper = InetAddressPoint.MAX_VALUE; } else { upper = parse(upperTerm); 
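Stepping outside this hunk briefly: the equals/hashCode rewrites threaded through this diff (TypeQuery just above, BlendedTermQuery, MinDocQuery, MoreLikeThisQuery, the geo queries below) all share one shape, built on the sameClassAs/classHash helpers these queries now use instead of super.equals/super.hashCode. A minimal sketch with a hypothetical query class:

```java
import java.util.Objects;
import org.apache.lucene.search.Query;

// Minimal sketch of the recurring pattern: compare classes via sameClassAs,
// seed the hash with classHash, then compare/hash only the query's own fields.
public final class ExampleQuery extends Query {
    private final String field;

    public ExampleQuery(String field) {
        this.field = Objects.requireNonNull(field);
    }

    @Override
    public boolean equals(Object obj) {
        return sameClassAs(obj)   // null-safe exact-class check
            && field.equals(((ExampleQuery) obj).field);
    }

    @Override
    public int hashCode() {
        return 31 * classHash() + field.hashCode();   // classHash() stands in for super.hashCode()
    }

    @Override
    public String toString(String defaultField) {
        return "ExampleQuery(field=" + field + ")";
    }
}
```

sameClassAs(obj) is a null-safe exact-class test, so two different Query subclasses can never compare equal even when their fields match.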
if (includeUpper == false) { - if (upper.equals(XInetAddressPoint.MIN_VALUE)) { + if (upper.equals(InetAddressPoint.MIN_VALUE)) { return new MatchNoDocsQuery(); } - upper = XInetAddressPoint.nextDown(upper); + upper = InetAddressPoint.nextDown(upper); } } diff --git a/core/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeQueryBuilder.java index 06f30a3477e..cf18d5a4c10 100644 --- a/core/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeQueryBuilder.java @@ -23,7 +23,6 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.spatial.geopoint.document.GeoPointField; import org.apache.lucene.spatial.geopoint.search.XGeoPointDistanceRangeQuery; -import org.apache.lucene.spatial.util.GeoDistanceUtils; import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; @@ -48,8 +47,6 @@ import java.util.Locale; import java.util.Objects; import java.util.Optional; -import static org.apache.lucene.spatial.util.GeoEncodingUtils.TOLERANCE; - public class GeoDistanceRangeQueryBuilder extends AbstractQueryBuilder { public static final String NAME = "geo_distance_range"; @@ -354,7 +351,7 @@ public class GeoDistanceRangeQueryBuilder extends AbstractQueryBuilder public boolean equals(Object obj) { if (this == obj) return true; - if (!super.equals(obj)) + if (sameClassAs(obj) == false) return false; ScriptQuery other = (ScriptQuery) obj; return Objects.equals(script, other.script); @@ -192,7 +192,7 @@ public class ScriptQueryBuilder extends AbstractQueryBuilder @Override public int hashCode() { - return Objects.hash(super.hashCode(), script); + return Objects.hash(classHash(), script); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeQuery.java b/core/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeQuery.java index 6f92e411c00..6c4fd23e64c 100644 --- a/core/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeQuery.java +++ b/core/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeQuery.java @@ -190,7 +190,7 @@ public class GeoDistanceRangeQuery extends Query { @Override public boolean equals(Object o) { if (this == o) return true; - if (super.equals(o) == false) return false; + if (sameClassAs(o) == false) return false; GeoDistanceRangeQuery filter = (GeoDistanceRangeQuery) o; @@ -212,7 +212,7 @@ public class GeoDistanceRangeQuery extends Query { @Override public int hashCode() { - int result = super.hashCode(); + int result = classHash(); long temp; temp = lat != +0.0d ? 
Double.doubleToLongBits(lat) : 0L; result = 31 * result + Long.hashCode(temp); diff --git a/core/src/main/java/org/elasticsearch/index/search/geo/GeoPolygonQuery.java b/core/src/main/java/org/elasticsearch/index/search/geo/GeoPolygonQuery.java index d62aa76efd9..c3a52cb114e 100644 --- a/core/src/main/java/org/elasticsearch/index/search/geo/GeoPolygonQuery.java +++ b/core/src/main/java/org/elasticsearch/index/search/geo/GeoPolygonQuery.java @@ -111,7 +111,7 @@ public class GeoPolygonQuery extends Query { @Override public boolean equals(Object obj) { - if (super.equals(obj) == false) { + if (sameClassAs(obj) == false) { return false; } GeoPolygonQuery that = (GeoPolygonQuery) obj; @@ -121,7 +121,7 @@ public class GeoPolygonQuery extends Query { @Override public int hashCode() { - int h = super.hashCode(); + int h = classHash(); h = 31 * h + indexFieldData.getFieldName().hashCode(); h = 31 * h + Arrays.hashCode(points); return h; diff --git a/core/src/main/java/org/elasticsearch/index/search/geo/InMemoryGeoBoundingBoxQuery.java b/core/src/main/java/org/elasticsearch/index/search/geo/InMemoryGeoBoundingBoxQuery.java index 2f2801a2abe..789ee25e1b5 100644 --- a/core/src/main/java/org/elasticsearch/index/search/geo/InMemoryGeoBoundingBoxQuery.java +++ b/core/src/main/java/org/elasticsearch/index/search/geo/InMemoryGeoBoundingBoxQuery.java @@ -84,7 +84,7 @@ public class InMemoryGeoBoundingBoxQuery extends Query { @Override public boolean equals(Object obj) { - if (super.equals(obj) == false) { + if (sameClassAs(obj) == false) { return false; } InMemoryGeoBoundingBoxQuery other = (InMemoryGeoBoundingBoxQuery) obj; @@ -95,7 +95,7 @@ public class InMemoryGeoBoundingBoxQuery extends Query { @Override public int hashCode() { - return Objects.hash(super.hashCode(), fieldName(), topLeft, bottomRight); + return Objects.hash(classHash(), fieldName(), topLeft, bottomRight); } private static class Meridian180GeoBoundingBoxBits implements Bits { diff --git a/core/src/main/java/org/elasticsearch/index/shard/CommitPoint.java b/core/src/main/java/org/elasticsearch/index/shard/CommitPoint.java index 916cf563fb8..9082fc072da 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/CommitPoint.java +++ b/core/src/main/java/org/elasticsearch/index/shard/CommitPoint.java @@ -62,13 +62,6 @@ public class CommitPoint { public String checksum() { return checksum; } - - public boolean isSame(StoreFileMetaData md) { - if (checksum == null || md.checksum() == null) { - return false; - } - return length == md.length() && checksum.equals(md.checksum()); - } } public static enum Type { diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 105d1e5b057..16ebd97d8c3 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -37,8 +37,6 @@ import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.RestoreSource; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.common.Booleans; @@ -108,6 +106,7 @@ import 
org.elasticsearch.index.warmer.ShardIndexWarmerService; import org.elasticsearch.index.warmer.WarmerStats; import org.elasticsearch.indices.IndexingMemoryController; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.indices.cluster.IndicesClusterStateService; import org.elasticsearch.indices.recovery.RecoveryFailedException; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.indices.recovery.RecoveryTargetService; @@ -136,7 +135,7 @@ import java.util.function.Consumer; import java.util.function.BiConsumer; import java.util.stream.Collectors; -public class IndexShard extends AbstractIndexShardComponent { +public class IndexShard extends AbstractIndexShardComponent implements IndicesClusterStateService.Shard { private final ThreadPool threadPool; private final MapperService mapperService; @@ -338,6 +337,7 @@ public class IndexShard extends AbstractIndexShardComponent { /** * Returns the latest cluster routing entry received with this shard. */ + @Override public ShardRouting routingEntry() { return this.shardRouting; } @@ -348,13 +348,12 @@ public class IndexShard extends AbstractIndexShardComponent { /** * Updates the shards routing entry. This mutate the shards internal state depending - * on the changes that get introduced by the new routing value. This method will persist shard level metadata - * unless explicitly disabled. + * on the changes that get introduced by the new routing value. This method will persist shard level metadata. * * @throws IndexShardRelocatedException if shard is marked as relocated and relocation aborted * @throws IOException if shard state could not be persisted */ - public void updateRoutingEntry(final ShardRouting newRouting, final boolean persistState) throws IOException { + public void updateRoutingEntry(final ShardRouting newRouting) throws IOException { final ShardRouting currentRouting = this.shardRouting; if (!newRouting.shardId().equals(shardId())) { throw new IllegalArgumentException("Trying to set a routing entry with shardId " + newRouting.shardId() + " on a shard with shardId " + shardId() + ""); @@ -408,9 +407,7 @@ public class IndexShard extends AbstractIndexShardComponent { } this.shardRouting = newRouting; indexEventListener.shardRoutingChanged(this, currentRouting, newRouting); - if (persistState) { - persistMetadata(newRouting, currentRouting); - } + persistMetadata(newRouting, currentRouting); } /** @@ -589,7 +586,7 @@ public class IndexShard extends AbstractIndexShardComponent { */ public void refresh(String source) { verifyNotClosed(); - + if (canIndex()) { long bytes = getEngine().getIndexBufferRAMBytesUsed(); writingBytes.addAndGet(bytes); @@ -1370,35 +1367,36 @@ public class IndexShard extends AbstractIndexShardComponent { return this.currentEngineReference.get(); } - public void startRecovery(DiscoveryNode localNode, DiscoveryNode sourceNode, RecoveryTargetService recoveryTargetService, + public void startRecovery(RecoveryState recoveryState, RecoveryTargetService recoveryTargetService, RecoveryTargetService.RecoveryListener recoveryListener, RepositoriesService repositoriesService, - BiConsumer mappingUpdateConsumer, IndicesService indicesService) { - final RestoreSource restoreSource = shardRouting.restoreSource(); - - if (shardRouting.isPeerRecovery()) { - assert sourceNode != null : "peer recovery started but sourceNode is null"; - // we don't mark this one as relocated at the end. 
- // For primaries: requests in any case are routed to both when its relocating and that way we handle - // the edge case where its mark as relocated, and we might need to roll it back... - // For replicas: we are recovering a backup from a primary - RecoveryState.Type type = shardRouting.primary() ? RecoveryState.Type.PRIMARY_RELOCATION : RecoveryState.Type.REPLICA; - RecoveryState recoveryState = new RecoveryState(shardId(), shardRouting.primary(), type, sourceNode, localNode); - try { - markAsRecovering("from " + sourceNode, recoveryState); - recoveryTargetService.startRecovery(this, type, sourceNode, recoveryListener); - } catch (Throwable e) { - failShard("corrupted preexisting index", e); - recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(shardId, sourceNode, localNode, e), true); - } - } else if (restoreSource == null) { - // recover from filesystem store - - IndexMetaData indexMetaData = indexSettings().getIndexMetaData(); - Index mergeSourceIndex = indexMetaData.getMergeSourceIndex(); - final boolean recoverFromLocalShards = mergeSourceIndex != null && shardRouting.allocatedPostIndexCreate(indexMetaData) == false && shardRouting.primary(); - final RecoveryState recoveryState = new RecoveryState(shardId(), shardRouting.primary(), - recoverFromLocalShards ? RecoveryState.Type.LOCAL_SHARDS : RecoveryState.Type.STORE, localNode, localNode); - if (recoverFromLocalShards) { + BiConsumer mappingUpdateConsumer, + IndicesService indicesService) { + switch (recoveryState.getType()) { + case PRIMARY_RELOCATION: + case REPLICA: + try { + markAsRecovering("from " + recoveryState.getSourceNode(), recoveryState); + recoveryTargetService.startRecovery(this, recoveryState.getType(), recoveryState.getSourceNode(), recoveryListener); + } catch (Throwable e) { + failShard("corrupted preexisting index", e); + recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(recoveryState, null, e), true); + } + break; + case STORE: + markAsRecovering("from store", recoveryState); // mark the shard as recovering on the cluster state thread + threadPool.generic().execute(() -> { + try { + if (recoverFromStore()) { + recoveryListener.onRecoveryDone(recoveryState); + } + } catch (Throwable t) { + recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(recoveryState, null, t), true); + } + }); + break; + case LOCAL_SHARDS: + final IndexMetaData indexMetaData = indexSettings().getIndexMetaData(); + final Index mergeSourceIndex = indexMetaData.getMergeSourceIndex(); final List startedShards = new ArrayList<>(); final IndexService sourceIndexService = indicesService.indexService(mergeSourceIndex); final int numShards = sourceIndexService != null ? 
sourceIndexService.getIndexSettings().getNumberOfShards() : -1; @@ -1414,14 +1412,14 @@ public class IndexShard extends AbstractIndexShardComponent { threadPool.generic().execute(() -> { try { final Set shards = IndexMetaData.selectShrinkShards(shardId().id(), sourceIndexService.getMetaData(), - indexMetaData.getNumberOfShards()); + + indexMetaData.getNumberOfShards()); if (recoverFromLocalShards(mappingUpdateConsumer, startedShards.stream() .filter((s) -> shards.contains(s.shardId())).collect(Collectors.toList()))) { recoveryListener.onRecoveryDone(recoveryState); } } catch (Throwable t) { recoveryListener.onRecoveryFailure(recoveryState, - new RecoveryFailedException(shardId, localNode, localNode, t), true); + new RecoveryFailedException(recoveryState, null, t), true); } }); } else { @@ -1433,36 +1431,25 @@ public class IndexShard extends AbstractIndexShardComponent { + " are started yet, expected " + numShards + " found " + startedShards.size() + " can't recover shard " + shardId()); } - recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(shardId, localNode, localNode, t), true); + recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(recoveryState, null, t), true); } - } else { - markAsRecovering("from store", recoveryState); // mark the shard as recovering on the cluster state thread + break; + case SNAPSHOT: + markAsRecovering("from snapshot", recoveryState); // mark the shard as recovering on the cluster state thread threadPool.generic().execute(() -> { try { - if (recoverFromStore()) { + final IndexShardRepository indexShardRepository = repositoriesService.indexShardRepository( + recoveryState.getRestoreSource().snapshot().getRepository()); + if (restoreFromRepository(indexShardRepository)) { recoveryListener.onRecoveryDone(recoveryState); } - } catch (Throwable t) { - recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(shardId, sourceNode, localNode, t), true); + } catch (Throwable first) { + recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(recoveryState, null, first), true); } - }); - } - } else { - // recover from a restore - final RecoveryState recoveryState = new RecoveryState(shardId(), shardRouting.primary(), - RecoveryState.Type.SNAPSHOT, shardRouting.restoreSource(), localNode); - markAsRecovering("from snapshot", recoveryState); // mark the shard as recovering on the cluster state thread - threadPool.generic().execute(() -> { - try { - final IndexShardRepository indexShardRepository = repositoriesService.indexShardRepository(restoreSource.snapshot().getRepository()); - if (restoreFromRepository(indexShardRepository)) { - recoveryListener.onRecoveryDone(recoveryState); - } - } catch (Throwable first) { - recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(shardId, sourceNode, localNode, first), true); - } - }); + break; + default: + throw new IllegalArgumentException("Unknown recovery type " + recoveryState.getType()); } } @@ -1472,7 +1459,7 @@ public class IndexShard extends AbstractIndexShardComponent { // called by the current engine @Override public void onFailedEngine(String reason, @Nullable Throwable failure) { - final ShardFailure shardFailure = new ShardFailure(shardRouting, reason, failure, getIndexUUID()); + final ShardFailure shardFailure = new ShardFailure(shardRouting, reason, failure); for (Callback listener : delegates) { try { listener.handle(shardFailure); @@ -1661,13 +1648,11 @@ public class IndexShard extends 
AbstractIndexShardComponent { public final String reason; @Nullable public final Throwable cause; - public final String indexUUID; - public ShardFailure(ShardRouting routing, String reason, @Nullable Throwable cause, String indexUUID) { + public ShardFailure(ShardRouting routing, String reason, @Nullable Throwable cause) { this.routing = routing; this.reason = reason; this.cause = cause; - this.indexUUID = indexUUID; } } diff --git a/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java b/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java index ab3e334714a..b594b31abb8 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java +++ b/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java @@ -68,15 +68,16 @@ public final class RefreshListeners implements ReferenceManager.RefreshListener * @param location the location to listen for * @param listener for the refresh. Called with true if registering the listener ran it out of slots and forced a refresh. Called with * false otherwise. + * @return did we call the listener (true) or register the listener to call later (false)? */ - public void addOrNotify(Translog.Location location, Consumer<Boolean> listener) { + public boolean addOrNotify(Translog.Location location, Consumer<Boolean> listener) { requireNonNull(listener, "listener cannot be null"); requireNonNull(location, "location cannot be null"); if (lastRefreshedLocation != null && lastRefreshedLocation.compareTo(location) >= 0) { // Location already visible, just call the listener listener.accept(false); - return; + return true; } synchronized (this) { if (refreshListeners == null) { @@ -85,12 +86,13 @@ public final class RefreshListeners implements ReferenceManager.RefreshListener if (refreshListeners.size() < getMaxRefreshListeners.getAsInt()) { // We have a free slot so register the listener refreshListeners.add(new Tuple<>(location, listener)); - return; + return false; } } // No free slot so force a refresh and call the listener in this thread forceRefresh.run(); listener.accept(true); + return true; } /** @@ -135,14 +137,14 @@ public final class RefreshListeners implements ReferenceManager.RefreshListener */ return; } - // First check if we've actually moved forward. If not then just bail immediately. - assert lastRefreshedLocation == null || currentRefreshLocation.compareTo(lastRefreshedLocation) >= 0; - if (lastRefreshedLocation != null && currentRefreshLocation.compareTo(lastRefreshedLocation) == 0) { - return; - } /* * Set the lastRefreshedLocation so listeners that come in for locations before that will just execute inline without messing - * around with refreshListeners or synchronizing at all. + * around with refreshListeners or synchronizing at all. Note that it is not safe for us to abort early if we haven't advanced the + * position here because we set and read lastRefreshedLocation outside of a synchronized block. We do that so that waiting for a + * refresh that has already passed is just a volatile read but the cost is that any check whether or not we've advanced the + * position will introduce a race between adding the listener and the position check. We could work around this by moving this + * assignment into the synchronized block below and double checking lastRefreshedLocation in addOrNotify's synchronized block but + * that doesn't seem worth it given that we already skip this process early if there aren't any listeners to iterate.
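+ * A hypothetical interleaving illustrating that race (thread A in addOrNotify, thread B here):
+ *   A: volatile read of lastRefreshedLocation sees a stale value and decides to register its listener
+ *   B: writes lastRefreshedLocation = currentRefreshLocation
+ *   A: registers, under the monitor, a listener for a location that was in fact just refreshed
+ * The registered listener then simply waits for the next refresh instead of firing immediately.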
*/ lastRefreshedLocation = currentRefreshLocation; /* diff --git a/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java index e22f684637e..e35c95ae1f0 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java @@ -59,16 +59,16 @@ public final class ShadowIndexShard extends IndexShard { /** * In addition to the regular accounting done in - * {@link IndexShard#updateRoutingEntry(ShardRouting, boolean)}, + * {@link IndexShard#updateRoutingEntry(ShardRouting)}, * if this shadow replica needs to be promoted to a primary, the shard is * failed in order to allow a new primary to be re-allocated. */ @Override - public void updateRoutingEntry(ShardRouting newRouting, boolean persistState) throws IOException { + public void updateRoutingEntry(ShardRouting newRouting) throws IOException { if (newRouting.primary() == true) {// becoming a primary throw new IllegalStateException("can't promote shard to primary"); } - super.updateRoutingEntry(newRouting, persistState); + super.updateRoutingEntry(newRouting); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index 62173f936c5..dbfcad6048a 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -131,16 +131,7 @@ final class StoreRecovery { } final void addIndices(RecoveryState.Index indexRecoveryStats, Directory target, Directory... sources) throws IOException { - /* - * TODO: once we upgraded to Lucene 6.1 use HardlinkCopyDirectoryWrapper to enable hardlinks if possible and enable it - * in the security.policy: - * - * grant codeBase "${codebase.lucene-misc-6.1.0.jar}" { - * // needed to allow shard shrinking to use hard-links if possible via lucenes HardlinkCopyDirectoryWrapper - * permission java.nio.file.LinkPermission "hard"; - * }; - * target = new org.apache.lucene.store.HardlinkCopyDirectoryWrapper(target); - */ + target = new org.apache.lucene.store.HardlinkCopyDirectoryWrapper(target); try (IndexWriter writer = new IndexWriter(new StatsDirectoryWrapper(target, indexRecoveryStats), new IndexWriterConfig(null) .setCommitOnClose(false) diff --git a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java index e0032fe503b..94337ecdbc5 100644 --- a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java +++ b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java @@ -23,6 +23,8 @@ import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexFormatTooNewException; import org.apache.lucene.index.IndexFormatTooOldException; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; @@ -49,7 +51,6 @@ import org.elasticsearch.common.lucene.store.InputStreamIndexInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import 
org.elasticsearch.common.util.iterable.Iterables; -import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardRepository; import org.elasticsearch.index.snapshots.IndexShardRestoreFailedException; @@ -458,7 +459,9 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements } if (latest >= 0) { try { - return new Tuple<>(indexShardSnapshotsFormat.read(blobContainer, Integer.toString(latest)), latest); + final BlobStoreIndexShardSnapshots shardSnapshots = + indexShardSnapshotsFormat.read(blobContainer, Integer.toString(latest)); + return new Tuple<>(shardSnapshots, latest); } catch (IOException e) { logger.warn("failed to read index file [{}]", e, SNAPSHOT_INDEX_PREFIX + latest); } @@ -503,10 +506,8 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements */ public SnapshotContext(SnapshotId snapshotId, ShardId shardId, IndexShardSnapshotStatus snapshotStatus) { super(snapshotId, Version.CURRENT, shardId); - IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); - store = indexService.getShardOrNull(shardId.id()).store(); this.snapshotStatus = snapshotStatus; - + store = indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id()).store(); } /** @@ -788,8 +789,8 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements */ public RestoreContext(SnapshotId snapshotId, Version version, ShardId shardId, ShardId snapshotShardId, RecoveryState recoveryState) { super(snapshotId, version, shardId, snapshotShardId); - store = indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id()).store(); this.recoveryState = recoveryState; + store = indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id()).store(); } /** @@ -800,6 +801,25 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements try { logger.debug("[{}] [{}] restoring to [{}] ...", snapshotId, repositoryName, shardId); BlobStoreIndexShardSnapshot snapshot = loadSnapshot(); + + if (snapshot.indexFiles().size() == 1 + && snapshot.indexFiles().get(0).physicalName().startsWith("segments_") + && snapshot.indexFiles().get(0).hasUnknownChecksum()) { + // If the shard has no documents, it will only contain a single segments_N file for the + // shard's snapshot. If we are restoring a snapshot created by a previous supported version, + // it is still possible that in that version, an empty shard has a segments_N file with an unsupported + // version (and no checksum), because we don't know the Lucene version to assign segments_N until we + // have written some data. Since the segments_N for an empty shard could have an incompatible Lucene + // version number and no checksum, even though the index itself is perfectly fine to restore, this + // empty shard would cause exceptions to be thrown. Since there is no data to restore from an empty + // shard anyway, we just create the empty shard here and then exit. 
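+ // A minimal empty-index write covers this case: OpenMode.CREATE discards whatever files
+ // already exist in the shard directory, and closing the writer (commitOnClose is set below)
+ // commits a fresh segments_N written by the current Lucene version.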
+ IndexWriter writer = new IndexWriter(store.directory(), new IndexWriterConfig(null) + .setOpenMode(IndexWriterConfig.OpenMode.CREATE) + .setCommitOnClose(true)); + writer.close(); + return; + } + SnapshotFiles snapshotFiles = new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles()); final Store.MetadataSnapshot recoveryTargetMetadata; try { diff --git a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java index 60b7ec2112e..5bb0f728bc1 100644 --- a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java +++ b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java @@ -22,7 +22,6 @@ package org.elasticsearch.index.snapshots.blobstore; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Version; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.Strings; @@ -50,6 +49,8 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil * Information about snapshotted file */ public static class FileInfo { + private static final String UNKNOWN_CHECKSUM = "_na_"; + private final String name; private final ByteSizeValue partSize; private final long partBytes; @@ -207,27 +208,43 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil * @return true if file in a store this this file have the same checksum and length */ public boolean isSame(FileInfo fileInfo) { - if (numberOfParts != fileInfo.numberOfParts) return false; - if (partBytes != fileInfo.partBytes) return false; - if (!name.equals(fileInfo.name)) return false; + if (numberOfParts != fileInfo.numberOfParts) { + return false; + } + if (partBytes != fileInfo.partBytes) { + return false; + } + if (!name.equals(fileInfo.name)) { + return false; + } if (partSize != null) { - if (!partSize.equals(fileInfo.partSize)) return false; + if (!partSize.equals(fileInfo.partSize)) { + return false; + } } else { - if (fileInfo.partSize != null) return false; + if (fileInfo.partSize != null) { + return false; + } } return metadata.isSame(fileInfo.metadata); } - static final class Fields { - static final String NAME = "name"; - static final String PHYSICAL_NAME = "physical_name"; - static final String LENGTH = "length"; - static final String CHECKSUM = "checksum"; - static final String PART_SIZE = "part_size"; - static final String WRITTEN_BY = "written_by"; - static final String META_HASH = "meta_hash"; + /** + * Checks if the checksum for the file is unknown. This only is possible on an empty shard's + * segments_N file which was created in older Lucene versions. 
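+ * For example, an empty shard snapshotted before Lucene 4.8 (FIRST_LUCENE_CHECKSUM_VERSION) may
+ * carry a segments_N entry with no recorded checksum; fromXContent assigns such entries the
+ * _na_ marker that this method reports.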
+ */ + public boolean hasUnknownChecksum() { + return metadata.checksum().equals(UNKNOWN_CHECKSUM); } + static final String NAME = "name"; + static final String PHYSICAL_NAME = "physical_name"; + static final String LENGTH = "length"; + static final String CHECKSUM = "checksum"; + static final String PART_SIZE = "part_size"; + static final String WRITTEN_BY = "written_by"; + static final String META_HASH = "meta_hash"; + /** * Serializes file info into JSON * @@ -237,22 +254,22 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil */ public static void toXContent(FileInfo file, XContentBuilder builder, ToXContent.Params params) throws IOException { builder.startObject(); - builder.field(Fields.NAME, file.name); - builder.field(Fields.PHYSICAL_NAME, file.metadata.name()); - builder.field(Fields.LENGTH, file.metadata.length()); - if (file.metadata.checksum() != null) { - builder.field(Fields.CHECKSUM, file.metadata.checksum()); + builder.field(NAME, file.name); + builder.field(PHYSICAL_NAME, file.metadata.name()); + builder.field(LENGTH, file.metadata.length()); + if (file.metadata.checksum().equals(UNKNOWN_CHECKSUM) == false) { + builder.field(CHECKSUM, file.metadata.checksum()); } if (file.partSize != null) { - builder.field(Fields.PART_SIZE, file.partSize.bytes()); + builder.field(PART_SIZE, file.partSize.bytes()); } if (file.metadata.writtenBy() != null) { - builder.field(Fields.WRITTEN_BY, file.metadata.writtenBy()); + builder.field(WRITTEN_BY, file.metadata.writtenBy()); } if (file.metadata.hash() != null && file.metadata().hash().length > 0) { - builder.field(Fields.META_HASH, file.metadata.hash()); + builder.field(META_HASH, file.metadata.hash()); } builder.endObject(); } @@ -271,6 +288,7 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil String checksum = null; ByteSizeValue partSize = null; Version writtenBy = null; + String writtenByStr = null; BytesRef metaHash = new BytesRef(); if (token == XContentParser.Token.START_OBJECT) { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { @@ -278,19 +296,20 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil String currentFieldName = parser.currentName(); token = parser.nextToken(); if (token.isValue()) { - if ("name".equals(currentFieldName)) { + if (NAME.equals(currentFieldName)) { name = parser.text(); - } else if ("physical_name".equals(currentFieldName)) { + } else if (PHYSICAL_NAME.equals(currentFieldName)) { physicalName = parser.text(); - } else if ("length".equals(currentFieldName)) { + } else if (LENGTH.equals(currentFieldName)) { length = parser.longValue(); - } else if ("checksum".equals(currentFieldName)) { + } else if (CHECKSUM.equals(currentFieldName)) { checksum = parser.text(); - } else if ("part_size".equals(currentFieldName)) { + } else if (PART_SIZE.equals(currentFieldName)) { partSize = new ByteSizeValue(parser.longValue()); - } else if ("written_by".equals(currentFieldName)) { - writtenBy = Lucene.parseVersionLenient(parser.text(), null); - } else if ("meta_hash".equals(currentFieldName)) { + } else if (WRITTEN_BY.equals(currentFieldName)) { + writtenByStr = parser.text(); + writtenBy = Lucene.parseVersionLenient(writtenByStr, null); + } else if (META_HASH.equals(currentFieldName)) { metaHash.bytes = parser.binaryValue(); metaHash.offset = 0; metaHash.length = metaHash.bytes.length; @@ -305,6 +324,7 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil } } } + // Verify 
that file information is complete if (name == null || Strings.validFileName(name) == false) { throw new ElasticsearchParseException("missing or invalid file name [" + name + "]"); @@ -312,10 +332,29 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil throw new ElasticsearchParseException("missing or invalid physical file name [" + physicalName + "]"); } else if (length < 0) { throw new ElasticsearchParseException("missing or invalid file length"); + } else if (writtenBy == null) { + throw new ElasticsearchParseException("missing or invalid written_by [" + writtenByStr + "]"); + } else if (checksum == null) { + if (physicalName.startsWith("segments_") + && writtenBy.onOrAfter(StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION) == false) { + // its possible the checksum is null for segments_N files that belong to a shard with no data, + // so we will assign it _na_ for now and try to get the checksum from the file itself later + checksum = UNKNOWN_CHECKSUM; + } else { + throw new ElasticsearchParseException("missing checksum for name [" + name + "]"); + } } return new FileInfo(name, new StoreFileMetaData(physicalName, length, checksum, writtenBy, metaHash), partSize); } + @Override + public String toString() { + return "[name: " + name + + ", numberOfParts: " + numberOfParts + + ", partSize: " + partSize + + ", partBytes: " + partBytes + + ", metadata: " + metadata + "]"; + } } private final String snapshot; @@ -424,26 +463,21 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil return totalSize; } - static final class Fields { - static final String NAME = "name"; - static final String INDEX_VERSION = "index_version"; - static final String START_TIME = "start_time"; - static final String TIME = "time"; - static final String NUMBER_OF_FILES = "number_of_files"; - static final String TOTAL_SIZE = "total_size"; - static final String FILES = "files"; - } - - static final class ParseFields { - static final ParseField NAME = new ParseField("name"); - static final ParseField INDEX_VERSION = new ParseField("index_version", "index-version"); - static final ParseField START_TIME = new ParseField("start_time"); - static final ParseField TIME = new ParseField("time"); - static final ParseField NUMBER_OF_FILES = new ParseField("number_of_files"); - static final ParseField TOTAL_SIZE = new ParseField("total_size"); - static final ParseField FILES = new ParseField("files"); - } + private static final String NAME = "name"; + private static final String INDEX_VERSION = "index_version"; + private static final String START_TIME = "start_time"; + private static final String TIME = "time"; + private static final String NUMBER_OF_FILES = "number_of_files"; + private static final String TOTAL_SIZE = "total_size"; + private static final String FILES = "files"; + private static final ParseField PARSE_NAME = new ParseField("name"); + private static final ParseField PARSE_INDEX_VERSION = new ParseField("index_version", "index-version"); + private static final ParseField PARSE_START_TIME = new ParseField("start_time"); + private static final ParseField PARSE_TIME = new ParseField("time"); + private static final ParseField PARSE_NUMBER_OF_FILES = new ParseField("number_of_files"); + private static final ParseField PARSE_TOTAL_SIZE = new ParseField("total_size"); + private static final ParseField PARSE_FILES = new ParseField("files"); /** * Serializes shard snapshot metadata info into JSON @@ -453,13 +487,13 @@ public class BlobStoreIndexShardSnapshot implements 
ToXContent, FromXContentBuil */ @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(Fields.NAME, snapshot); - builder.field(Fields.INDEX_VERSION, indexVersion); - builder.field(Fields.START_TIME, startTime); - builder.field(Fields.TIME, time); - builder.field(Fields.NUMBER_OF_FILES, numberOfFiles); - builder.field(Fields.TOTAL_SIZE, totalSize); - builder.startArray(Fields.FILES); + builder.field(NAME, snapshot); + builder.field(INDEX_VERSION, indexVersion); + builder.field(START_TIME, startTime); + builder.field(TIME, time); + builder.field(NUMBER_OF_FILES, numberOfFiles); + builder.field(TOTAL_SIZE, totalSize); + builder.startArray(FILES); for (FileInfo fileInfo : indexFiles) { FileInfo.toXContent(fileInfo, builder, params); } @@ -493,24 +527,24 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil String currentFieldName = parser.currentName(); token = parser.nextToken(); if (token.isValue()) { - if (parseFieldMatcher.match(currentFieldName, ParseFields.NAME)) { + if (parseFieldMatcher.match(currentFieldName, PARSE_NAME)) { snapshot = parser.text(); - } else if (parseFieldMatcher.match(currentFieldName, ParseFields.INDEX_VERSION)) { + } else if (parseFieldMatcher.match(currentFieldName, PARSE_INDEX_VERSION)) { // The index-version is needed for backward compatibility with v 1.0 indexVersion = parser.longValue(); - } else if (parseFieldMatcher.match(currentFieldName, ParseFields.START_TIME)) { + } else if (parseFieldMatcher.match(currentFieldName, PARSE_START_TIME)) { startTime = parser.longValue(); - } else if (parseFieldMatcher.match(currentFieldName, ParseFields.TIME)) { + } else if (parseFieldMatcher.match(currentFieldName, PARSE_TIME)) { time = parser.longValue(); - } else if (parseFieldMatcher.match(currentFieldName, ParseFields.NUMBER_OF_FILES)) { + } else if (parseFieldMatcher.match(currentFieldName, PARSE_NUMBER_OF_FILES)) { numberOfFiles = parser.intValue(); - } else if (parseFieldMatcher.match(currentFieldName, ParseFields.TOTAL_SIZE)) { + } else if (parseFieldMatcher.match(currentFieldName, PARSE_TOTAL_SIZE)) { totalSize = parser.longValue(); } else { throw new ElasticsearchParseException("unknown parameter [{}]", currentFieldName); } } else if (token == XContentParser.Token.START_ARRAY) { - if (parseFieldMatcher.match(currentFieldName, ParseFields.FILES)) { + if (parseFieldMatcher.match(currentFieldName, PARSE_FILES)) { while ((parser.nextToken()) != XContentParser.Token.END_ARRAY) { indexFiles.add(FileInfo.fromXContent(parser)); } @@ -526,6 +560,7 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil } } return new BlobStoreIndexShardSnapshot(snapshot, indexVersion, Collections.unmodifiableList(indexFiles), - startTime, time, numberOfFiles, totalSize); + startTime, time, numberOfFiles, totalSize); } + } diff --git a/core/src/main/java/org/elasticsearch/index/store/Store.java b/core/src/main/java/org/elasticsearch/index/store/Store.java index a720f5cb258..166f978a4db 100644 --- a/core/src/main/java/org/elasticsearch/index/store/Store.java +++ b/core/src/main/java/org/elasticsearch/index/store/Store.java @@ -41,7 +41,6 @@ import org.apache.lucene.store.IndexOutput; import org.apache.lucene.store.Lock; import org.apache.lucene.store.SimpleFSDirectory; import org.apache.lucene.util.ArrayUtil; -import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.IOUtils; import 
org.apache.lucene.util.Version; @@ -444,11 +443,9 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref } /** - * The returned IndexOutput might validate the files checksum if the file has been written with a newer lucene version - * and the metadata holds the necessary information to detect that it was been written by Lucene 4.8 or newer. If it has only - * a legacy checksum, returned IndexOutput will not verify the checksum. + * The returned IndexOutput validates the file's checksum. *
- * Note: Checksums are calculated nevertheless since lucene does it by default sicne version 4.8.0. This method only adds the + * Note: Checksums are calculated by default since version 4.8.0. This method only adds the * verification against the checksum in the given metadata and does not add any significant overhead. */ public IndexOutput createVerifyingOutput(String fileName, final StoreFileMetaData metadata, final IOContext context) throws IOException { @@ -652,17 +649,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref // different in the diff. That's why we have to double check here again if the rest of it matches. // all is fine this file is just part of a commit or a segment that is different - final boolean same = local.isSame(remote); - - // this check ensures that the two files are consistent ie. if we don't have checksums only the rest needs to match we are just - // verifying that we are consistent on both ends source and target - final boolean hashAndLengthEqual = ( - local.checksum() == null - && remote.checksum() == null - && local.hash().equals(remote.hash()) - && local.length() == remote.length()); - final boolean consistent = hashAndLengthEqual || same; - if (consistent == false) { + if (local.isSame(remote) == false) { logger.debug("Files are different on the recovery target: {} ", recoveryDiff); throw new IllegalStateException("local version: " + local + " is different from remote version after recovery: " + remote, null); } @@ -898,18 +885,6 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref } } - /** - * Computes a strong hash value for small files. Note that this method should only be used for files < 1MB - */ - public static BytesRef hashFile(Directory directory, String file) throws IOException { - final BytesRefBuilder fileHash = new BytesRefBuilder(); - try (final IndexInput in = directory.openInput(file, IOContext.READONCE)) { - hashFile(fileHash, new InputStreamIndexInput(in, in.length()), in.length()); - } - return fileHash.get(); - } - - /** * Computes a strong hash value for small files. 
Note that this method should only be used for files < 1MB */ } diff --git a/core/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java b/core/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java index e163b15f60e..2653f01c81d 100644 --- a/core/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java +++ b/core/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java @@ -21,10 +21,8 @@ package org.elasticsearch.index.store; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Version; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.lucene.Lucene; @@ -58,14 +56,15 @@ public class StoreFileMetaData implements Writeable { } public StoreFileMetaData(String name, long length, String checksum, Version writtenBy, BytesRef hash) { - assert writtenBy != null && writtenBy.onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION) : "index version less that " - + FIRST_LUCENE_CHECKSUM_VERSION + " are not supported but got: " + writtenBy; - Objects.requireNonNull(writtenBy, "writtenBy must not be null"); - Objects.requireNonNull(checksum, "checksum must not be null"); - this.name = name; + // it's possible here to have a _na_ checksum or an unsupported writtenBy version if the + // file is a segments_N file, but that is fine because we handle that case upstream + assert name.startsWith("segments_") || (writtenBy != null && writtenBy.onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION)) : + "index versions less than " + FIRST_LUCENE_CHECKSUM_VERSION + " are not supported but got: " + writtenBy; + this.name = Objects.requireNonNull(name, "name must not be null"); this.length = length; - this.checksum = checksum; - this.writtenBy = writtenBy; + this.checksum = Objects.requireNonNull(checksum, "checksum must not be null"); + this.writtenBy = Objects.requireNonNull(writtenBy, "writtenBy must not be null"); this.hash = hash == null ?
new BytesRef() : hash; } diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java b/core/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java index bd01e7f0183..70b9443e043 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java @@ -45,6 +45,7 @@ import java.util.IdentityHashMap; import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Predicate; public class IndicesQueryCache extends AbstractComponent implements QueryCache, Closeable { @@ -52,6 +53,9 @@ public class IndicesQueryCache extends AbstractComponent implements QueryCache, "indices.queries.cache.size", "10%", Property.NodeScope); public static final Setting INDICES_CACHE_QUERY_COUNT_SETTING = Setting.intSetting( "indices.queries.cache.count", 10000, 1, Property.NodeScope); + // enables caching on all segments instead of only the larger ones, for testing only + public static final Setting INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING = Setting.boolSetting( + "indices.queries.cache.all_segments", false, Property.NodeScope); private final LRUQueryCache cache; private final ShardCoreKeyMap shardKeyMap = new ShardCoreKeyMap(); @@ -69,111 +73,11 @@ public class IndicesQueryCache extends AbstractComponent implements QueryCache, final int count = INDICES_CACHE_QUERY_COUNT_SETTING.get(settings); logger.debug("using [node] query cache with size [{}] max filter count [{}]", size, count); - cache = new LRUQueryCache(count, size.bytes()) { - - private Stats getStats(Object coreKey) { - final ShardId shardId = shardKeyMap.getShardId(coreKey); - if (shardId == null) { - return null; - } - return shardStats.get(shardId); - } - - private Stats getOrCreateStats(Object coreKey) { - final ShardId shardId = shardKeyMap.getShardId(coreKey); - Stats stats = shardStats.get(shardId); - if (stats == null) { - stats = new Stats(); - shardStats.put(shardId, stats); - } - return stats; - } - - // It's ok to not protect these callbacks by a lock since it is - // done in LRUQueryCache - @Override - protected void onClear() { - assert Thread.holdsLock(this); - super.onClear(); - for (Stats stats : shardStats.values()) { - // don't throw away hit/miss - stats.cacheSize = 0; - stats.ramBytesUsed = 0; - } - sharedRamBytesUsed = 0; - } - - @Override - protected void onQueryCache(Query filter, long ramBytesUsed) { - assert Thread.holdsLock(this); - super.onQueryCache(filter, ramBytesUsed); - sharedRamBytesUsed += ramBytesUsed; - } - - @Override - protected void onQueryEviction(Query filter, long ramBytesUsed) { - assert Thread.holdsLock(this); - super.onQueryEviction(filter, ramBytesUsed); - sharedRamBytesUsed -= ramBytesUsed; - } - - @Override - protected void onDocIdSetCache(Object readerCoreKey, long ramBytesUsed) { - assert Thread.holdsLock(this); - super.onDocIdSetCache(readerCoreKey, ramBytesUsed); - final Stats shardStats = getOrCreateStats(readerCoreKey); - shardStats.cacheSize += 1; - shardStats.cacheCount += 1; - shardStats.ramBytesUsed += ramBytesUsed; - - StatsAndCount statsAndCount = stats2.get(readerCoreKey); - if (statsAndCount == null) { - statsAndCount = new StatsAndCount(shardStats); - stats2.put(readerCoreKey, statsAndCount); - } - statsAndCount.count += 1; - } - - @Override - protected void onDocIdSetEviction(Object readerCoreKey, int numEntries, long sumRamBytesUsed) { - assert Thread.holdsLock(this); - super.onDocIdSetEviction(readerCoreKey, numEntries, 
sumRamBytesUsed); - // onDocIdSetEviction might sometimes be called with a number - // of entries equal to zero if the cache for the given segment - // was already empty when the close listener was called - if (numEntries > 0) { - // We can't use ShardCoreKeyMap here because its core closed - // listener is called before the listener of the cache which - // triggers this eviction. So instead we use use stats2 that - // we only evict when nothing is cached anymore on the segment - // instead of relying on close listeners - final StatsAndCount statsAndCount = stats2.get(readerCoreKey); - final Stats shardStats = statsAndCount.stats; - shardStats.cacheSize -= numEntries; - shardStats.ramBytesUsed -= sumRamBytesUsed; - statsAndCount.count -= numEntries; - if (statsAndCount.count == 0) { - stats2.remove(readerCoreKey); - } - } - } - - @Override - protected void onHit(Object readerCoreKey, Query filter) { - assert Thread.holdsLock(this); - super.onHit(readerCoreKey, filter); - final Stats shardStats = getStats(readerCoreKey); - shardStats.hitCount += 1; - } - - @Override - protected void onMiss(Object readerCoreKey, Query filter) { - assert Thread.holdsLock(this); - super.onMiss(readerCoreKey, filter); - final Stats shardStats = getOrCreateStats(readerCoreKey); - shardStats.missCount += 1; - } - }; + if (INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.get(settings)) { + cache = new ElasticsearchLRUQueryCache(count, size.bytes(), context -> true); + } else { + cache = new ElasticsearchLRUQueryCache(count, size.bytes()); + } sharedRamBytesUsed = 0; } @@ -316,4 +220,111 @@ public class IndicesQueryCache extends AbstractComponent implements QueryCache, assert empty(shardStats.get(shardId)); shardStats.remove(shardId); } + + private class ElasticsearchLRUQueryCache extends LRUQueryCache { + + ElasticsearchLRUQueryCache(int maxSize, long maxRamBytesUsed, Predicate leavesToCache) { + super(maxSize, maxRamBytesUsed, leavesToCache); + } + + ElasticsearchLRUQueryCache(int maxSize, long maxRamBytesUsed) { + super(maxSize, maxRamBytesUsed); + } + + private Stats getStats(Object coreKey) { + final ShardId shardId = shardKeyMap.getShardId(coreKey); + if (shardId == null) { + return null; + } + return shardStats.get(shardId); + } + + private Stats getOrCreateStats(Object coreKey) { + final ShardId shardId = shardKeyMap.getShardId(coreKey); + Stats stats = shardStats.get(shardId); + if (stats == null) { + stats = new Stats(); + shardStats.put(shardId, stats); + } + return stats; + } + + // It's ok to not protect these callbacks by a lock since it is + // done in LRUQueryCache + @Override + protected void onClear() { + super.onClear(); + for (Stats stats : shardStats.values()) { + // don't throw away hit/miss + stats.cacheSize = 0; + stats.ramBytesUsed = 0; + } + sharedRamBytesUsed = 0; + } + + @Override + protected void onQueryCache(Query filter, long ramBytesUsed) { + super.onQueryCache(filter, ramBytesUsed); + sharedRamBytesUsed += ramBytesUsed; + } + + @Override + protected void onQueryEviction(Query filter, long ramBytesUsed) { + super.onQueryEviction(filter, ramBytesUsed); + sharedRamBytesUsed -= ramBytesUsed; + } + + @Override + protected void onDocIdSetCache(Object readerCoreKey, long ramBytesUsed) { + super.onDocIdSetCache(readerCoreKey, ramBytesUsed); + final Stats shardStats = getOrCreateStats(readerCoreKey); + shardStats.cacheSize += 1; + shardStats.cacheCount += 1; + shardStats.ramBytesUsed += ramBytesUsed; + + StatsAndCount statsAndCount = stats2.get(readerCoreKey); + if (statsAndCount == null) { + 
statsAndCount = new StatsAndCount(shardStats); + stats2.put(readerCoreKey, statsAndCount); + } + statsAndCount.count += 1; + } + + @Override + protected void onDocIdSetEviction(Object readerCoreKey, int numEntries, long sumRamBytesUsed) { + super.onDocIdSetEviction(readerCoreKey, numEntries, sumRamBytesUsed); + // onDocIdSetEviction might sometimes be called with a number + // of entries equal to zero if the cache for the given segment + // was already empty when the close listener was called + if (numEntries > 0) { + // We can't use ShardCoreKeyMap here because its core closed + // listener is called before the listener of the cache which + // triggers this eviction. So instead we use stats2, which + // we only evict when nothing is cached anymore on the segment, + // instead of relying on close listeners + final StatsAndCount statsAndCount = stats2.get(readerCoreKey); + final Stats shardStats = statsAndCount.stats; + shardStats.cacheSize -= numEntries; + shardStats.ramBytesUsed -= sumRamBytesUsed; + statsAndCount.count -= numEntries; + if (statsAndCount.count == 0) { + stats2.remove(readerCoreKey); + } + } + } + + @Override + protected void onHit(Object readerCoreKey, Query filter) { + super.onHit(readerCoreKey, filter); + final Stats shardStats = getStats(readerCoreKey); + shardStats.hitCount += 1; + } + + @Override + protected void onMiss(Object readerCoreKey, Query filter) { + super.onMiss(readerCoreKey, filter); + final Stats shardStats = getOrCreateStats(readerCoreKey); + shardStats.missCount += 1; + } + } } diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesService.java b/core/src/main/java/org/elasticsearch/indices/IndicesService.java index ba512379868..3ae02c7eadd 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -35,6 +35,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.breaker.CircuitBreaker; @@ -55,6 +56,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.Callback; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.env.NodeEnvironment; @@ -86,10 +88,14 @@ import org.elasticsearch.index.shard.IndexingStats; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.IndexStoreConfig; import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.indices.cluster.IndicesClusterStateService; import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; import org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.indices.query.IndicesQueriesRegistry; +import org.elasticsearch.indices.recovery.RecoveryState; +import org.elasticsearch.indices.recovery.RecoveryTargetService; import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.query.QueryPhase; @@ -124,7 +130,8 @@ import static org.elasticsearch.common.util.CollectionUtils.arrayAsArrayList; /** * */ -public class IndicesService extends AbstractLifecycleComponent implements Iterable, IndexService.ShardStoreDeleter { +public class IndicesService extends AbstractLifecycleComponent + implements IndicesClusterStateService.AllocatedIndices, IndexService.ShardStoreDeleter { public static final String INDICES_SHARDS_CLOSED_TIMEOUT = "indices.shards_closed_timeout"; public static final Setting INDICES_CACHE_CLEAN_INTERVAL_SETTING = @@ -296,11 +303,14 @@ public class IndicesService extends AbstractLifecycleComponent i } /** - * Returns true if changes (adding / removing) indices, shards and so on are allowed. + * Checks if changes (adding / removing) indices, shards and so on are allowed. + * + * @throws IllegalStateException if no changes allowed. */ - public boolean changesAllowed() { - // we check on stop here since we defined stop when we delete the indices - return lifecycle.started(); + private void ensureChangesAllowed() { + if (lifecycle.started() == false) { + throw new IllegalStateException("Can't make changes to indices service, node is closed"); + } } @Override @@ -314,10 +324,9 @@ public class IndicesService extends AbstractLifecycleComponent i /** * Returns an IndexService for the specified index if exists otherwise returns null. - * */ - @Nullable - public IndexService indexService(Index index) { + @Override + public @Nullable IndexService indexService(Index index) { return indices.get(index.getUUID()); } @@ -339,11 +348,9 @@ public class IndicesService extends AbstractLifecycleComponent i * @param builtInListeners a list of built-in lifecycle {@link IndexEventListener} that should should be used along side with the per-index listeners * @throws IndexAlreadyExistsException if the index already exists. 
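+ * @throws IllegalStateException if the node is closed and changes to the indices service are no longer allowed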
*/ + @Override public synchronized IndexService createIndex(final NodeServicesProvider nodeServicesProvider, IndexMetaData indexMetaData, List builtInListeners) throws IOException { - - if (!lifecycle.started()) { - throw new IllegalStateException("Can't create an index [" + indexMetaData.getIndex() + "], node is closed"); - } + ensureChangesAllowed(); if (indexMetaData.getIndexUUID().equals(IndexMetaData.INDEX_UUID_NA_VALUE)) { throw new IllegalArgumentException("index must have a real UUID found value: [" + indexMetaData.getIndexUUID() + "]"); } @@ -424,14 +431,44 @@ public class IndicesService extends AbstractLifecycleComponent i } } + @Override + public IndexShard createShard(ShardRouting shardRouting, RecoveryState recoveryState, RecoveryTargetService recoveryTargetService, + RecoveryTargetService.RecoveryListener recoveryListener, RepositoriesService repositoriesService, + NodeServicesProvider nodeServicesProvider, Callback onShardFailure) throws IOException { + ensureChangesAllowed(); + IndexService indexService = indexService(shardRouting.index()); + IndexShard indexShard = indexService.createShard(shardRouting); + indexShard.addShardFailureCallback(onShardFailure); + indexShard.startRecovery(recoveryState, recoveryTargetService, recoveryListener, repositoriesService, + (type, mapping) -> { + assert recoveryState.getType() == RecoveryState.Type.LOCAL_SHARDS : + "mapping update consumer only required by local shards recovery"; + try { + nodeServicesProvider.getClient().admin().indices().preparePutMapping() + .setConcreteIndex(shardRouting.index()) // concrete index - no name clash, it uses uuid + .setType(type) + .setSource(mapping.source().string()) + .get(); + } catch (IOException ex) { + throw new ElasticsearchException("failed to stringify mapping source", ex); + } + }, this); + return indexShard; + } + /** * Removes the given index from this service and releases all associated resources. Persistent parts of the index * like the shards files, state and transaction logs are kept around in the case of a disaster recovery. * @param index the index to remove * @param reason the high level reason causing this removal */ + @Override public void removeIndex(Index index, String reason) { - removeIndex(index, reason, false); + try { + removeIndex(index, reason, false); + } catch (Throwable e) { + logger.warn("failed to remove index ({})", e, reason); + } } private void removeIndex(Index index, String reason, boolean delete) { @@ -516,14 +553,20 @@ public class IndicesService extends AbstractLifecycleComponent i * @param index the index to delete * @param reason the high level reason causing this delete */ - public void deleteIndex(Index index, String reason) throws IOException { - removeIndex(index, reason, true); + @Override + public void deleteIndex(Index index, String reason) { + try { + removeIndex(index, reason, true); + } catch (Throwable e) { + logger.warn("failed to delete index ({})", e, reason); + } } /** * Deletes an index that is not assigned to this node. This method cleans up all disk folders relating to the index * but does not deal with in-memory structures. 
For those call {@link #deleteIndex(Index, String)} */ + @Override public void deleteUnassignedIndex(String reason, IndexMetaData metaData, ClusterState clusterState) { if (nodeEnv.hasNodeFile()) { String indexName = metaData.getIndex().getName(); @@ -683,8 +726,8 @@ public class IndicesService extends AbstractLifecycleComponent i * @param clusterState {@code ClusterState} to ensure the index is not part of it * @return IndexMetaData for the index loaded from disk */ - @Nullable - public IndexMetaData verifyIndexIsDeleted(final Index index, final ClusterState clusterState) { + @Override + public @Nullable IndexMetaData verifyIndexIsDeleted(final Index index, final ClusterState clusterState) { // this method should only be called when we know the index (name + uuid) is not part of the cluster state if (clusterState.metaData().index(index) != null) { throw new IllegalStateException("Cannot delete index [" + index + "], it is still part of the cluster state."); @@ -839,6 +882,7 @@ public class IndicesService extends AbstractLifecycleComponent i * @param index the index to process the pending deletes for * @param timeout the timeout used for processing pending deletes */ + @Override public void processPendingDeletes(Index index, IndexSettings indexSettings, TimeValue timeout) throws IOException, InterruptedException { logger.debug("{} processing pending deletes", index); final long startTimeNS = System.nanoTime(); diff --git a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index 2c77f863c47..c0bfedc47ca 100644 --- a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -19,16 +19,13 @@ package org.elasticsearch.indices.cluster; -import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.lucene.store.LockObtainFailedException; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.action.index.NodeMappingRefreshAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingNode; @@ -37,7 +34,6 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.lucene.Lucene; @@ -48,17 +44,18 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexComponent; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexShardAlreadyExistsException; import 
org.elasticsearch.index.NodeServicesProvider; -import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardRelocatedException; import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardNotFoundException; +import org.elasticsearch.indices.IndexAlreadyExistsException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.flush.SyncedFlushService; import org.elasticsearch.indices.recovery.RecoveryFailedException; @@ -73,7 +70,6 @@ import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -82,14 +78,13 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; /** * */ public class IndicesClusterStateService extends AbstractLifecycleComponent implements ClusterStateListener { - private final IndicesService indicesService; + final AllocatedIndices> indicesService; private final ClusterService clusterService; private final ThreadPool threadPool; private final RecoveryTargetService recoveryTargetService; @@ -102,11 +97,10 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent failedShards = ConcurrentCollections.newConcurrentMap(); + final ConcurrentMap failedShardsCache = ConcurrentCollections.newConcurrentMap(); private final RestoreService restoreService; private final RepositoriesService repositoriesService; - private final Object mutex = new Object(); private final FailedShardHandler failedShardHandler = new FailedShardHandler(); private final boolean sendRefreshMapping; @@ -120,6 +114,22 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent>) indicesService, + clusterService, threadPool, recoveryTargetService, shardStateAction, + nodeMappingRefreshAction, repositoriesService, restoreService, searchService, syncedFlushService, recoverySource, + nodeServicesProvider); + } + + // for tests + IndicesClusterStateService(Settings settings, + AllocatedIndices> indicesService, + ClusterService clusterService, + ThreadPool threadPool, RecoveryTargetService recoveryTargetService, + ShardStateAction shardStateAction, + NodeMappingRefreshAction nodeMappingRefreshAction, + RepositoriesService repositoriesService, RestoreService restoreService, + SearchService searchService, SyncedFlushService syncedFlushService, + RecoverySource recoverySource, NodeServicesProvider nodeServicesProvider) { super(settings); this.buildInIndexListener = Arrays.asList(recoverySource, recoveryTargetService, searchService, syncedFlushService); this.indicesService = indicesService; @@ -149,87 +159,97 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent indexService : indicesService) { + indicesService.removeIndex(indexService.index(), "cleaning index (disabled block persistence)"); // also cleans shards } - - cleanFailedShards(event); - - // cleaning up indices that are completely deleted so we won't need to worry about them - // when checking for shards - applyDeletedIndices(event); - applyDeletedShards(event); - // call after deleted shards so indices with no 
shards will be cleaned - applyCleanedIndices(event); - // make sure that newly created shards use the latest meta data - applyIndexMetaData(event); - applyNewIndices(event); - // apply mappings also updates new indices. TODO: make new indices good to begin with - applyMappings(event); - applyNewOrUpdatedShards(event); - } - } - - private void cleanFailedShards(final ClusterChangedEvent event) { - RoutingNode routingNode = event.state().getRoutingNodes().node(event.state().nodes().getLocalNodeId()); - if (routingNode == null) { - failedShards.clear(); return; } - for (Iterator> iterator = failedShards.entrySet().iterator(); iterator.hasNext(); ) { - Map.Entry entry = iterator.next(); - ShardRouting failedShardRouting = entry.getValue(); - ShardRouting matchedShardRouting = routingNode.getByShardId(failedShardRouting.shardId()); - if (matchedShardRouting == null || matchedShardRouting.isSameAllocation(failedShardRouting) == false) { + + updateFailedShardsCache(state); + + deleteIndices(event); // also deletes shards of deleted indices + + removeUnallocatedIndices(state); // also removes shards of removed indices + + failMissingShards(state); + + removeShards(state); + + updateIndices(event); // can also fail shards, but these are then guaranteed to be in failedShardsCache + + createIndices(state); + + createOrUpdateShards(state); + } + + /** + * Removes shard entries from the failed shards cache that are no longer allocated to this node by the master. + * Sends shard failures for shards that are marked as actively allocated to this node but don't actually exist on the node. + * Resends shard failures for shards that are still marked as allocated to this node but previously failed. + * + * @param state new cluster state + */ + private void updateFailedShardsCache(final ClusterState state) { + RoutingNode localRoutingNode = state.getRoutingNodes().node(state.nodes().getLocalNodeId()); + if (localRoutingNode == null) { + failedShardsCache.clear(); + return; + } + + DiscoveryNode masterNode = state.nodes().getMasterNode(); + + // remove items from cache which are not in our routing table anymore and resend failures that have not executed on master yet + for (Iterator> iterator = failedShardsCache.entrySet().iterator(); iterator.hasNext(); ) { + ShardRouting failedShardRouting = iterator.next().getValue(); + ShardRouting matchedRouting = localRoutingNode.getByShardId(failedShardRouting.shardId()); + if (matchedRouting == null || matchedRouting.isSameAllocation(failedShardRouting) == false) { iterator.remove(); + } else { + if (masterNode != null) { // TODO: can we remove this? Is resending shard failures the responsibility of shardStateAction? + String message = "master " + masterNode + " has not removed previously failed shard. resending shard failure"; + logger.trace("[{}] re-sending failed shard [{}], reason [{}]", matchedRouting.shardId(), matchedRouting, message); + shardStateAction.shardFailed(matchedRouting, matchedRouting, message, null, SHARD_STATE_ACTION_LISTENER); + } } } } - private void applyDeletedIndices(final ClusterChangedEvent event) { + /** + * Deletes indices (with shard data). 
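The updateFailedShardsCache method above keeps at most one entry per ShardId for shards this node has failed: an entry is dropped once the master stops assigning that allocation here, and re-reported to the master otherwise. A self-contained sketch of that pruning rule, with simplified record stand-ins rather than the real ES ShardId/ShardRouting classes:

    import java.util.Iterator;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.function.Consumer;
    import java.util.function.Function;

    // Simplified stand-ins for the ES types, for illustration only.
    record ShardId(String index, int id) {}
    record ShardRouting(ShardId shardId, String allocationId) {
        boolean isSameAllocation(ShardRouting other) {
            return allocationId.equals(other.allocationId);
        }
    }

    final class FailedShardsCacheSketch {
        final Map<ShardId, ShardRouting> failedShardsCache = new ConcurrentHashMap<>();

        /** Prune entries the master no longer assigns to this node; re-report the rest. */
        void update(Function<ShardId, ShardRouting> localRoutingTable, Consumer<ShardRouting> resendFailure) {
            for (Iterator<Map.Entry<ShardId, ShardRouting>> it = failedShardsCache.entrySet().iterator(); it.hasNext(); ) {
                ShardRouting failed = it.next().getValue();
                ShardRouting matched = localRoutingTable.apply(failed.shardId());
                if (matched == null || matched.isSameAllocation(failed) == false) {
                    it.remove(); // master moved on: the cached failure is stale
                } else {
                    resendFailure.accept(matched); // still assigned here: remind the master it failed
                }
            }
        }
    }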
+ * + * @param event cluster change event + */ + private void deleteIndices(final ClusterChangedEvent event) { final ClusterState previousState = event.previousState(); - final String localNodeId = event.state().nodes().getLocalNodeId(); + final ClusterState state = event.state(); + final String localNodeId = state.nodes().getLocalNodeId(); assert localNodeId != null; for (Index index : event.indicesDeleted()) { if (logger.isDebugEnabled()) { logger.debug("[{}] cleaning index, no longer part of the metadata", index); } - final IndexService idxService = indicesService.indexService(index); + AllocatedIndex indexService = indicesService.indexService(index); final IndexSettings indexSettings; - if (idxService != null) { - indexSettings = idxService.getIndexSettings(); - deleteIndex(index, "index no longer part of the metadata"); + if (indexService != null) { + indexSettings = indexService.getIndexSettings(); + indicesService.deleteIndex(index, "index no longer part of the metadata"); } else if (previousState.metaData().hasIndex(index.getName())) { // The deleted index was part of the previous cluster state, but not loaded on the local node final IndexMetaData metaData = previousState.metaData().index(index); indexSettings = new IndexSettings(metaData, settings); - indicesService.deleteUnassignedIndex("deleted index was not assigned to local node", metaData, event.state()); + indicesService.deleteUnassignedIndex("deleted index was not assigned to local node", metaData, state); } else { // The previous cluster state's metadata also does not contain the index, // which is what happens on node startup when an index was deleted while the @@ -255,10 +275,10 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent indexService : indicesService) { + Index index = indexService.index(); + IndexMetaData indexMetaData = event.state().metaData().index(index); if (indexMetaData == null) { - assert false : "index" + indexService.index() + " exists locally, doesn't have a metadata but is not part " + assert false : "index" + index + " exists locally, doesn't have a metadata but is not part" + " of the delete index list. \nprevious state: " + event.previousState().prettyPrint() + "\n current state:\n" + event.state().prettyPrint(); - logger.warn("[{}] isn't part of metadata but is part of in memory structures. removing", - indexService.index()); - deleteIndex(indexService.index(), "isn't part of metadata (explicit check)"); + logger.warn("[{}] isn't part of metadata but is part of in memory structures. removing", index); + indicesService.deleteIndex(index, "isn't part of metadata (explicit check)"); } } } - private void applyDeletedShards(final ClusterChangedEvent event) { - RoutingNode routingNode = event.state().getRoutingNodes().node(event.state().nodes().getLocalNodeId()); - if (routingNode == null) { + /** + * Removes indices that have no shards allocated to this node. This does not delete the shard data as we wait for enough + * shard copies to exist in the cluster before deleting shard data (triggered by {@link org.elasticsearch.indices.store.IndicesStore}). + * + * @param state new cluster state + */ + private void removeUnallocatedIndices(final ClusterState state) { + final String localNodeId = state.nodes().getLocalNodeId(); + assert localNodeId != null; + + Set indicesWithShards = new HashSet<>(); + RoutingNode localRoutingNode = state.getRoutingNodes().node(localNodeId); + if (localRoutingNode != null) { // null e.g. 
if we are not a data node + for (ShardRouting shardRouting : localRoutingNode) { + indicesWithShards.add(shardRouting.index()); + } + } + + for (AllocatedIndex indexService : indicesService) { + Index index = indexService.index(); + if (indicesWithShards.contains(index) == false) { + logger.debug("{} removing index, no shards allocated", index); + indicesService.removeIndex(index, "removing index (no shards allocated)"); + } + } + } + + /** + * Notifies master about shards that don't exist but are supposed to be active on this node. + * + * @param state new cluster state + */ + private void failMissingShards(final ClusterState state) { + RoutingNode localRoutingNode = state.getRoutingNodes().node(state.nodes().getLocalNodeId()); + if (localRoutingNode == null) { return; } - - final Map> shardsByIndex = new HashMap<>(); - for (ShardRouting shard : routingNode) { - shardsByIndex.computeIfAbsent(shard.index(), k -> new HashSet<>()).add(shard.allocationId().getId()); + for (final ShardRouting shardRouting : localRoutingNode) { + ShardId shardId = shardRouting.shardId(); + if (shardRouting.initializing() == false && + failedShardsCache.containsKey(shardId) == false && + indicesService.getShardOrNull(shardId) == null) { + // the master thinks we are active, but we don't have this shard at all, mark it as failed + sendFailShard(shardRouting, "master marked shard as active, but shard has not been created, mark shard as failed", null); + } } + } - for (IndexService indexService : indicesService) { - Index index = indexService.index(); - IndexMetaData indexMetaData = event.state().metaData().index(index); - assert indexMetaData != null : "local index doesn't have metadata, should have been cleaned up by applyDeletedIndices: " + index; - // now, go over and delete shards that needs to get deleted - Set newShardAllocationIds = shardsByIndex.getOrDefault(index, Collections.emptySet()); - for (IndexShard existingShard : indexService) { - if (newShardAllocationIds.contains(existingShard.routingEntry().allocationId().getId()) == false) { - if (indexMetaData.getState() == IndexMetaData.State.CLOSE) { - if (logger.isDebugEnabled()) { - logger.debug("{} removing shard (index is closed)", existingShard.shardId()); + /** + * Removes shards that are currently loaded by indicesService but have disappeared from the routing table of the current node. + * Also removes shards where the recovery source node has changed. + * This method does not delete the shard data. + * + * @param state new cluster state + */ + private void removeShards(final ClusterState state) { + final RoutingTable routingTable = state.routingTable(); + final DiscoveryNodes nodes = state.nodes(); + final String localNodeId = state.nodes().getLocalNodeId(); + assert localNodeId != null; + + // remove shards based on routing nodes (no deletion of data) + RoutingNode localRoutingNode = state.getRoutingNodes().node(localNodeId); + for (AllocatedIndex indexService : indicesService) { + for (Shard shard : indexService) { + ShardRouting currentRoutingEntry = shard.routingEntry(); + ShardId shardId = currentRoutingEntry.shardId(); + ShardRouting newShardRouting = localRoutingNode == null ? 
null : localRoutingNode.getByShardId(shardId); + if (newShardRouting == null || newShardRouting.isSameAllocation(currentRoutingEntry) == false) { + // we can just remove the shard without cleaning it locally, since we will clean it in IndicesStore + // once all shards are allocated + logger.debug("{} removing shard (not allocated)", shardId); + indexService.removeShard(shardId.id(), "removing shard (not allocated)"); + } else { + // remove shards where recovery source has changed. This re-initializes shards later in createOrUpdateShards + if (newShardRouting.isPeerRecovery()) { + RecoveryState recoveryState = shard.recoveryState(); + final DiscoveryNode sourceNode = findSourceNodeForPeerRecovery(logger, routingTable, nodes, newShardRouting); + if (recoveryState.getSourceNode().equals(sourceNode) == false) { + if (recoveryTargetService.cancelRecoveriesForShard(shardId, "recovery source node changed")) { + // getting here means that the shard was still recovering + logger.debug("{} removing shard (recovery source changed), current [{}], global [{}])", + shardId, currentRoutingEntry, newShardRouting); + indexService.removeShard(shardId.id(), "removing shard (recovery source node changed)"); + } } - indexService.removeShard(existingShard.shardId().id(), "removing shard (index is closed)"); - } else { - // we can just remove the shard, without cleaning it locally, since we will clean it - // when all shards are allocated in the IndicesStore - if (logger.isDebugEnabled()) { - logger.debug("{} removing shard (not allocated)", existingShard.shardId()); - } - indexService.removeShard(existingShard.shardId().id(), "removing shard (not allocated)"); } } } } } - private void applyCleanedIndices(final ClusterChangedEvent event) { - // handle closed indices, since they are not allocated on a node once they are closed - // so applyDeletedIndices might not take them into account - for (IndexService indexService : indicesService) { - Index index = indexService.index(); - IndexMetaData indexMetaData = event.state().metaData().index(index); - if (indexMetaData != null && indexMetaData.getState() == IndexMetaData.State.CLOSE) { - for (Integer shardId : indexService.shardIds()) { - logger.debug("{}[{}] removing shard (index is closed)", index, shardId); - try { - indexService.removeShard(shardId, "removing shard (index is closed)"); - } catch (Throwable e) { - logger.warn("{} failed to remove shard (index is closed)", e, index); - } - } - } - } - - final Set hasAllocations = new HashSet<>(); - final RoutingNode node = event.state().getRoutingNodes().node(event.state().nodes().getLocalNodeId()); - // if no shards are allocated ie. if this node is a master-only node it can return nul - if (node != null) { - for (ShardRouting routing : node) { - hasAllocations.add(routing.index()); - } - } - for (IndexService indexService : indicesService) { - Index index = indexService.index(); - if (hasAllocations.contains(index) == false) { - assert indexService.shardIds().isEmpty() : - "no locally assigned shards, but index wasn't emptied by applyDeletedShards." 
- + " index " + index + ", shards: " + indexService.shardIds(); - if (logger.isDebugEnabled()) { - logger.debug("{} cleaning index (no shards allocated)", index); - } - // clean the index - removeIndex(index, "removing index (no shards allocated)"); - } - } - } - - private void applyIndexMetaData(ClusterChangedEvent event) { - if (!event.metaDataChanged()) { - return; - } - for (IndexMetaData indexMetaData : event.state().metaData()) { - if (!indicesService.hasIndex(indexMetaData.getIndex())) { - // we only create / update here - continue; - } - // if the index meta data didn't change, no need check for refreshed settings - if (!event.indexMetaDataChanged(indexMetaData)) { - continue; - } - Index index = indexMetaData.getIndex(); - IndexService indexService = indicesService.indexService(index); - if (indexService == null) { - // already deleted on us, ignore it - continue; - } - indexService.updateMetaData(indexMetaData); - } - } - - private void applyNewIndices(final ClusterChangedEvent event) { + private void createIndices(final ClusterState state) { // we only create indices for shards that are allocated - RoutingNode routingNode = event.state().getRoutingNodes().node(event.state().nodes().getLocalNodeId()); - if (routingNode == null) { + RoutingNode localRoutingNode = state.getRoutingNodes().node(state.nodes().getLocalNodeId()); + if (localRoutingNode == null) { return; } - for (ShardRouting shard : routingNode) { - if (!indicesService.hasIndex(shard.index())) { - final IndexMetaData indexMetaData = event.state().metaData().getIndexSafe(shard.index()); - if (logger.isDebugEnabled()) { - logger.debug("[{}] creating index", indexMetaData.getIndex()); - } - try { - indicesService.createIndex(nodeServicesProvider, indexMetaData, buildInIndexListener); - } catch (Throwable e) { - sendFailShard(shard, "failed to create index", e); + // create map of indices to create with shards to fail if index creation fails + final Map> indicesToCreate = new HashMap<>(); + for (ShardRouting shardRouting : localRoutingNode) { + if (failedShardsCache.containsKey(shardRouting.shardId()) == false) { + final Index index = shardRouting.index(); + if (indicesService.indexService(index) == null) { + indicesToCreate.computeIfAbsent(index, k -> new ArrayList<>()).add(shardRouting); } } } - } - private void applyMappings(ClusterChangedEvent event) { - // go over and update mappings - for (IndexMetaData indexMetaData : event.state().metaData()) { - Index index = indexMetaData.getIndex(); - if (!indicesService.hasIndex(index)) { - // we only create / update here - continue; - } - boolean requireRefresh = false; - IndexService indexService = indicesService.indexService(index); - if (indexService == null) { - // got deleted on us, ignore (closing the node) - return; - } + for (Map.Entry> entry : indicesToCreate.entrySet()) { + final Index index = entry.getKey(); + final IndexMetaData indexMetaData = state.metaData().index(index); + logger.debug("[{}] creating index", index); + + AllocatedIndex indexService = null; try { - MapperService mapperService = indexService.mapperService(); - // go over and add the relevant mappings (or update them) - for (ObjectCursor cursor : indexMetaData.getMappings().values()) { - MappingMetaData mappingMd = cursor.value; - String mappingType = mappingMd.type(); - CompressedXContent mappingSource = mappingMd.source(); - requireRefresh |= processMapping(index.getName(), mapperService, mappingType, mappingSource); - } - if (requireRefresh && sendRefreshMapping) { - 
nodeMappingRefreshAction.nodeMappingRefresh(event.state(), - new NodeMappingRefreshAction.NodeMappingRefreshRequest(index.getName(), indexMetaData.getIndexUUID(), - event.state().nodes().getLocalNodeId()) + indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, buildInIndexListener); + if (indexService.updateMapping(indexMetaData) && sendRefreshMapping) { + nodeMappingRefreshAction.nodeMappingRefresh(state.nodes().getMasterNode(), + new NodeMappingRefreshAction.NodeMappingRefreshRequest(indexMetaData.getIndex().getName(), + indexMetaData.getIndexUUID(), state.nodes().getLocalNodeId()) ); } } catch (Throwable t) { - // if we failed the mappings anywhere, we need to fail the shards for this index, note, we safeguard - // by creating the processing the mappings on the master, or on the node the mapping was introduced on, - // so this failure typically means wrong node level configuration or something similar - for (IndexShard indexShard : indexService) { - ShardRouting shardRouting = indexShard.routingEntry(); - failAndRemoveShard(shardRouting, indexService, true, "failed to update mappings", t); - } - } - } - } - - private boolean processMapping(String index, MapperService mapperService, String mappingType, CompressedXContent mappingSource) throws Throwable { - // refresh mapping can happen when the parsing/merging of the mapping from the metadata doesn't result in the same - // mapping, in this case, we send to the master to refresh its own version of the mappings (to conform with the - // merge version of it, which it does when refreshing the mappings), and warn log it. - boolean requiresRefresh = false; - try { - DocumentMapper existingMapper = mapperService.documentMapper(mappingType); - - if (existingMapper == null || mappingSource.equals(existingMapper.mappingSource()) == false) { - String op = existingMapper == null ? 
"adding" : "updating"; - if (logger.isDebugEnabled() && mappingSource.compressed().length < 512) { - logger.debug("[{}] {} mapping [{}], source [{}]", index, op, mappingType, mappingSource.string()); - } else if (logger.isTraceEnabled()) { - logger.trace("[{}] {} mapping [{}], source [{}]", index, op, mappingType, mappingSource.string()); + final String failShardReason; + if (indexService == null) { + failShardReason = "failed to create index"; } else { - logger.debug("[{}] {} mapping [{}] (source suppressed due to length, use TRACE level if needed)", index, op, mappingType); + failShardReason = "failed to update mapping for index"; + indicesService.removeIndex(index, "removing index (mapping update failed)"); } - mapperService.merge(mappingType, mappingSource, MapperService.MergeReason.MAPPING_RECOVERY, true); - if (!mapperService.documentMapper(mappingType).mappingSource().equals(mappingSource)) { - logger.debug("[{}] parsed mapping [{}], and got different sources\noriginal:\n{}\nparsed:\n{}", index, mappingType, mappingSource, mapperService.documentMapper(mappingType).mappingSource()); - requiresRefresh = true; + for (ShardRouting shardRouting : entry.getValue()) { + sendFailShard(shardRouting, failShardReason, t); } } - } catch (Throwable e) { - logger.warn("[{}] failed to add mapping [{}], source [{}]", e, index, mappingType, mappingSource); - throw e; } - return requiresRefresh; } - - private void applyNewOrUpdatedShards(final ClusterChangedEvent event) { - if (!indicesService.changesAllowed()) { + private void updateIndices(ClusterChangedEvent event) { + if (!event.metaDataChanged()) { return; } - - RoutingTable routingTable = event.state().routingTable(); - RoutingNode routingNode = event.state().getRoutingNodes().node(event.state().nodes().getLocalNodeId()); - - if (routingNode == null) { - failedShards.clear(); - return; - } - - DiscoveryNodes nodes = event.state().nodes(); - for (final ShardRouting shardRouting : routingNode) { - final IndexService indexService = indicesService.indexService(shardRouting.index()); - if (indexService == null) { - // creation failed for some reasons - assert failedShards.containsKey(shardRouting.shardId()) : - "index has local allocation but is not created by applyNewIndices and is not failed " + shardRouting; - continue; - } - final IndexMetaData indexMetaData = event.state().metaData().index(shardRouting.index()); - assert indexMetaData != null : "index has local allocation but no meta data. " + shardRouting.index(); - - final int shardId = shardRouting.id(); - - if (!indexService.hasShard(shardId) && shardRouting.started()) { - if (failedShards.containsKey(shardRouting.shardId())) { - if (nodes.getMasterNode() != null) { - String message = "master " + nodes.getMasterNode() + " marked shard as started, but shard has previous failed. 
resending shard failure"; - logger.trace("[{}] re-sending failed shard [{}], reason [{}]", shardRouting.shardId(), shardRouting, message); - shardStateAction.shardFailed(shardRouting, shardRouting, message, null, SHARD_STATE_ACTION_LISTENER); + final ClusterState state = event.state(); + for (AllocatedIndex indexService : indicesService) { + final Index index = indexService.index(); + final IndexMetaData currentIndexMetaData = indexService.getIndexSettings().getIndexMetaData(); + final IndexMetaData newIndexMetaData = state.metaData().index(index); + assert newIndexMetaData != null : "index " + index + " should have been removed by deleteIndices"; + if (ClusterChangedEvent.indexMetaDataChanged(currentIndexMetaData, newIndexMetaData)) { + indexService.updateMetaData(newIndexMetaData); + try { + if (indexService.updateMapping(newIndexMetaData) && sendRefreshMapping) { + nodeMappingRefreshAction.nodeMappingRefresh(state.nodes().getMasterNode(), + new NodeMappingRefreshAction.NodeMappingRefreshRequest(newIndexMetaData.getIndex().getName(), + newIndexMetaData.getIndexUUID(), state.nodes().getLocalNodeId()) + ); } - } else { - // the master thinks we are started, but we don't have this shard at all, mark it as failed - sendFailShard(shardRouting, "master [" + nodes.getMasterNode() + "] marked shard as started, but shard has not been created, mark shard as failed", null); - } - continue; - } + } catch (Throwable t) { + indicesService.removeIndex(indexService.index(), "removing index (mapping update failed)"); - IndexShard indexShard = indexService.getShardOrNull(shardId); - if (indexShard != null) { - ShardRouting currentRoutingEntry = indexShard.routingEntry(); - // if the current and global routing are initializing, but are still not the same, its a different "shard" being allocated - // for example: a shard that recovers from one node and now needs to recover to another node, - // or a replica allocated and then allocating a primary because the primary failed on another node - boolean shardHasBeenRemoved = false; - assert currentRoutingEntry.isSameAllocation(shardRouting) : - "local shard has a different allocation id but wasn't cleaning by applyDeletedShards. 
" - + "cluster state: " + shardRouting + " local: " + currentRoutingEntry; - if (shardRouting.isPeerRecovery()) { - RecoveryState recoveryState = indexShard.recoveryState(); - final DiscoveryNode sourceNode = findSourceNodeForPeerRecovery(logger, routingTable, nodes, shardRouting); - if (recoveryState.getSourceNode().equals(sourceNode) == false) { - if (recoveryTargetService.cancelRecoveriesForShard(currentRoutingEntry.shardId(), "recovery source node changed")) { - // getting here means that the shard was still recovering - logger.debug("[{}][{}] removing shard (recovery source changed), current [{}], global [{}])", shardRouting.index(), shardRouting.id(), currentRoutingEntry, shardRouting); - indexService.removeShard(shardRouting.id(), "removing shard (recovery source node changed)"); - shardHasBeenRemoved = true; + // fail shards that would be created or updated by createOrUpdateShards + RoutingNode localRoutingNode = state.getRoutingNodes().node(state.nodes().getLocalNodeId()); + if (localRoutingNode != null) { + for (final ShardRouting shardRouting : localRoutingNode) { + if (shardRouting.index().equals(index) && failedShardsCache.containsKey(shardRouting.shardId()) == false) { + sendFailShard(shardRouting, "failed to update mapping for index", t); + } } } } - - if (shardHasBeenRemoved == false) { - try { - indexShard.updateRoutingEntry(shardRouting, event.state().blocks().disableStatePersistence() == false); - } catch (Throwable e) { - failAndRemoveShard(shardRouting, indexService, true, "failed updating shard routing entry", e); - } - } - } - - if (shardRouting.initializing()) { - applyInitializingShard(event.state(), indexService, shardRouting); } } } - private void applyInitializingShard(final ClusterState state, IndexService indexService, final ShardRouting shardRouting) { - final RoutingTable routingTable = state.routingTable(); - final DiscoveryNodes nodes = state.getNodes(); - final int shardId = shardRouting.id(); + private void createOrUpdateShards(final ClusterState state) { + RoutingNode localRoutingNode = state.getRoutingNodes().node(state.nodes().getLocalNodeId()); + if (localRoutingNode == null) { + return; + } - if (indexService.hasShard(shardId)) { - IndexShard indexShard = indexService.getShard(shardId); - if (indexShard.state() == IndexShardState.STARTED || indexShard.state() == IndexShardState.POST_RECOVERY) { - // the master thinks we are initializing, but we are already started or on POST_RECOVERY and waiting - // for master to confirm a shard started message (either master failover, or a cluster event before - // we managed to tell the master we started), mark us as started - if (logger.isTraceEnabled()) { - logger.trace("{} master marked shard as initializing, but shard has state [{}], resending shard started to {}", - indexShard.shardId(), indexShard.state(), nodes.getMasterNode()); - } - if (nodes.getMasterNode() != null) { - shardStateAction.shardStarted(shardRouting, - "master " + nodes.getMasterNode() + " marked shard as initializing, but shard state is [" + indexShard.state() + "], mark shard as started", - SHARD_STATE_ACTION_LISTENER); - } - return; - } else { - if (indexShard.ignoreRecoveryAttempt()) { - logger.trace("ignoring recovery instruction for an existing shard {} (shard state: [{}])", indexShard.shardId(), indexShard.state()); - return; + DiscoveryNodes nodes = state.nodes(); + RoutingTable routingTable = state.routingTable(); + + for (final ShardRouting shardRouting : localRoutingNode) { + ShardId shardId = shardRouting.shardId(); + if 
(failedShardsCache.containsKey(shardId) == false) { + AllocatedIndex indexService = indicesService.indexService(shardId.getIndex()); + assert indexService != null : "index " + shardId.getIndex() + " should have been created by createIndices"; + Shard shard = indexService.getShardOrNull(shardId.id()); + if (shard == null) { + assert shardRouting.initializing() : shardRouting + " should have been removed by failMissingShards"; + createShard(nodes, routingTable, shardRouting, indexService); + } else { + updateShard(nodes, shardRouting, shard); } } } + } + + private void createShard(DiscoveryNodes nodes, RoutingTable routingTable, ShardRouting shardRouting, + AllocatedIndex indexService) { + assert shardRouting.initializing() : "only allow shard creation for initializing shard but was " + shardRouting; - // if we're in peer recovery, try to find out the source node now so in case it fails, we will not create the index shard DiscoveryNode sourceNode = null; if (shardRouting.isPeerRecovery()) { sourceNode = findSourceNodeForPeerRecovery(logger, routingTable, nodes, shardRouting); @@ -595,50 +516,49 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent { - try { - nodeServicesProvider.getClient().admin().indices().preparePutMapping() - .setConcreteIndex(indexService.index()) // concrete index - no name clash, it uses uuid - .setType(type) - .setSource(mapping.source().string()) - .get(); - } catch (IOException ex) { - throw new ElasticsearchException("failed to stringify mapping source", ex); - } - }, indicesService); + final IndexShardState state = shard.state(); + if (shardRouting.initializing() && (state == IndexShardState.STARTED || state == IndexShardState.POST_RECOVERY)) { + // the master thinks we are initializing, but we are already started or on POST_RECOVERY and waiting + // for master to confirm a shard started message (either master failover, or a cluster event before + // we managed to tell the master we started), mark us as started + if (logger.isTraceEnabled()) { + logger.trace("{} master marked shard as initializing, but shard has state [{}], resending shard started to {}", + shardRouting.shardId(), state, nodes.getMasterNode()); + } + if (nodes.getMasterNode() != null) { + shardStateAction.shardStarted(shardRouting, "master " + nodes.getMasterNode() + + " marked shard as initializing, but shard state is [" + state + "], mark shard as started", + SHARD_STATE_ACTION_LISTENER); + } + } } /** @@ -646,7 +566,8 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent indexService = indicesService.indexService(shardRouting.shardId().getIndex()); + if (indexService != null) { + indexService.removeShard(shardRouting.shardId().id(), message); } + } catch (ShardNotFoundException e) { + // the node got closed on us, ignore it + } catch (Throwable e1) { + logger.warn("[{}][{}] failed to remove shard after failure ([{}])", e1, shardRouting.getIndexName(), shardRouting.getId(), + message); } if (sendShardFailure) { sendFailShard(shardRouting, message, failure); @@ -760,23 +683,156 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent { @Override public void handle(final IndexShard.ShardFailure shardFailure) { - final IndexService indexService = indicesService.indexService(shardFailure.routing.shardId().getIndex()); final ShardRouting shardRouting = shardFailure.routing; threadPool.generic().execute(() -> { - synchronized (mutex) { - failAndRemoveShard(shardRouting, indexService, true, "shard failure, reason [" + 
shardFailure.reason + "]", shardFailure.cause); + synchronized (IndicesClusterStateService.this) { + failAndRemoveShard(shardRouting, true, "shard failure, reason [" + shardFailure.reason + "]", shardFailure.cause); } }); } } + + public interface Shard { + + /** + * Returns the shard id of this shard. + */ + ShardId shardId(); + + /** + * Returns the latest cluster routing entry received with this shard. + */ + ShardRouting routingEntry(); + + /** + * Returns the latest internal shard state. + */ + IndexShardState state(); + + /** + * Returns the recovery state associated with this shard. + */ + RecoveryState recoveryState(); + + /** + * Updates the shards routing entry. This mutate the shards internal state depending + * on the changes that get introduced by the new routing value. This method will persist shard level metadata. + * + * @throws IndexShardRelocatedException if shard is marked as relocated and relocation aborted + * @throws IOException if shard state could not be persisted + */ + void updateRoutingEntry(ShardRouting shardRouting) throws IOException; + } + + public interface AllocatedIndex extends Iterable, IndexComponent { + + /** + * Returns the index settings of this index. + */ + IndexSettings getIndexSettings(); + + /** + * Updates the meta data of this index. Changes become visible through {@link #getIndexSettings()} + */ + void updateMetaData(IndexMetaData indexMetaData); + + /** + * Checks if index requires refresh from master. + */ + boolean updateMapping(IndexMetaData indexMetaData) throws IOException; + + /** + * Returns shard with given id. + */ + @Nullable T getShardOrNull(int shardId); + + /** + * Removes shard with given id. + */ + void removeShard(int shardId, String message); + } + + public interface AllocatedIndices> extends Iterable { + + /** + * Creates a new {@link IndexService} for the given metadata. + * @param indexMetaData the index metadata to create the index for + * @param builtInIndexListener a list of built-in lifecycle {@link IndexEventListener} that should should be used along side with + * the per-index listeners + * @throws IndexAlreadyExistsException if the index already exists. + */ + U createIndex(NodeServicesProvider nodeServicesProvider, IndexMetaData indexMetaData, + List builtInIndexListener) throws IOException; + + /** + * Verify that the contents on disk for the given index is deleted; if not, delete the contents. + * This method assumes that an index is already deleted in the cluster state and/or explicitly + * through index tombstones. + * @param index {@code Index} to make sure its deleted from disk + * @param clusterState {@code ClusterState} to ensure the index is not part of it + * @return IndexMetaData for the index loaded from disk + */ + IndexMetaData verifyIndexIsDeleted(Index index, ClusterState clusterState); + + /** + * Deletes the given index. Persistent parts of the index + * like the shards files, state and transaction logs are removed once all resources are released. + * + * Equivalent to {@link #removeIndex(Index, String)} but fires + * different lifecycle events to ensure pending resources of this index are immediately removed. + * @param index the index to delete + * @param reason the high level reason causing this delete + */ + void deleteIndex(Index index, String reason); + + /** + * Deletes an index that is not assigned to this node. This method cleans up all disk folders relating to the index + * but does not deal with in-memory structures. 
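The Shard / AllocatedIndex / AllocatedIndices hierarchy introduced here exists so that IndicesClusterStateService can be exercised against lightweight in-memory fakes instead of a full IndicesService (hence the package-private test constructor earlier in this file). A toy reduction of the shape, with all ES-specific types elided; only the default getShardOrNull composition mirrors the real interface below:

    // Toy reduction: an index is an iterable of its shards,
    // and the service is an iterable of its indices.
    interface ToyShard {
        int id();
    }

    interface ToyAllocatedIndex<T extends ToyShard> extends Iterable<T> {
        T getShardOrNull(int shardId);

        void removeShard(int shardId, String reason);
    }

    interface ToyAllocatedIndices<T extends ToyShard, U extends ToyAllocatedIndex<T>> extends Iterable<U> {
        U indexService(String indexName);

        // Mirrors the default getShardOrNull in the real interface: resolve the index, then the shard.
        default T getShardOrNull(String indexName, int shardId) {
            U indexRef = indexService(indexName);
            return indexRef == null ? null : indexRef.getShardOrNull(shardId);
        }
    }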
For those call {@link #deleteIndex(Index, String)} + */ + void deleteUnassignedIndex(String reason, IndexMetaData metaData, ClusterState clusterState); + + /** + * Removes the given index from this service and releases all associated resources. Persistent parts of the index + * like the shards files, state and transaction logs are kept around in the case of a disaster recovery. + * @param index the index to remove + * @param reason the high level reason causing this removal + */ + void removeIndex(Index index, String reason); + + /** + * Returns an IndexService for the specified index if exists otherwise returns null. + */ + @Nullable U indexService(Index index); + + /** + * Creates shard for the specified shard routing and starts recovery, + */ + T createShard(ShardRouting shardRouting, RecoveryState recoveryState, RecoveryTargetService recoveryTargetService, + RecoveryTargetService.RecoveryListener recoveryListener, RepositoriesService repositoriesService, + NodeServicesProvider nodeServicesProvider, Callback onShardFailure) throws IOException; + + /** + * Returns shard for the specified id if it exists otherwise returns null. + */ + default T getShardOrNull(ShardId shardId) { + U indexRef = indexService(shardId.getIndex()); + if (indexRef != null) { + return indexRef.getShardOrNull(shardId.id()); + } + return null; + } + + void processPendingDeletes(Index index, IndexSettings indexSettings, TimeValue timeValue) throws IOException, InterruptedException; + } } diff --git a/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java index f3cb76199dc..16e4d62a721 100644 --- a/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java +++ b/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java @@ -114,11 +114,9 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL final ClusterState state = clusterService.state(); final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, indicesOptions, aliasesOrIndices); final Map> results = ConcurrentCollections.newConcurrentMap(); - int totalNumberOfShards = 0; int numberOfShards = 0; for (Index index : concreteIndices) { final IndexMetaData indexMetaData = state.metaData().getIndexSafe(index); - totalNumberOfShards += indexMetaData.getTotalNumberOfShards(); numberOfShards += indexMetaData.getNumberOfShards(); results.put(index.getName(), Collections.synchronizedList(new ArrayList<>())); @@ -127,7 +125,6 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL listener.onResponse(new SyncedFlushResponse(results)); return; } - final int finalTotalNumberOfShards = totalNumberOfShards; final CountDown countDown = new CountDown(numberOfShards); for (final Index concreteIndex : concreteIndices) { @@ -136,7 +133,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL final int indexNumberOfShards = indexMetaData.getNumberOfShards(); for (int shard = 0; shard < indexNumberOfShards; shard++) { final ShardId shardId = new ShardId(indexMetaData.getIndex(), shard); - attemptSyncedFlush(shardId, new ActionListener() { + innerAttemptSyncedFlush(shardId, state, new ActionListener() { @Override public void onResponse(ShardsSyncedFlushResult syncedFlushResult) { results.get(index).add(syncedFlushResult); @@ -148,7 +145,8 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL @Override public void 
onFailure(Throwable e) { logger.debug("{} unexpected error while executing synced flush", shardId); - results.get(index).add(new ShardsSyncedFlushResult(shardId, finalTotalNumberOfShards, e.getMessage())); + final int totalShards = indexMetaData.getNumberOfReplicas() + 1; + results.get(index).add(new ShardsSyncedFlushResult(shardId, totalShards, e.getMessage())); if (countDown.countDown()) { listener.onResponse(new SyncedFlushResponse(results)); } @@ -185,8 +183,11 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL * Synced flush is a best effort operation. The sync id may be written on all, some or none of the copies. **/ public void attemptSyncedFlush(final ShardId shardId, final ActionListener actionListener) { + innerAttemptSyncedFlush(shardId, clusterService.state(), actionListener); + } + + private void innerAttemptSyncedFlush(final ShardId shardId, final ClusterState state, final ActionListener actionListener) { try { - final ClusterState state = clusterService.state(); final IndexShardRoutingTable shardRoutingTable = getShardRoutingTable(shardId, state); final List activeShards = shardRoutingTable.activeShards(); final int totalShards = shardRoutingTable.getSize(); diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java index 8fd08d9f8fb..49cdb737ed3 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java @@ -20,7 +20,6 @@ package org.elasticsearch.indices.recovery; import org.apache.lucene.util.Version; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -76,7 +75,6 @@ public final class RecoveryFileChunkRequest extends TransportRequest { return position; } - @Nullable public String checksum() { return metaData.checksum(); } @@ -105,11 +103,10 @@ public final class RecoveryFileChunkRequest extends TransportRequest { String name = in.readString(); position = in.readVLong(); long length = in.readVLong(); - String checksum = in.readOptionalString(); + String checksum = in.readString(); content = in.readBytesReference(); - Version writtenBy = null; - String versionString = in.readOptionalString(); - writtenBy = Lucene.parseVersionLenient(versionString, null); + Version writtenBy = Lucene.parseVersionLenient(in.readString(), null); + assert writtenBy != null; metaData = new StoreFileMetaData(name, length, checksum, writtenBy); lastChunk = in.readBoolean(); totalTranslogOps = in.readVInt(); @@ -124,9 +121,9 @@ public final class RecoveryFileChunkRequest extends TransportRequest { out.writeString(metaData.name()); out.writeVLong(position); out.writeVLong(metaData.length()); - out.writeOptionalString(metaData.checksum()); + out.writeString(metaData.checksum()); out.writeBytesReference(content); - out.writeOptionalString(metaData.writtenBy() == null ? 
null : metaData.writtenBy().toString()); + out.writeString(metaData.writtenBy().toString()); out.writeBoolean(lastChunk); out.writeVInt(totalTranslogOps); out.writeLong(sourceThrottleTimeInNanos); diff --git a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java index 23d390dfcfe..04900705e0a 100644 --- a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java +++ b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java @@ -115,4 +115,5 @@ public abstract class BlobStoreFormat { } } + } diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java index 57841466a62..b7a6d714e63 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchService.java +++ b/core/src/main/java/org/elasticsearch/search/SearchService.java @@ -826,9 +826,7 @@ public class SearchService extends AbstractLifecycleComponent imp if (context.scrollContext() == null) { throw new SearchContextException(context, "`slice` cannot be used outside of a scroll context"); } - context.sliceFilter(source.slice().toFilter(queryShardContext, - context.shardTarget().getShardId().getId(), - queryShardContext.getIndexSettings().getNumberOfShards())); + context.sliceBuilder(source.slice()); } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java b/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java index 38fb0e6a431..2d8ea318fd6 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java @@ -27,6 +27,8 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.AggregationPath; import org.elasticsearch.search.aggregations.support.AggregationPath.PathElement; +import org.elasticsearch.search.profile.Profilers; +import org.elasticsearch.search.profile.aggregation.ProfilingAggregator; import java.io.IOException; import java.util.ArrayList; @@ -81,7 +83,12 @@ public class AggregatorFactories { // propagate the fact that only bucket 0 will be collected with single-bucket // aggs final boolean collectsFromSingleBucket = false; - aggregators[i] = factories[i].create(parent, collectsFromSingleBucket); + Aggregator factory = factories[i].create(parent, collectsFromSingleBucket); + Profilers profilers = factory.context().searchContext().getProfilers(); + if (profilers != null) { + factory = new ProfilingAggregator(factory, profilers.getAggregationProfiler()); + } + aggregators[i] = factory; } return aggregators; } @@ -92,7 +99,12 @@ public class AggregatorFactories { for (int i = 0; i < factories.length; i++) { // top-level aggs only get called with bucket 0 final boolean collectsFromSingleBucket = true; - aggregators[i] = factories[i].create(null, collectsFromSingleBucket); + Aggregator factory = factories[i].create(null, collectsFromSingleBucket); + Profilers profilers = factory.context().searchContext().getProfilers(); + if (profilers != null) { + factory = new ProfilingAggregator(factory, profilers.getAggregationProfiler()); + } + aggregators[i] = factory; } return aggregators; } diff --git 
a/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java index 3fced89a014..854838b7441 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java @@ -28,13 +28,139 @@ import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.internal.SearchContext.Lifetime; - import java.io.IOException; import java.util.List; import java.util.Map; public abstract class AggregatorFactory> { + public static final class MultiBucketAggregatorWrapper extends Aggregator { + private final BigArrays bigArrays; + private final Aggregator parent; + private final AggregatorFactory factory; + private final Aggregator first; + ObjectArray aggregators; + ObjectArray collectors; + + MultiBucketAggregatorWrapper(BigArrays bigArrays, AggregationContext context, Aggregator parent, AggregatorFactory factory, + Aggregator first) { + this.bigArrays = bigArrays; + this.parent = parent; + this.factory = factory; + this.first = first; + context.searchContext().addReleasable(this, Lifetime.PHASE); + aggregators = bigArrays.newObjectArray(1); + aggregators.set(0, first); + collectors = bigArrays.newObjectArray(1); + } + + public Class getWrappedClass() { + return first.getClass(); + } + + @Override + public String name() { + return first.name(); + } + + @Override + public AggregationContext context() { + return first.context(); + } + + @Override + public Aggregator parent() { + return first.parent(); + } + + @Override + public boolean needsScores() { + return first.needsScores(); + } + + @Override + public Aggregator subAggregator(String name) { + throw new UnsupportedOperationException(); + } + + @Override + public void preCollection() throws IOException { + for (long i = 0; i < aggregators.size(); ++i) { + final Aggregator aggregator = aggregators.get(i); + if (aggregator != null) { + aggregator.preCollection(); + } + } + } + + @Override + public void postCollection() throws IOException { + for (long i = 0; i < aggregators.size(); ++i) { + final Aggregator aggregator = aggregators.get(i); + if (aggregator != null) { + aggregator.postCollection(); + } + } + } + + @Override + public LeafBucketCollector getLeafCollector(final LeafReaderContext ctx) { + for (long i = 0; i < collectors.size(); ++i) { + collectors.set(i, null); + } + return new LeafBucketCollector() { + Scorer scorer; + + @Override + public void setScorer(Scorer scorer) throws IOException { + this.scorer = scorer; + } + + @Override + public void collect(int doc, long bucket) throws IOException { + collectors = bigArrays.grow(collectors, bucket + 1); + + LeafBucketCollector collector = collectors.get(bucket); + if (collector == null) { + aggregators = bigArrays.grow(aggregators, bucket + 1); + Aggregator aggregator = aggregators.get(bucket); + if (aggregator == null) { + aggregator = factory.create(parent, true); + aggregator.preCollection(); + aggregators.set(bucket, aggregator); + } + collector = aggregator.getLeafCollector(ctx); + collector.setScorer(scorer); + collectors.set(bucket, collector); + } + collector.collect(doc, 0); + } + + }; + } + + @Override + public InternalAggregation buildAggregation(long bucket) throws IOException { + if (bucket < 
aggregators.size()) { + Aggregator aggregator = aggregators.get(bucket); + if (aggregator != null) { + return aggregator.buildAggregation(0); + } + } + return buildEmptyAggregation(); + } + + @Override + public InternalAggregation buildEmptyAggregation() { + return first.buildEmptyAggregation(); + } + + @Override + public void close() { + Releasables.close(aggregators, collectors); + } + } + protected final String name; protected final Type type; protected final AggregatorFactory parent; @@ -112,120 +238,7 @@ public abstract class AggregatorFactory> { final Aggregator parent) throws IOException { final Aggregator first = factory.create(parent, true); final BigArrays bigArrays = context.bigArrays(); - return new Aggregator() { - - ObjectArray aggregators; - ObjectArray collectors; - - { - context.searchContext().addReleasable(this, Lifetime.PHASE); - aggregators = bigArrays.newObjectArray(1); - aggregators.set(0, first); - collectors = bigArrays.newObjectArray(1); - } - - @Override - public String name() { - return first.name(); - } - - @Override - public AggregationContext context() { - return first.context(); - } - - @Override - public Aggregator parent() { - return first.parent(); - } - - @Override - public boolean needsScores() { - return first.needsScores(); - } - - @Override - public Aggregator subAggregator(String name) { - throw new UnsupportedOperationException(); - } - - @Override - public void preCollection() throws IOException { - for (long i = 0; i < aggregators.size(); ++i) { - final Aggregator aggregator = aggregators.get(i); - if (aggregator != null) { - aggregator.preCollection(); - } - } - } - - @Override - public void postCollection() throws IOException { - for (long i = 0; i < aggregators.size(); ++i) { - final Aggregator aggregator = aggregators.get(i); - if (aggregator != null) { - aggregator.postCollection(); - } - } - } - - @Override - public LeafBucketCollector getLeafCollector(final LeafReaderContext ctx) { - for (long i = 0; i < collectors.size(); ++i) { - collectors.set(i, null); - } - return new LeafBucketCollector() { - Scorer scorer; - - @Override - public void setScorer(Scorer scorer) throws IOException { - this.scorer = scorer; - } - - @Override - public void collect(int doc, long bucket) throws IOException { - aggregators = bigArrays.grow(aggregators, bucket + 1); - collectors = bigArrays.grow(collectors, bucket + 1); - - LeafBucketCollector collector = collectors.get(bucket); - if (collector == null) { - Aggregator aggregator = aggregators.get(bucket); - if (aggregator == null) { - aggregator = factory.create(parent, true); - aggregator.preCollection(); - aggregators.set(bucket, aggregator); - } - collector = aggregator.getLeafCollector(ctx); - collector.setScorer(scorer); - collectors.set(bucket, collector); - } - collector.collect(doc, 0); - } - - }; - } - - @Override - public InternalAggregation buildAggregation(long bucket) throws IOException { - if (bucket < aggregators.size()) { - Aggregator aggregator = aggregators.get(bucket); - if (aggregator != null) { - return aggregator.buildAggregation(0); - } - } - return buildEmptyAggregation(); - } - - @Override - public InternalAggregation buildEmptyAggregation() { - return first.buildEmptyAggregation(); - } - - @Override - public void close() { - Releasables.close(aggregators, collectors); - } - }; + return new MultiBucketAggregatorWrapper(bigArrays, context, parent, factory, first); } } \ No newline at end of file diff --git 
a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregator.java index b5ed80a0022..0793bacf722 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregator.java @@ -35,7 +35,6 @@ import org.elasticsearch.common.util.LongArray; import org.elasticsearch.common.util.ObjectArray; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; -import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregator.java index 192ad6c28dc..ec838e7dd41 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregator.java @@ -20,7 +20,7 @@ package org.elasticsearch.search.aggregations.metrics.geocentroid; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.spatial.util.GeoEncodingUtils; +import org.apache.lucene.spatial.geopoint.document.GeoPointField; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; @@ -82,9 +82,9 @@ public final class GeoCentroidAggregator extends MetricsAggregator { counts.increment(bucket, valueCount); // get the previous GeoPoint if a moving avg was computed if (prevCounts > 0) { - final GeoPoint centroid = GeoPoint.fromIndexLong(centroids.get(bucket)); - pt[0] = centroid.lon(); - pt[1] = centroid.lat(); + final long mortonCode = centroids.get(bucket); + pt[0] = GeoPointField.decodeLongitude(mortonCode); + pt[1] = GeoPointField.decodeLatitude(mortonCode); } // update the moving average for (int i = 0; i < valueCount; ++i) { @@ -92,7 +92,9 @@ public final class GeoCentroidAggregator extends MetricsAggregator { pt[0] = pt[0] + (value.getLon() - pt[0]) / ++prevCounts; pt[1] = pt[1] + (value.getLat() - pt[1]) / prevCounts; } - centroids.set(bucket, GeoEncodingUtils.mortonHash(pt[1], pt[0])); + // TODO: we do not need to interleave the lat and lon bits here + // should we just store them contiguously? + centroids.set(bucket, GeoPointField.encodeLatLon(pt[1], pt[0])); } } }; @@ -104,8 +106,10 @@ public final class GeoCentroidAggregator extends MetricsAggregator { return buildEmptyAggregation(); } final long bucketCount = counts.get(bucket); - final GeoPoint bucketCentroid = (bucketCount > 0) ? GeoPoint.fromIndexLong(centroids.get(bucket)) : - new GeoPoint(Double.NaN, Double.NaN); + final long mortonCode = centroids.get(bucket); + final GeoPoint bucketCentroid = (bucketCount > 0) + ? 
new GeoPoint(GeoPointField.decodeLatitude(mortonCode), GeoPointField.decodeLongitude(mortonCode)) + : null; return new InternalGeoCentroid(name, bucketCentroid , bucketCount, pipelineAggregators(), metaData()); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroid.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroid.java index 2798169b699..2bb3056ca66 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroid.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroid.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.aggregations.metrics.geocentroid; -import org.apache.lucene.spatial.util.GeoEncodingUtils; +import org.apache.lucene.spatial.geopoint.document.GeoPointField; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -61,6 +61,7 @@ public class InternalGeoCentroid extends InternalMetricsAggregation implements G public InternalGeoCentroid(String name, GeoPoint centroid, long count, List pipelineAggregators, Map metaData) { super(name, pipelineAggregators, metaData); + assert (centroid == null) == (count == 0); this.centroid = centroid; assert count >= 0; this.count = count; @@ -68,7 +69,7 @@ public class InternalGeoCentroid extends InternalMetricsAggregation implements G @Override public GeoPoint centroid() { - return (centroid == null || Double.isNaN(centroid.lon()) ? null : centroid); + return centroid; } @Override @@ -128,7 +129,8 @@ public class InternalGeoCentroid extends InternalMetricsAggregation implements G protected void doReadFrom(StreamInput in) throws IOException { count = in.readVLong(); if (in.readBoolean()) { - centroid = GeoPoint.fromIndexLong(in.readLong()); + final long hash = in.readLong(); + centroid = new GeoPoint(GeoPointField.decodeLatitude(hash), GeoPointField.decodeLongitude(hash)); } else { centroid = null; } @@ -139,7 +141,8 @@ public class InternalGeoCentroid extends InternalMetricsAggregation implements G out.writeVLong(count); if (centroid != null) { out.writeBoolean(true); - out.writeLong(GeoEncodingUtils.mortonHash(centroid.lat(), centroid.lon())); + // should we just write lat and lon separately? 
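
Aside, for illustration only (not part of the patch): GeoPointField.encodeLatLon(lat, lon) packs both coordinates into a single morton-interleaved long, and decodeLatitude/decodeLongitude recover them, modulo the encoding's quantization. The centroid itself is maintained as a streaming mean rather than a sum, which the following stand-alone sketch replays with made-up longitudes:

public class RunningMeanDemo {
    public static void main(String[] args) {
        // Same update as "pt[0] = pt[0] + (value.getLon() - pt[0]) / ++prevCounts" above:
        // mean_n = mean_{n-1} + (x_n - mean_{n-1}) / n, a running mean with no overflow-prone sum.
        double mean = 0;
        long n = 0;
        for (double lon : new double[] { -71.06, -71.05, -71.04 }) {
            mean += (lon - mean) / ++n;
        }
        System.out.println(mean);     // ~ -71.05, the mean longitude of the three points
    }
}
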
+ out.writeLong(GeoPointField.encodeLatLon(centroid.lat(), centroid.lon())); } else { out.writeBoolean(false); } diff --git a/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java b/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java index 36fe562a568..b2ce044e4fc 100644 --- a/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java +++ b/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java @@ -51,8 +51,9 @@ import org.elasticsearch.search.fetch.FetchSearchResultProvider; import org.elasticsearch.search.internal.InternalSearchHit; import org.elasticsearch.search.internal.InternalSearchHits; import org.elasticsearch.search.internal.InternalSearchResponse; -import org.elasticsearch.search.profile.SearchProfileShardResults; import org.elasticsearch.search.profile.ProfileShardResult; +import org.elasticsearch.search.profile.SearchProfileShardResults; +import org.elasticsearch.search.profile.query.QueryProfileShardResult; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.query.QuerySearchResultProvider; import org.elasticsearch.search.suggest.Suggest; @@ -407,7 +408,7 @@ public class SearchPhaseController extends AbstractComponent { //Collect profile results SearchProfileShardResults shardResults = null; if (!queryResults.isEmpty() && firstResult.profileResults() != null) { - Map> profileResults = new HashMap<>(queryResults.size()); + Map profileResults = new HashMap<>(queryResults.size()); for (AtomicArray.Entry entry : queryResults) { String key = entry.value.queryResult().shardTarget().toString(); profileResults.put(key, entry.value.queryResult().profileResults()); diff --git a/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsContext.java b/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsContext.java index 31921457207..f34da5301d5 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsContext.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsContext.java @@ -175,7 +175,7 @@ public final class InnerHitsContext { @Override public boolean equals(Object obj) { - if (super.equals(obj) == false) { + if (sameClassAs(obj) == false) { return false; } NestedChildrenQuery other = (NestedChildrenQuery) obj; @@ -187,7 +187,7 @@ public final class InnerHitsContext { @Override public int hashCode() { - int hash = super.hashCode(); + int hash = classHash(); hash = 31 * hash + parentFilter.hashCode(); hash = 31 * hash + childFilter.hashCode(); hash = 31 * hash + docId; diff --git a/core/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java b/core/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java index df1007ebc71..50e91e082cd 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java @@ -122,7 +122,7 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable { weight = super.createWeight(query, needsScores); } finally { profile.stopAndRecordTime(); - profiler.pollLastQuery(); + profiler.pollLastElement(); } return new ProfileWeight(query, weight, profile); } else { diff --git a/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java index 30e994b7656..06df04db8a0 100644 --- 
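
Aside, for illustration only (not part of the patch): the equals()/hashCode() edits above (and the identical ones to SliceQuery further down) switch from super.equals()/super.hashCode() to the sameClassAs()/classHash() helpers that the targeted Lucene version exposes on Query. A hedged sketch of the resulting pattern for a hypothetical custom query (class and field names invented):

import java.util.Objects;
import org.apache.lucene.search.Query;

final class ExampleQuery extends Query {
    private final String field;
    private final int id;

    ExampleQuery(String field, int id) {
        this.field = field;
        this.id = id;
    }

    @Override
    public String toString(String defaultField) {
        return "ExampleQuery(field=" + field + ", id=" + id + ")";
    }

    @Override
    public boolean equals(Object other) {
        return sameClassAs(other)                // null-safe, exact-class comparison
            && equalsTo((ExampleQuery) other);   // then compare this query's own state
    }

    private boolean equalsTo(ExampleQuery other) {
        return field.equals(other.field) && id == other.id;
    }

    @Override
    public int hashCode() {
        return Objects.hash(classHash(), field, id);  // seed with classHash(), not super.hashCode()
    }
}
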
a/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java @@ -68,6 +68,7 @@ import org.elasticsearch.search.profile.Profilers; import org.elasticsearch.search.query.QueryPhaseExecutionException; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.rescore.RescoreSearchContext; +import org.elasticsearch.search.slice.SliceBuilder; import org.elasticsearch.search.sort.SortAndFormats; import org.elasticsearch.search.suggest.SuggestionSearchContext; @@ -116,7 +117,7 @@ public class DefaultSearchContext extends SearchContext { private boolean trackScores = false; // when sorting, track scores as well... private FieldDoc searchAfter; // filter for sliced scroll - private Query sliceFilter; + private SliceBuilder sliceBuilder; /** * The original query as sent by the user without the types and aliases @@ -212,13 +213,23 @@ public class DefaultSearchContext extends SearchContext { if (rescoreContext.window() > maxWindow) { throw new QueryPhaseExecutionException(this, "Rescore window [" + rescoreContext.window() + "] is too large. It must " + "be less than [" + maxWindow + "]. This prevents allocating massive heaps for storing the results to be " - + "rescored. This limit can be set by chaining the [" + IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey() + + "rescored. This limit can be set by changing the [" + IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey() + "] index level setting."); } } } + if (sliceBuilder != null) { + int sliceLimit = indexService.getIndexSettings().getMaxSlicesPerScroll(); + int numSlices = sliceBuilder.getMax(); + if (numSlices > sliceLimit) { + throw new QueryPhaseExecutionException(this, "The number of slices [" + numSlices + "] is too large. It must " + + "be less than [" + sliceLimit + "]. 
This limit can be set by changing the [" + + IndexSettings.MAX_SLICES_PER_SCROLL.getKey() + "] index level setting."); + } + } + // initialize the filtering alias based on the provided filters aliasFilter = indexService.aliasFilter(queryShardContext, request.filteringAliases()); @@ -257,9 +268,11 @@ public class DefaultSearchContext extends SearchContext { @Nullable public Query searchFilter(String[] types) { Query typesFilter = createSearchFilter(types, aliasFilter, mapperService().hasNested()); - if (sliceFilter == null) { + if (sliceBuilder == null) { return typesFilter; } + Query sliceFilter = sliceBuilder.toFilter(queryShardContext, shardTarget().getShardId().getId(), + queryShardContext.getIndexSettings().getNumberOfShards()); if (typesFilter == null) { return sliceFilter; } @@ -562,8 +575,8 @@ public class DefaultSearchContext extends SearchContext { return searchAfter; } - public SearchContext sliceFilter(Query filter) { - this.sliceFilter = filter; + public SearchContext sliceBuilder(SliceBuilder sliceBuilder) { + this.sliceBuilder = sliceBuilder; return this; } diff --git a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java index 9c33889dc9c..26410cc9680 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java +++ b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java @@ -28,13 +28,12 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.profile.SearchProfileShardResults; import org.elasticsearch.search.profile.ProfileShardResult; +import org.elasticsearch.search.profile.SearchProfileShardResults; import org.elasticsearch.search.suggest.Suggest; import java.io.IOException; import java.util.Collections; -import java.util.List; import java.util.Map; import static org.elasticsearch.search.internal.InternalSearchHits.readSearchHits; @@ -99,7 +98,7 @@ public class InternalSearchResponse implements Streamable, ToXContent { * * @return Profile results */ - public Map> profile() { + public Map profile() { if (profileResults == null) { return Collections.emptyMap(); } diff --git a/core/src/main/java/org/elasticsearch/search/profile/AbstractInternalProfileTree.java b/core/src/main/java/org/elasticsearch/search/profile/AbstractInternalProfileTree.java new file mode 100644 index 00000000000..31cb3c21237 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/AbstractInternalProfileTree.java @@ -0,0 +1,209 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.profile; + +import org.elasticsearch.search.profile.query.QueryProfileBreakdown; + +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Deque; +import java.util.List; +import java.util.Map; + +public abstract class AbstractInternalProfileTree, E> { + + protected ArrayList timings; + /** Maps the Query to its list of children. This is basically the dependency tree */ + protected ArrayList> tree; + /** A list of the original queries, keyed by index position */ + protected ArrayList elements; + /** A list of top-level "roots". Each root can have its own tree of profiles */ + protected ArrayList roots; + /** A temporary stack used to record where we are in the dependency tree. */ + protected Deque stack; + private int currentToken = 0; + + public AbstractInternalProfileTree() { + timings = new ArrayList<>(10); + stack = new ArrayDeque<>(10); + tree = new ArrayList<>(10); + elements = new ArrayList<>(10); + roots = new ArrayList<>(10); + } + + /** + * Returns a {@link QueryProfileBreakdown} for a scoring query. Scoring queries (e.g. those + * that are past the rewrite phase and are now being wrapped by createWeight() ) follow + * a recursive progression. We can track the dependency tree by a simple stack + * + * The only hiccup is that the first scoring query will be identical to the last rewritten + * query, so we need to take special care to fix that + * + * @param query The scoring query we wish to profile + * @return A ProfileBreakdown for this query + */ + public PB getProfileBreakdown(E query) { + int token = currentToken; + + boolean stackEmpty = stack.isEmpty(); + + // If the stack is empty, we are a new root query + if (stackEmpty) { + + // We couldn't find a rewritten query to attach to, so just add it as a + // top-level root. This is just a precaution: it really shouldn't happen. + // We would only get here if a top-level query never rewrites for some reason. + roots.add(token); + + // Increment the token since we are adding a new node, but notably, do not + // updateParent() because this was added as a root + currentToken += 1; + stack.add(token); + + return addDependencyNode(query, token); + } + + updateParent(token); + + // Increment the token since we are adding a new node + currentToken += 1; + stack.add(token); + + return addDependencyNode(query, token); + } + + /** + * Helper method to add a new node to the dependency tree. + * + * Initializes a new list in the dependency tree, saves the query and + * generates a new {@link QueryProfileBreakdown} to track the timings of + * this query + * + * @param element + * The element to profile + * @param token + * The assigned token for this element + * @return A ProfileBreakdown to profile this element + */ + private PB addDependencyNode(E element, int token) { + + // Add a new slot in the dependency tree + tree.add(new ArrayList<>(5)); + + // Save our query for lookup later + elements.add(element); + + PB queryTimings = createProfileBreakdown(); + timings.add(token, queryTimings); + return queryTimings; + } + + protected abstract PB createProfileBreakdown(); + + /** + * Removes the last (i.e. 
most recent) value on the stack + */ + public void pollLast() { + stack.pollLast(); + } + + /** + * After the query has been run and profiled, we need to merge the flat timing map + * with the dependency graph to build a data structure that mirrors the original + * query tree + * + * @return a hierarchical representation of the profiled query tree + */ + public List getTree() { + ArrayList results = new ArrayList<>(5); + for (Integer root : roots) { + results.add(doGetTree(root)); + } + return results; + } + + /** + * Recursive helper to finalize a node in the dependency tree + * @param token The node we are currently finalizing + * @return A hierarchical representation of the tree inclusive of children at this level + */ + private ProfileResult doGetTree(int token) { + E element = elements.get(token); + PB breakdown = timings.get(token); + Map timings = breakdown.toTimingMap(); + List children = tree.get(token); + List childrenProfileResults = Collections.emptyList(); + + if (children != null) { + childrenProfileResults = new ArrayList<>(children.size()); + for (Integer child : children) { + ProfileResult childNode = doGetTree(child); + childrenProfileResults.add(childNode); + } + } + + // TODO this would be better done bottom-up instead of top-down to avoid + // calculating the same times over and over...but worth the effort? + long nodeTime = getNodeTime(timings, childrenProfileResults); + String type = getTypeFromElement(element); + String description = getDescriptionFromElement(element); + return new ProfileResult(type, description, timings, childrenProfileResults, nodeTime); + } + + protected abstract String getTypeFromElement(E element); + + protected abstract String getDescriptionFromElement(E element); + + /** + * Internal helper to add a child to the current parent node + * + * @param childToken The child to add to the current parent + */ + private void updateParent(int childToken) { + Integer parent = stack.peekLast(); + ArrayList parentNode = tree.get(parent); + parentNode.add(childToken); + tree.set(parent, parentNode); + } + + /** + * Internal helper to calculate the time of a node, inclusive of children + * + * @param timings + * A map of breakdown timing for the node + * @param children + * All children profile results at this node + * @return The total time at this node, inclusive of children + */ + private static long getNodeTime(Map timings, List children) { + long nodeTime = 0; + for (long time : timings.values()) { + nodeTime += time; + } + + // Then add up our children + for (ProfileResult child : children) { + nodeTime += getNodeTime(child.getTimeBreakdown(), child.getProfiledChildren()); + } + return nodeTime; + } + +} \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/search/profile/AbstractProfiler.java b/core/src/main/java/org/elasticsearch/search/profile/AbstractProfiler.java new file mode 100644 index 00000000000..a7ccb72785e --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/AbstractProfiler.java @@ -0,0 +1,54 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
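
Aside, for illustration only (not part of the patch): a stand-alone replay of the token/stack bookkeeping that AbstractInternalProfileTree generalizes. Each getProfileBreakdown() call takes a fresh token and either becomes a root (empty stack) or a child of the token on top of the stack; pollLast() pops on the way back out of the recursion, and getTree()/getNodeTime() later rebuild the hierarchy with child-inclusive times. Hypothetical, simplified code:

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

public class ProfileTreeTrace {
    static List<List<Integer>> tree = new ArrayList<>();
    static List<Integer> roots = new ArrayList<>();
    static Deque<Integer> stack = new ArrayDeque<>();
    static int currentToken = 0;

    static int enter() {                            // stands in for getProfileBreakdown(element)
        int token = currentToken++;
        tree.add(new ArrayList<>());
        if (stack.isEmpty()) {
            roots.add(token);                       // new top-level root
        } else {
            tree.get(stack.peekLast()).add(token);  // updateParent(token)
        }
        stack.add(token);
        return token;
    }

    public static void main(String[] args) {
        enter();                                    // e.g. a BooleanQuery during createWeight()
        enter(); stack.pollLast();                  // first TermQuery clause, then pollLast()
        enter(); stack.pollLast();                  // second TermQuery clause, then pollLast()
        stack.pollLast();                           // done with the BooleanQuery
        System.out.println("roots=" + roots + " tree=" + tree);
        // prints: roots=[0] tree=[[1, 2], [], []]
    }
}
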
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.profile; + +import java.util.List; + +public class AbstractProfiler, E> { + + protected final AbstractInternalProfileTree profileTree; + + public AbstractProfiler(AbstractInternalProfileTree profileTree) { + this.profileTree = profileTree; + } + + /** + * Get the {@link AbstractProfileBreakdown} for the given element in the + * tree, potentially creating it if it did not exist. + */ + public PB getQueryBreakdown(E query) { + return profileTree.getProfileBreakdown(query); + } + + /** + * Removes the last (i.e. most recent) element on the stack. + */ + public void pollLastElement() { + profileTree.pollLast(); + } + + /** + * @return a hierarchical representation of the profiled tree + */ + public List getTree() { + return profileTree.getTree(); + } + +} \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/search/profile/ProfileShardResult.java b/core/src/main/java/org/elasticsearch/search/profile/ProfileShardResult.java index 9def3db7582..2a1fb0ba9b1 100644 --- a/core/src/main/java/org/elasticsearch/search/profile/ProfileShardResult.java +++ b/core/src/main/java/org/elasticsearch/search/profile/ProfileShardResult.java @@ -22,83 +22,50 @@ package org.elasticsearch.search.profile; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.search.profile.query.CollectorResult; +import org.elasticsearch.search.profile.aggregation.AggregationProfileShardResult; +import org.elasticsearch.search.profile.query.QueryProfileShardResult; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; -/** - * A container class to hold the profile results for a single shard in the request. - * Contains a list of query profiles, a collector tree and a total rewrite tree. - */ -public final class ProfileShardResult implements Writeable, ToXContent { +public class ProfileShardResult implements Writeable { - private final List queryProfileResults; + private final List queryProfileResults; - private final CollectorResult profileCollector; + private final AggregationProfileShardResult aggProfileShardResult; - private final long rewriteTime; - - public ProfileShardResult(List queryProfileResults, long rewriteTime, - CollectorResult profileCollector) { - assert(profileCollector != null); - this.queryProfileResults = queryProfileResults; - this.profileCollector = profileCollector; - this.rewriteTime = rewriteTime; + public ProfileShardResult(List queryProfileResults, AggregationProfileShardResult aggProfileShardResult) { + this.aggProfileShardResult = aggProfileShardResult; + this.queryProfileResults = Collections.unmodifiableList(queryProfileResults); } - /** - * Read from a stream. 
- */ public ProfileShardResult(StreamInput in) throws IOException { int profileSize = in.readVInt(); - queryProfileResults = new ArrayList<>(profileSize); - for (int j = 0; j < profileSize; j++) { - queryProfileResults.add(new ProfileResult(in)); + List queryProfileResults = new ArrayList<>(profileSize); + for (int i = 0; i < profileSize; i++) { + QueryProfileShardResult result = new QueryProfileShardResult(in); + queryProfileResults.add(result); } - - profileCollector = new CollectorResult(in); - rewriteTime = in.readLong(); + this.queryProfileResults = Collections.unmodifiableList(queryProfileResults); + this.aggProfileShardResult = new AggregationProfileShardResult(in); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(queryProfileResults.size()); - for (ProfileResult p : queryProfileResults) { - p.writeTo(out); + for (QueryProfileShardResult queryShardResult : queryProfileResults) { + queryShardResult.writeTo(out); } - profileCollector.writeTo(out); - out.writeLong(rewriteTime); + aggProfileShardResult.writeTo(out); } - - public List getQueryResults() { - return Collections.unmodifiableList(queryProfileResults); + public List getQueryProfileResults() { + return queryProfileResults; } - public long getRewriteTime() { - return rewriteTime; - } - - public CollectorResult getCollectorResult() { - return profileCollector; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startArray("query"); - for (ProfileResult p : queryProfileResults) { - p.toXContent(builder, params); - } - builder.endArray(); - builder.field("rewrite_time", rewriteTime); - builder.startArray("collector"); - profileCollector.toXContent(builder, params); - builder.endArray(); - return builder; + public AggregationProfileShardResult getAggregationProfileResults() { + return aggProfileShardResult; } } diff --git a/core/src/main/java/org/elasticsearch/search/profile/Profilers.java b/core/src/main/java/org/elasticsearch/search/profile/Profilers.java index e9e6d88db18..d754be41f6d 100644 --- a/core/src/main/java/org/elasticsearch/search/profile/Profilers.java +++ b/core/src/main/java/org/elasticsearch/search/profile/Profilers.java @@ -20,22 +20,25 @@ package org.elasticsearch.search.profile; import org.elasticsearch.search.internal.ContextIndexSearcher; +import org.elasticsearch.search.profile.aggregation.AggregationProfiler; import org.elasticsearch.search.profile.query.QueryProfiler; import java.util.ArrayList; import java.util.Collections; import java.util.List; -/** Wrapper around several {@link QueryProfiler}s that makes management easier. */ +/** Wrapper around all the profilers that makes management easier. */ public final class Profilers { private final ContextIndexSearcher searcher; private final List queryProfilers; + private final AggregationProfiler aggProfiler; /** Sole constructor. This {@link Profilers} instance will initially wrap one {@link QueryProfiler}. */ public Profilers(ContextIndexSearcher searcher) { this.searcher = searcher; this.queryProfilers = new ArrayList<>(); + this.aggProfiler = new AggregationProfiler(); addQueryProfiler(); } @@ -57,4 +60,9 @@ public final class Profilers { return Collections.unmodifiableList(queryProfilers); } + /** Return the {@link AggregationProfiler}. 
*/ + public AggregationProfiler getAggregationProfiler() { + return aggProfiler; + } + } diff --git a/core/src/main/java/org/elasticsearch/search/profile/SearchProfileShardResults.java b/core/src/main/java/org/elasticsearch/search/profile/SearchProfileShardResults.java index bf265dd9a7e..6794aa49399 100644 --- a/core/src/main/java/org/elasticsearch/search/profile/SearchProfileShardResults.java +++ b/core/src/main/java/org/elasticsearch/search/profile/SearchProfileShardResults.java @@ -24,6 +24,9 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.profile.aggregation.AggregationProfileShardResult; +import org.elasticsearch.search.profile.aggregation.AggregationProfiler; +import org.elasticsearch.search.profile.query.QueryProfileShardResult; import org.elasticsearch.search.profile.query.QueryProfiler; import java.io.IOException; @@ -32,7 +35,6 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.stream.Collectors; /** * A container class to hold all the profile results across all shards. Internally @@ -40,17 +42,10 @@ import java.util.stream.Collectors; */ public final class SearchProfileShardResults implements Writeable, ToXContent{ - private Map> shardResults; + private Map shardResults; - public SearchProfileShardResults(Map> shardResults) { - Map> transformed = - shardResults.entrySet() - .stream() - .collect(Collectors.toMap( - Map.Entry::getKey, - e -> Collections.unmodifiableList(e.getValue())) - ); - this.shardResults = Collections.unmodifiableMap(transformed); + public SearchProfileShardResults(Map shardResults) { + this.shardResults = Collections.unmodifiableMap(shardResults); } public SearchProfileShardResults(StreamInput in) throws IOException { @@ -59,33 +54,22 @@ public final class SearchProfileShardResults implements Writeable, ToXContent{ for (int i = 0; i < size; i++) { String key = in.readString(); - int shardResultsSize = in.readInt(); - - List shardResult = new ArrayList<>(shardResultsSize); - - for (int j = 0; j < shardResultsSize; j++) { - ProfileShardResult result = new ProfileShardResult(in); - shardResult.add(result); - } - shardResults.put(key, Collections.unmodifiableList(shardResult)); + ProfileShardResult shardResult = new ProfileShardResult(in); + shardResults.put(key, shardResult); } shardResults = Collections.unmodifiableMap(shardResults); } - public Map> getShardResults() { + public Map getShardResults() { return this.shardResults; } @Override public void writeTo(StreamOutput out) throws IOException { out.writeInt(shardResults.size()); - for (Map.Entry> entry : shardResults.entrySet()) { + for (Map.Entry entry : shardResults.entrySet()) { out.writeString(entry.getKey()); - out.writeInt(entry.getValue().size()); - - for (ProfileShardResult result : entry.getValue()) { - result.writeTo(out); - } + entry.getValue().writeTo(out); } } @@ -93,14 +77,18 @@ public final class SearchProfileShardResults implements Writeable, ToXContent{ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject("profile").startArray("shards"); - for (Map.Entry> entry : shardResults.entrySet()) { - builder.startObject().field("id",entry.getKey()).startArray("searches"); - for (ProfileShardResult result : entry.getValue()) { + for (Map.Entry entry : 
shardResults.entrySet()) { + builder.startObject(); + builder.field("id", entry.getKey()); + builder.startArray("searches"); + for (QueryProfileShardResult result : entry.getValue().getQueryProfileResults()) { builder.startObject(); result.toXContent(builder, params); builder.endObject(); } - builder.endArray().endObject(); + builder.endArray(); + entry.getValue().getAggregationProfileResults().toXContent(builder, params); + builder.endObject(); } builder.endArray().endObject(); @@ -112,16 +100,20 @@ public final class SearchProfileShardResults implements Writeable, ToXContent{ * can be serialized to other nodes, emitted as JSON, etc. * * @param profilers - * A list of Profilers to convert into - * InternalProfileShardResults - * @return A list of corresponding InternalProfileShardResults + * The {@link Profilers} to convert into results + * @return A {@link ProfileShardResult} representing the results for this + * shard */ - public static List buildShardResults(List profilers) { - List results = new ArrayList<>(profilers.size()); - for (QueryProfiler profiler : profilers) { - ProfileShardResult result = new ProfileShardResult(profiler.getQueryTree(), profiler.getRewriteTime(), profiler.getCollector()); - results.add(result); + public static ProfileShardResult buildShardResults(Profilers profilers) { + List queryProfilers = profilers.getQueryProfilers(); + AggregationProfiler aggProfiler = profilers.getAggregationProfiler(); + List queryResults = new ArrayList<>(queryProfilers.size()); + for (QueryProfiler queryProfiler : queryProfilers) { + QueryProfileShardResult result = new QueryProfileShardResult(queryProfiler.getTree(), queryProfiler.getRewriteTime(), + queryProfiler.getCollector()); + queryResults.add(result); } - return results; + AggregationProfileShardResult aggResults = new AggregationProfileShardResult(aggProfiler.getTree()); + return new ProfileShardResult(queryResults, aggResults); } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Analyzer.java b/core/src/main/java/org/elasticsearch/search/profile/aggregation/AggregationProfileBreakdown.java similarity index 64% rename from modules/lang-painless/src/main/java/org/elasticsearch/painless/Analyzer.java rename to core/src/main/java/org/elasticsearch/search/profile/aggregation/AggregationProfileBreakdown.java index a06bcdf9840..b4cb1efe5d3 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Analyzer.java +++ b/core/src/main/java/org/elasticsearch/search/profile/aggregation/AggregationProfileBreakdown.java @@ -17,21 +17,14 @@ * under the License. */ -package org.elasticsearch.painless; +package org.elasticsearch.search.profile.aggregation; -import org.elasticsearch.painless.Variables.Reserved; -import org.elasticsearch.painless.node.SSource; +import org.elasticsearch.search.profile.AbstractProfileBreakdown; -/** - * Runs the analysis phase of compilation using the Painless AST. 
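
Aside, for illustration only (not part of the patch): with buildShardResults() now returning a single ProfileShardResult per shard, each shard entry written by toXContent() above carries both the query-phase trees and the aggregation tree. An abridged, hand-written sketch of the emitted shape (inner ProfileResult fields elided):

"profile": {
  "shards": [
    {
      "id": "<shard target string>",
      "searches": [
        { "query": [ ... ], "rewrite_time": 12345, "collector": [ ... ] }
      ],
      "aggregations": [ ... ]
    }
  ]
}

Each element of "searches" is one QueryProfileShardResult, and "aggregations" comes from AggregationProfileShardResult.toXContent().
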
- */ -final class Analyzer { - static Variables analyze(Reserved shortcut, SSource root) { - Variables variables = new Variables(shortcut); - root.analyze(variables); +public class AggregationProfileBreakdown extends AbstractProfileBreakdown { - return variables; + public AggregationProfileBreakdown() { + super(AggregationTimingType.values()); } - private Analyzer() {} } diff --git a/core/src/main/java/org/elasticsearch/search/profile/aggregation/AggregationProfileShardResult.java b/core/src/main/java/org/elasticsearch/search/profile/aggregation/AggregationProfileShardResult.java new file mode 100644 index 00000000000..df55c5592d6 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/aggregation/AggregationProfileShardResult.java @@ -0,0 +1,79 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.profile.aggregation; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.profile.ProfileResult; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +/** + * A container class to hold the profile results for a single shard in the request. + * Contains a list of aggregation profile results. + */ +public final class AggregationProfileShardResult implements Writeable, ToXContent { + + private final List aggProfileResults; + + public AggregationProfileShardResult(List aggProfileResults) { + this.aggProfileResults = aggProfileResults; + } + + /** + * Read from a stream. 
+ */ + public AggregationProfileShardResult(StreamInput in) throws IOException { + int profileSize = in.readVInt(); + aggProfileResults = new ArrayList<>(profileSize); + for (int j = 0; j < profileSize; j++) { + aggProfileResults.add(new ProfileResult(in)); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(aggProfileResults.size()); + for (ProfileResult p : aggProfileResults) { + p.writeTo(out); + } + } + + + public List getProfileResults() { + return Collections.unmodifiableList(aggProfileResults); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startArray("aggregations"); + for (ProfileResult p : aggProfileResults) { + p.toXContent(builder, params); + } + builder.endArray(); + return builder; + } +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/aggregation/AggregationProfiler.java b/core/src/main/java/org/elasticsearch/search/profile/aggregation/AggregationProfiler.java new file mode 100644 index 00000000000..45d401ccbdc --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/aggregation/AggregationProfiler.java @@ -0,0 +1,57 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.profile.aggregation; + +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.profile.AbstractProfiler; + +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +public class AggregationProfiler extends AbstractProfiler { + + private final Map, AggregationProfileBreakdown> profileBreakdownLookup = new HashMap<>(); + + public AggregationProfiler() { + super(new InternalAggregationProfileTree()); + } + + @Override + public AggregationProfileBreakdown getQueryBreakdown(Aggregator agg) { + List path = getAggregatorPath(agg); + AggregationProfileBreakdown aggregationProfileBreakdown = profileBreakdownLookup.get(path); + if (aggregationProfileBreakdown == null) { + aggregationProfileBreakdown = super.getQueryBreakdown(agg); + profileBreakdownLookup.put(path, aggregationProfileBreakdown); + } + return aggregationProfileBreakdown; + } + + public static List getAggregatorPath(Aggregator agg) { + LinkedList path = new LinkedList<>(); + while (agg != null) { + path.addFirst(agg.name()); + agg = agg.parent(); + } + return path; + } +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/aggregation/AggregationTimingType.java b/core/src/main/java/org/elasticsearch/search/profile/aggregation/AggregationTimingType.java new file mode 100644 index 00000000000..d1c5d3dd538 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/aggregation/AggregationTimingType.java @@ -0,0 +1,34 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.profile.aggregation; + +import java.util.Locale; + +public enum AggregationTimingType { + INITIALIZE, + COLLECT, + BUILD_AGGREGATION, + REDUCE; + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/aggregation/InternalAggregationProfileTree.java b/core/src/main/java/org/elasticsearch/search/profile/aggregation/InternalAggregationProfileTree.java new file mode 100644 index 00000000000..f367595c84c --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/aggregation/InternalAggregationProfileTree.java @@ -0,0 +1,46 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
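
Aside, for illustration only (not part of the patch): getAggregatorPath() keys each breakdown by the aggregator's full name path, so lookups for the same sub-aggregation (which may be re-created per bucket) land on one shared breakdown. A stand-alone illustration with a hypothetical Node in place of Aggregator:

import java.util.LinkedList;
import java.util.List;

public class PathDemo {
    static final class Node {
        final String name;
        final Node parent;
        Node(String name, Node parent) { this.name = name; this.parent = parent; }
    }

    static List<String> path(Node node) {
        LinkedList<String> path = new LinkedList<>();
        while (node != null) {
            path.addFirst(node.name);   // prepend, so the root ends up first
            node = node.parent;
        }
        return path;
    }

    public static void main(String[] args) {
        Node authors = new Node("authors", null);         // e.g. a terms aggregation
        Node avgWords = new Node("avg_words", authors);   // a nested avg aggregation
        System.out.println(path(avgWords));               // [authors, avg_words]
    }
}
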
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.profile.aggregation; + +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.AggregatorFactory.MultiBucketAggregatorWrapper; +import org.elasticsearch.search.profile.AbstractInternalProfileTree; + +public class InternalAggregationProfileTree extends AbstractInternalProfileTree { + + @Override + protected AggregationProfileBreakdown createProfileBreakdown() { + return new AggregationProfileBreakdown(); + } + + @Override + protected String getTypeFromElement(Aggregator element) { + if (element instanceof MultiBucketAggregatorWrapper) { + return ((MultiBucketAggregatorWrapper) element).getWrappedClass().getName(); + } + return element.getClass().getName(); + } + + @Override + protected String getDescriptionFromElement(Aggregator element) { + return element.name(); + } + +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingAggregator.java b/core/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingAggregator.java new file mode 100644 index 00000000000..2883c2903e8 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingAggregator.java @@ -0,0 +1,103 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.profile.aggregation; + +import org.apache.lucene.index.LeafReaderContext; +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.LeafBucketCollector; +import org.elasticsearch.search.aggregations.support.AggregationContext; + +import java.io.IOException; + +public class ProfilingAggregator extends Aggregator { + + private final Aggregator delegate; + private final AggregationProfiler profiler; + private AggregationProfileBreakdown profileBreakdown; + + public ProfilingAggregator(Aggregator delegate, AggregationProfiler profiler) throws IOException { + this.profiler = profiler; + this.delegate = delegate; + } + + @Override + public void close() { + delegate.close(); + } + + @Override + public boolean needsScores() { + return delegate.needsScores(); + } + + @Override + public String name() { + return delegate.name(); + } + + @Override + public AggregationContext context() { + return delegate.context(); + } + + @Override + public Aggregator parent() { + return delegate.parent(); + } + + @Override + public Aggregator subAggregator(String name) { + return delegate.subAggregator(name); + } + + @Override + public InternalAggregation buildAggregation(long bucket) throws IOException { + profileBreakdown.startTime(AggregationTimingType.BUILD_AGGREGATION); + InternalAggregation result = delegate.buildAggregation(bucket); + profileBreakdown.stopAndRecordTime(); + return result; + } + + @Override + public InternalAggregation buildEmptyAggregation() { + return delegate.buildEmptyAggregation(); + } + + @Override + public LeafBucketCollector getLeafCollector(LeafReaderContext ctx) throws IOException { + return new ProfilingLeafBucketCollector(delegate.getLeafCollector(ctx), profileBreakdown); + } + + @Override + public void preCollection() throws IOException { + this.profileBreakdown = profiler.getQueryBreakdown(delegate); + profileBreakdown.startTime(AggregationTimingType.INITIALIZE); + delegate.preCollection(); + profileBreakdown.stopAndRecordTime(); + profiler.pollLastElement(); + } + + @Override + public void postCollection() throws IOException { + delegate.postCollection(); + } + +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingLeafBucketCollector.java b/core/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingLeafBucketCollector.java new file mode 100644 index 00000000000..75c90ded709 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingLeafBucketCollector.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
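
Aside, for illustration only (not part of the patch): ProfilingAggregator brackets each delegated call with startTime(...)/stopAndRecordTime() on its breakdown, so preCollection() feeds INITIALIZE, collection (via ProfilingLeafBucketCollector, next file) feeds COLLECT, and buildAggregation() feeds BUILD_AGGREGATION. A minimal sketch of such an enum-keyed accumulator; the real AbstractProfileBreakdown is richer:

public class PhaseTimerDemo {
    enum Phase { INITIALIZE, COLLECT, BUILD_AGGREGATION }

    static final long[] totals = new long[Phase.values().length];
    static Phase current;
    static long started;

    static void startTime(Phase phase) {
        current = phase;
        started = System.nanoTime();
    }

    static void stopAndRecordTime() {
        totals[current.ordinal()] += System.nanoTime() - started;
    }

    public static void main(String[] args) {
        startTime(Phase.BUILD_AGGREGATION);  // as around delegate.buildAggregation(bucket)
        Math.sqrt(42);                       // stand-in for the delegated work
        stopAndRecordTime();
        System.out.println(totals[Phase.BUILD_AGGREGATION.ordinal()] + "ns in build_aggregation");
    }
}
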
+ */ + +package org.elasticsearch.search.profile.aggregation; + +import org.elasticsearch.search.aggregations.LeafBucketCollector; + +import java.io.IOException; + +public class ProfilingLeafBucketCollector extends LeafBucketCollector { + + private LeafBucketCollector delegate; + private AggregationProfileBreakdown profileBreakdown; + + public ProfilingLeafBucketCollector(LeafBucketCollector delegate, AggregationProfileBreakdown profileBreakdown) { + this.delegate = delegate; + this.profileBreakdown = profileBreakdown; + } + + @Override + public void collect(int doc, long bucket) throws IOException { + profileBreakdown.startTime(AggregationTimingType.COLLECT); + delegate.collect(doc, bucket); + profileBreakdown.stopAndRecordTime(); + } + +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/query/InternalQueryProfileTree.java b/core/src/main/java/org/elasticsearch/search/profile/query/InternalQueryProfileTree.java index 5b92ef8b2a9..013b7d3a506 100644 --- a/core/src/main/java/org/elasticsearch/search/profile/query/InternalQueryProfileTree.java +++ b/core/src/main/java/org/elasticsearch/search/profile/query/InternalQueryProfileTree.java @@ -20,89 +20,33 @@ package org.elasticsearch.search.profile.query; import org.apache.lucene.search.Query; +import org.elasticsearch.search.profile.AbstractInternalProfileTree; import org.elasticsearch.search.profile.ProfileResult; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Deque; -import java.util.List; -import java.util.Map; -import java.util.concurrent.LinkedBlockingDeque; - /** * This class tracks the dependency tree for queries (scoring and rewriting) and * generates {@link QueryProfileBreakdown} for each node in the tree. It also finalizes the tree * and returns a list of {@link ProfileResult} that can be serialized back to the client */ -final class InternalQueryProfileTree { - - private ArrayList timings; - - /** Maps the Query to it's list of children. This is basically the dependency tree */ - private ArrayList> tree; - - /** A list of the original queries, keyed by index position */ - private ArrayList queries; - - /** A list of top-level "roots". Each root can have its own tree of profiles */ - private ArrayList roots; +final class InternalQueryProfileTree extends AbstractInternalProfileTree { /** Rewrite time */ private long rewriteTime; private long rewriteScratch; - /** A temporary stack used to record where we are in the dependency tree. Only used by scoring queries */ - private Deque stack; - - private int currentToken = 0; - - public InternalQueryProfileTree() { - timings = new ArrayList<>(10); - stack = new LinkedBlockingDeque<>(10); - tree = new ArrayList<>(10); - queries = new ArrayList<>(10); - roots = new ArrayList<>(10); + @Override + protected QueryProfileBreakdown createProfileBreakdown() { + return new QueryProfileBreakdown(); } - /** - * Returns a {@link QueryProfileBreakdown} for a scoring query. Scoring queries (e.g. those - * that are past the rewrite phase and are now being wrapped by createWeight() ) follow - * a recursive progression. 
We can track the dependency tree by a simple stack - * - * The only hiccup is that the first scoring query will be identical to the last rewritten - * query, so we need to take special care to fix that - * - * @param query The scoring query we wish to profile - * @return A ProfileBreakdown for this query - */ - public QueryProfileBreakdown getQueryBreakdown(Query query) { - int token = currentToken; + @Override + protected String getTypeFromElement(Query query) { + return query.getClass().getSimpleName(); + } - boolean stackEmpty = stack.isEmpty(); - - // If the stack is empty, we are a new root query - if (stackEmpty) { - - // We couldn't find a rewritten query to attach to, so just add it as a - // top-level root. This is just a precaution: it really shouldn't happen. - // We would only get here if a top-level query that never rewrites for some reason. - roots.add(token); - - // Increment the token since we are adding a new node, but notably, do not - // updateParent() because this was added as a root - currentToken += 1; - stack.add(token); - - return addDependencyNode(query, token); - } - - updateParent(token); - - // Increment the token since we are adding a new node - currentToken += 1; - stack.add(token); - - return addDependencyNode(query, token); + @Override + protected String getDescriptionFromElement(Query query) { + return query.toString(); } /** @@ -128,113 +72,7 @@ final class InternalQueryProfileTree { return time; } - /** - * Helper method to add a new node to the dependency tree. - * - * Initializes a new list in the dependency tree, saves the query and - * generates a new {@link QueryProfileBreakdown} to track the timings - * of this query - * - * @param query The query to profile - * @param token The assigned token for this query - * @return A ProfileBreakdown to profile this query - */ - private QueryProfileBreakdown addDependencyNode(Query query, int token) { - - // Add a new slot in the dependency tree - tree.add(new ArrayList<>(5)); - - // Save our query for lookup later - queries.add(query); - - QueryProfileBreakdown queryTimings = new QueryProfileBreakdown(); - timings.add(token, queryTimings); - return queryTimings; - } - - /** - * Removes the last (e.g. 
most recent) value on the stack - */ - public void pollLast() { - stack.pollLast(); - } - - /** - * After the query has been run and profiled, we need to merge the flat timing map - * with the dependency graph to build a data structure that mirrors the original - * query tree - * - * @return a hierarchical representation of the profiled query tree - */ - public List getQueryTree() { - ArrayList results = new ArrayList<>(5); - for (Integer root : roots) { - results.add(doGetQueryTree(root)); - } - return results; - } - - /** - * Recursive helper to finalize a node in the dependency tree - * @param token The node we are currently finalizing - * @return A hierarchical representation of the tree inclusive of children at this level - */ - private ProfileResult doGetQueryTree(int token) { - Query query = queries.get(token); - QueryProfileBreakdown breakdown = timings.get(token); - Map timings = breakdown.toTimingMap(); - List children = tree.get(token); - List childrenProfileResults = Collections.emptyList(); - - if (children != null) { - childrenProfileResults = new ArrayList<>(children.size()); - for (Integer child : children) { - ProfileResult childNode = doGetQueryTree(child); - childrenProfileResults.add(childNode); - } - } - - // TODO this would be better done bottom-up instead of top-down to avoid - // calculating the same times over and over...but worth the effort? - long nodeTime = getNodeTime(timings, childrenProfileResults); - String queryDescription = query.getClass().getSimpleName(); - String luceneName = query.toString(); - return new ProfileResult(queryDescription, luceneName, timings, childrenProfileResults, nodeTime); - } - public long getRewriteTime() { return rewriteTime; } - - /** - * Internal helper to add a child to the current parent node - * - * @param childToken The child to add to the current parent - */ - private void updateParent(int childToken) { - Integer parent = stack.peekLast(); - ArrayList parentNode = tree.get(parent); - parentNode.add(childToken); - tree.set(parent, parentNode); - } - - /** - * Internal helper to calculate the time of a node, inclusive of children - * - * @param timings A map of breakdown timing for the node - * @param children All children profile results at this node - * @return The total time at this node, inclusive of children - */ - private static long getNodeTime(Map timings, List children) { - long nodeTime = 0; - for (long time : timings.values()) { - nodeTime += time; - } - - // Then add up our children - for (ProfileResult child : children) { - nodeTime += getNodeTime(child.getTimeBreakdown(), child.getProfiledChildren()); - } - return nodeTime; - } } diff --git a/core/src/main/java/org/elasticsearch/search/profile/query/QueryProfileShardResult.java b/core/src/main/java/org/elasticsearch/search/profile/query/QueryProfileShardResult.java new file mode 100644 index 00000000000..d5e00aca336 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/query/QueryProfileShardResult.java @@ -0,0 +1,104 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.profile.query; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.profile.ProfileResult; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +/** + * A container class to hold the profile results for a single shard in the request. + * Contains a list of query profiles, a collector tree and a total rewrite tree. + */ +public final class QueryProfileShardResult implements Writeable, ToXContent { + + private final List queryProfileResults; + + private final CollectorResult profileCollector; + + private final long rewriteTime; + + public QueryProfileShardResult(List queryProfileResults, long rewriteTime, + CollectorResult profileCollector) { + assert(profileCollector != null); + this.queryProfileResults = queryProfileResults; + this.profileCollector = profileCollector; + this.rewriteTime = rewriteTime; + } + + /** + * Read from a stream. + */ + public QueryProfileShardResult(StreamInput in) throws IOException { + int profileSize = in.readVInt(); + queryProfileResults = new ArrayList<>(profileSize); + for (int j = 0; j < profileSize; j++) { + queryProfileResults.add(new ProfileResult(in)); + } + + profileCollector = new CollectorResult(in); + rewriteTime = in.readLong(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(queryProfileResults.size()); + for (ProfileResult p : queryProfileResults) { + p.writeTo(out); + } + profileCollector.writeTo(out); + out.writeLong(rewriteTime); + } + + + public List getQueryResults() { + return Collections.unmodifiableList(queryProfileResults); + } + + public long getRewriteTime() { + return rewriteTime; + } + + public CollectorResult getCollectorResult() { + return profileCollector; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startArray("query"); + for (ProfileResult p : queryProfileResults) { + p.toXContent(builder, params); + } + builder.endArray(); + builder.field("rewrite_time", rewriteTime); + builder.startArray("collector"); + profileCollector.toXContent(builder, params); + builder.endArray(); + return builder; + } +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/query/QueryProfiler.java b/core/src/main/java/org/elasticsearch/search/profile/query/QueryProfiler.java index 57341ee132f..0051356e35a 100644 --- a/core/src/main/java/org/elasticsearch/search/profile/query/QueryProfiler.java +++ b/core/src/main/java/org/elasticsearch/search/profile/query/QueryProfiler.java @@ -20,9 +20,8 @@ package org.elasticsearch.search.profile.query; import org.apache.lucene.search.Query; -import org.elasticsearch.search.profile.ProfileResult; +import org.elasticsearch.search.profile.AbstractProfiler; -import java.util.List; import 
java.util.Objects; /** @@ -36,16 +35,16 @@ import java.util.Objects; * request may execute two searches (query + global agg). A Profiler just * represents one of those */ -public final class QueryProfiler { - - private final InternalQueryProfileTree queryTree = new InternalQueryProfileTree(); +public final class QueryProfiler extends AbstractProfiler<QueryProfileBreakdown, Query> { /** * The root Collector used in the search */ private InternalProfileCollector collector; - public QueryProfiler() {} + public QueryProfiler() { + super(new InternalQueryProfileTree()); + } /** Set the collector that is associated with this profiler. */ public void setCollector(InternalProfileCollector collector) { @@ -55,21 +54,12 @@ public final class QueryProfiler { this.collector = Objects.requireNonNull(collector); } - /** - * Get the {@link QueryProfileBreakdown} for the given query, potentially creating it if it did not exist. - * This should only be used for queries that will be undergoing scoring. Do not use it to profile the - * rewriting phase - */ - public QueryProfileBreakdown getQueryBreakdown(Query query) { - return queryTree.getQueryBreakdown(query); - } - /** * Begin timing the rewrite phase of a request. All rewrites are accumulated together into a * single metric */ public void startRewriteTime() { - queryTree.startRewriteTime(); + ((InternalQueryProfileTree) profileTree).startRewriteTime(); } /** @@ -79,29 +69,14 @@ public final class QueryProfiler { * @return cumulative rewrite time */ public long stopAndAddRewriteTime() { - return queryTree.stopAndAddRewriteTime(); - } - - /** - * Removes the last (e.g. most recent) query on the stack. This should only be called for scoring - * queries, not rewritten queries - */ - public void pollLastQuery() { - queryTree.pollLast(); - } - - /** - * @return a hierarchical representation of the profiled query tree - */ - public List<ProfileResult> getQueryTree() { - return queryTree.getQueryTree(); + return ((InternalQueryProfileTree) profileTree).stopAndAddRewriteTime(); } /** * @return total time taken to rewrite all queries in this profile */ public long getRewriteTime() { - return queryTree.getRewriteTime(); + return ((InternalQueryProfileTree) profileTree).getRewriteTime(); } /** diff --git a/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java b/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java index df68064f617..174a337f3d2 100644 --- a/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java +++ b/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java @@ -112,8 +112,8 @@ public class QueryPhase implements SearchPhase { aggregationPhase.execute(searchContext); if (searchContext.getProfilers() != null) { - List<ProfileShardResult> shardResults = SearchProfileShardResults - .buildShardResults(searchContext.getProfilers().getQueryProfilers()); + ProfileShardResult shardResults = SearchProfileShardResults + .buildShardResults(searchContext.getProfilers()); searchContext.queryResult().profileResults(shardResults); } } @@ -385,8 +385,8 @@ public class QueryPhase implements SearchPhase { queryResult.topDocs(topDocsCallable.call(), sortValueFormats); if (searchContext.getProfilers() != null) { - List<ProfileShardResult> shardResults = SearchProfileShardResults - .buildShardResults(searchContext.getProfilers().getQueryProfilers()); + ProfileShardResult shardResults = SearchProfileShardResults + .buildShardResults(searchContext.getProfilers()); searchContext.queryResult().profileResults(shardResults); } diff --git a/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java
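QueryProfileShardResult above follows the usual Writeable discipline: writeTo() emits exactly what the StreamInput constructor reads back, in the same order (vInt count, each ProfileResult, the CollectorResult, then the raw rewrite-time long). A sketch of the standard round-trip check, assuming Elasticsearch's BytesStreamOutput test harness:

    BytesStreamOutput out = new BytesStreamOutput();
    original.writeTo(out);                                       // serialize
    QueryProfileShardResult copy =
        new QueryProfileShardResult(out.bytes().streamInput());  // read back in mirror order
    assert copy.getRewriteTime() == original.getRewriteTime();
    assert copy.getQueryResults().size() == original.getQueryResults().size();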
b/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java index 1408ebe8359..be8c895eecd 100644 --- a/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java +++ b/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java @@ -59,7 +59,7 @@ public class QuerySearchResult extends QuerySearchResultProvider { private Suggest suggest; private boolean searchTimedOut; private Boolean terminatedEarly = null; - private List profileShardResults; + private ProfileShardResult profileShardResults; public QuerySearchResult() { @@ -143,7 +143,7 @@ public class QuerySearchResult extends QuerySearchResultProvider { * Returns the profiled results for this search, or potentially null if result was empty * @return The profiled results, or null */ - public @Nullable List profileResults() { + public @Nullable ProfileShardResult profileResults() { return profileShardResults; } @@ -151,7 +151,7 @@ public class QuerySearchResult extends QuerySearchResultProvider { * Sets the finalized profiling results for this query * @param shardResults The finalized profile */ - public void profileResults(List shardResults) { + public void profileResults(ProfileShardResult shardResults) { this.profileShardResults = shardResults; } @@ -237,12 +237,7 @@ public class QuerySearchResult extends QuerySearchResultProvider { terminatedEarly = in.readOptionalBoolean(); if (in.getVersion().onOrAfter(Version.V_2_2_0) && in.readBoolean()) { - int profileSize = in.readVInt(); - profileShardResults = new ArrayList<>(profileSize); - for (int i = 0; i < profileSize; i++) { - ProfileShardResult result = new ProfileShardResult(in); - profileShardResults.add(result); - } + profileShardResults = new ProfileShardResult(in); } } @@ -296,10 +291,7 @@ public class QuerySearchResult extends QuerySearchResultProvider { out.writeBoolean(false); } else { out.writeBoolean(true); - out.writeVInt(profileShardResults.size()); - for (ProfileShardResult shardResult : profileShardResults) { - shardResult.writeTo(out); - } + profileShardResults.writeTo(out); } } } diff --git a/core/src/main/java/org/elasticsearch/search/slice/SliceQuery.java b/core/src/main/java/org/elasticsearch/search/slice/SliceQuery.java index 0d87b275403..2b8040ebd28 100644 --- a/core/src/main/java/org/elasticsearch/search/slice/SliceQuery.java +++ b/core/src/main/java/org/elasticsearch/search/slice/SliceQuery.java @@ -61,7 +61,7 @@ public abstract class SliceQuery extends Query { @Override public boolean equals(Object o) { - if (super.equals(o) == false) { + if (sameClassAs(o) == false) { return false; } SliceQuery that = (SliceQuery) o; @@ -70,7 +70,7 @@ public abstract class SliceQuery extends Query { @Override public int hashCode() { - return Objects.hash(super.hashCode(), field, id, max); + return Objects.hash(classHash(), field, id, max); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/slice/TermsSliceQuery.java b/core/src/main/java/org/elasticsearch/search/slice/TermsSliceQuery.java index b967a6b6e71..429a3ebe892 100644 --- a/core/src/main/java/org/elasticsearch/search/slice/TermsSliceQuery.java +++ b/core/src/main/java/org/elasticsearch/search/slice/TermsSliceQuery.java @@ -74,11 +74,7 @@ public final class TermsSliceQuery extends SliceQuery { int hashCode = term.hashCode(); if (contains(hashCode)) { docsEnum = te.postings(docsEnum, PostingsEnum.NONE); - int docId = docsEnum.nextDoc(); - while (docId != DocIdSetIterator.NO_MORE_DOCS) { - builder.add(docId); - docId = docsEnum.nextDoc(); - } + builder.add(docsEnum); 
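The TermsSliceQuery change just above relies on the DocIdSetBuilder.add(DocIdSetIterator) bulk overload available after this Lucene upgrade, which subsumes the hand-rolled postings loop and can pre-size the builder from the iterator's cost(). The two forms are equivalent:

    // before: walk the postings manually
    int docId = docsEnum.nextDoc();
    while (docId != DocIdSetIterator.NO_MORE_DOCS) {
        builder.add(docId);
        docId = docsEnum.nextDoc();
    }

    // after: let the builder drain the iterator itself
    builder.add(docsEnum);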
} } return builder.build(); diff --git a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransportChannel.java b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransportChannel.java index 91b6bc120ad..03856017c36 100644 --- a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransportChannel.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransportChannel.java @@ -42,9 +42,6 @@ import org.jboss.netty.channel.ChannelFutureListener; import java.io.IOException; import java.util.concurrent.atomic.AtomicBoolean; -/** - * - */ public class NettyTransportChannel implements TransportChannel { private final NettyTransport transport; @@ -55,7 +52,7 @@ public class NettyTransportChannel implements TransportChannel { private final long requestId; private final String profileName; private final long reservedBytes; - private final AtomicBoolean closed = new AtomicBoolean(); + private final AtomicBoolean released = new AtomicBoolean(); public NettyTransportChannel(NettyTransport transport, TransportServiceAdapter transportServiceAdapter, String action, Channel channel, long requestId, Version version, String profileName, long reservedBytes) { @@ -86,7 +83,7 @@ public class NettyTransportChannel implements TransportChannel { @Override public void sendResponse(TransportResponse response, TransportResponseOptions options) throws IOException { - close(); + release(); if (transport.compress) { options = TransportResponseOptions.builder(options).withCompress(transport.compress).build(); } @@ -128,7 +125,7 @@ public class NettyTransportChannel implements TransportChannel { @Override public void sendResponse(Throwable error) throws IOException { - close(); + release(); BytesStreamOutput stream = new BytesStreamOutput(); stream.skip(NettyHeader.HEADER_SIZE); RemoteTransportException tx = new RemoteTransportException( @@ -147,10 +144,10 @@ public class NettyTransportChannel implements TransportChannel { future.addListener(onResponseSentListener); } - private void close() { - // attempt to close once atomically - if (closed.compareAndSet(false, true) == false) { - throw new IllegalStateException("Channel is already closed"); + private void release() { + // attempt to release once atomically + if (released.compareAndSet(false, true) == false) { + throw new IllegalStateException("reserved bytes are already released"); } transport.inFlightRequestsBreaker().addWithoutBreaking(-reservedBytes); } @@ -174,4 +171,5 @@ public class NettyTransportChannel implements TransportChannel { public Channel getChannel() { return channel; } + } diff --git a/core/src/main/resources/org/elasticsearch/bootstrap/security.policy b/core/src/main/resources/org/elasticsearch/bootstrap/security.policy index ff0ea773667..b185289b58d 100644 --- a/core/src/main/resources/org/elasticsearch/bootstrap/security.policy +++ b/core/src/main/resources/org/elasticsearch/bootstrap/security.policy @@ -31,7 +31,7 @@ grant codeBase "${codebase.securesm-1.0.jar}" { //// Very special jar permissions: //// These are dangerous permissions that we don't want to grant to everything. 
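The close()-to-release() rename in NettyTransportChannel above is more than cosmetic: the AtomicBoolean guards the one-time return of reserved circuit-breaker bytes, not a channel close. The idiom in isolation:

    import java.util.concurrent.atomic.AtomicBoolean;

    final class ReservedBytes {
        private final AtomicBoolean released = new AtomicBoolean();

        void release() {
            // compareAndSet succeeds for exactly one caller; any later
            // double-release fails loudly instead of corrupting the accounting
            if (released.compareAndSet(false, true) == false) {
                throw new IllegalStateException("reserved bytes are already released");
            }
            // ... hand the reserved bytes back to the breaker here ...
        }
    }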
-grant codeBase "${codebase.lucene-core-6.0.1.jar}" { +grant codeBase "${codebase.lucene-core-6.1.0-snapshot-3a57bea.jar}" { // needed to allow MMapDirectory's "unmap hack" (die unmap hack, die) // java 8 package permission java.lang.RuntimePermission "accessClassInPackage.sun.misc"; @@ -42,6 +42,11 @@ grant codeBase "${codebase.lucene-core-6.0.1.jar}" { permission java.lang.RuntimePermission "accessDeclaredMembers"; }; +grant codeBase "${codebase.lucene-misc-6.1.0-snapshot-3a57bea.jar}" { + // needed to allow shard shrinking to use hard-links if possible via lucenes HardlinkCopyDirectoryWrapper + permission java.nio.file.LinkPermission "hard"; +}; + //// Everything else: grant { diff --git a/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy b/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy index d4ab6e01ab9..d7faab5eeda 100644 --- a/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy +++ b/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy @@ -31,9 +31,11 @@ grant codeBase "${codebase.securemock-1.2.jar}" { permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; }; -grant codeBase "${codebase.lucene-test-framework-6.0.1.jar}" { +grant codeBase "${codebase.lucene-test-framework-6.1.0-snapshot-3a57bea.jar}" { // needed by RamUsageTester permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; + // needed for testing hardlinks in StoreRecoveryTests since we install MockFS + permission java.nio.file.LinkPermission "hard"; }; grant codeBase "${codebase.randomizedtesting-runner-2.3.2.jar}" { diff --git a/core/src/test/java/org/elasticsearch/VersionTests.java b/core/src/test/java/org/elasticsearch/VersionTests.java index 65c91f5daab..862cccab318 100644 --- a/core/src/test/java/org/elasticsearch/VersionTests.java +++ b/core/src/test/java/org/elasticsearch/VersionTests.java @@ -270,7 +270,8 @@ public class VersionTests extends ESTestCase { assertTrue("lucene versions must be " + other + " >= " + version, other.luceneVersion.onOrAfter(version.luceneVersion)); } - if (other.major == version.major && other.minor == version.minor) { + if (other.isAlpha() == false && version.isAlpha() == false + && other.major == version.major && other.minor == version.minor) { assertEquals(other.luceneVersion.major, version.luceneVersion.major); assertEquals(other.luceneVersion.minor, version.luceneVersion.minor); // should we also assert the lucene bugfix version? diff --git a/core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionUtils.java b/core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionUtils.java new file mode 100644 index 00000000000..be4a7b29703 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionUtils.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
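For context on the lucene-misc and test-framework grants above: under the security manager, creating a hard link via Files.createLink requires java.nio.file.LinkPermission "hard" on top of read/write access to both paths, which is what HardlinkCopyDirectoryWrapper needs during shard shrinking. A minimal illustration of the guarded call (the paths are made up):

    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;

    Path existing = Paths.get("/data/source-shard/_0.cfs");  // illustrative path
    Path hardLink = Paths.get("/data/target-shard/_0.cfs");  // illustrative path
    Files.createLink(hardLink, existing); // SecurityException without LinkPermission("hard")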
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.support.master; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.cluster.ClusterState; + +public class TransportMasterNodeActionUtils { + + /** + * Allows tests to directly call {@link TransportMasterNodeAction#masterOperation(MasterNodeRequest, ClusterState, ActionListener)} which is + * a protected method. + */ + public static <Request extends MasterNodeRequest<Request>, Response extends ActionResponse> void runMasterOperation( + TransportMasterNodeAction<Request, Response> masterNodeAction, Request request, ClusterState clusterState, + ActionListener<Response> actionListener) throws Exception { + assert masterNodeAction.checkBlock(request, clusterState) == null; + masterNodeAction.masterOperation(request, clusterState, actionListener); + } +} diff --git a/core/src/test/java/org/elasticsearch/bootstrap/JavaVersionTests.java b/core/src/test/java/org/elasticsearch/bootstrap/JavaVersionTests.java index d2ef349625e..a6e74a47706 100644 --- a/core/src/test/java/org/elasticsearch/bootstrap/JavaVersionTests.java +++ b/core/src/test/java/org/elasticsearch/bootstrap/JavaVersionTests.java @@ -72,4 +72,8 @@ public class JavaVersionTests extends ESTestCase { assertFalse(JavaVersion.isValid(version)); } } + + public void testJava8Compat() { + assertEquals(JavaVersion.parse("1.8"), JavaVersion.parse("8")); + } } \ No newline at end of file diff --git a/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java b/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java index 419104bfe34..494aa7d1095 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java @@ -1,4 +1,3 @@ -/* /* * Licensed to Elasticsearch under one or more contributor * license agreements.
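A sketch of how a test might drive the new TransportMasterNodeActionUtils helper above; the action, request, and cluster-state variables are placeholders, and PlainActionFuture is the usual Elasticsearch adapter from an ActionListener to a blocking get:

    PlainActionFuture<Response> future = PlainActionFuture.newFuture();
    TransportMasterNodeActionUtils.runMasterOperation(action, request, clusterState, future);
    Response response = future.actionGet(); // rethrows any failure from masterOperation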
See the NOTICE file distributed with @@ -46,6 +45,7 @@ import java.nio.file.DirectoryStream; import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.Locale; import java.util.SortedSet; @@ -127,6 +127,44 @@ public class RestoreBackwardsCompatIT extends AbstractSnapshotIntegTestCase { } } + public void testRestoreSnapshotWithMissingChecksum() throws Exception { + final String repo = "test_repo"; + final String snapshot = "test_1"; + final String indexName = "index-2.3.4"; + final String repoFileId = "missing-checksum-repo-2.3.4"; + Path repoFile = getBwcIndicesPath().resolve(repoFileId + ".zip"); + URI repoFileUri = repoFile.toUri(); + URI repoJarUri = new URI("jar:" + repoFileUri.toString() + "!/repo/"); + logger.info("--> creating repository [{}] for repo file [{}]", repo, repoFileId); + assertAcked(client().admin().cluster().preparePutRepository(repo) + .setType("url") + .setSettings(Settings.builder().put("url", repoJarUri.toString()))); + + logger.info("--> get snapshot and check its indices"); + GetSnapshotsResponse getSnapshotsResponse = client().admin().cluster().prepareGetSnapshots(repo).setSnapshots(snapshot).get(); + assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(1)); + SnapshotInfo snapshotInfo = getSnapshotsResponse.getSnapshots().get(0); + assertThat(snapshotInfo.indices(), equalTo(Arrays.asList(indexName))); + + logger.info("--> restoring snapshot"); + RestoreSnapshotResponse response = client().admin().cluster().prepareRestoreSnapshot(repo, snapshot).setRestoreGlobalState(true).setWaitForCompletion(true).get(); + assertThat(response.status(), equalTo(RestStatus.OK)); + RestoreInfo restoreInfo = response.getRestoreInfo(); + assertThat(restoreInfo.successfulShards(), greaterThan(0)); + assertThat(restoreInfo.successfulShards(), equalTo(restoreInfo.totalShards())); + assertThat(restoreInfo.failedShards(), equalTo(0)); + String index = restoreInfo.indices().get(0); + assertThat(index, equalTo(indexName)); + + logger.info("--> check search"); + SearchResponse searchResponse = client().prepareSearch(index).get(); + assertThat(searchResponse.getHits().totalHits(), greaterThan(0L)); + + logger.info("--> cleanup"); + cluster().wipeIndices(restoreInfo.indices().toArray(new String[restoreInfo.indices().size()])); + cluster().wipeTemplates(); + } + private List repoVersions() throws Exception { return listRepoVersions("repo"); } diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java b/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java index 731ecb859ee..7504c778d36 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java @@ -34,6 +34,7 @@ import org.elasticsearch.common.transport.DummyTransportAddress; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.index.Index; import org.elasticsearch.test.ESTestCase; +import org.junit.BeforeClass; import java.util.ArrayList; import java.util.Arrays; @@ -54,11 +55,17 @@ public class ClusterChangedEventTests extends ESTestCase { private static final ClusterName TEST_CLUSTER_NAME = new ClusterName("test"); private static final String NODE_ID_PREFIX = "node_"; - private static final String INITIAL_CLUSTER_ID = UUIDs.randomBase64UUID(); - // the initial indices which every cluster state test starts out with - private static final List initialIndices = 
Arrays.asList(new Index("idx1", UUIDs.randomBase64UUID()), - new Index("idx2", UUIDs.randomBase64UUID()), - new Index("idx3", UUIDs.randomBase64UUID())); + private static String INITIAL_CLUSTER_ID; + private static List initialIndices; + + @BeforeClass + public static void beforeClass() { + INITIAL_CLUSTER_ID = UUIDs.randomBase64UUID(); + // the initial indices which every cluster state test starts out with + initialIndices = Arrays.asList(new Index("idx1", UUIDs.randomBase64UUID()), + new Index("idx2", UUIDs.randomBase64UUID()), + new Index("idx3", UUIDs.randomBase64UUID())); + } /** * Test basic properties of the ClusterChangedEvent class: @@ -140,24 +147,24 @@ public class ClusterChangedEventTests extends ESTestCase { */ public void testIndexMetaDataChange() { final int numNodesInCluster = 3; - final ClusterState originalState = createState(numNodesInCluster, randomBoolean(), initialIndices); - final ClusterState newState = originalState; // doesn't matter for this test, just need a non-null value - final ClusterChangedEvent event = new ClusterChangedEvent("_na_", originalState, newState); + final ClusterState state = createState(numNodesInCluster, randomBoolean(), initialIndices); // test when its not the same IndexMetaData final Index index = initialIndices.get(0); - final IndexMetaData originalIndexMeta = originalState.metaData().index(index); + final IndexMetaData originalIndexMeta = state.metaData().index(index); // make sure the metadata is actually on the cluster state assertNotNull("IndexMetaData for " + index + " should exist on the cluster state", originalIndexMeta); IndexMetaData newIndexMeta = createIndexMetadata(index, originalIndexMeta.getVersion() + 1); - assertTrue("IndexMetaData with different version numbers must be considered changed", event.indexMetaDataChanged(newIndexMeta)); + assertTrue("IndexMetaData with different version numbers must be considered changed", + ClusterChangedEvent.indexMetaDataChanged(originalIndexMeta, newIndexMeta)); // test when it doesn't exist newIndexMeta = createIndexMetadata(new Index("doesntexist", UUIDs.randomBase64UUID())); - assertTrue("IndexMetaData that didn't previously exist should be considered changed", event.indexMetaDataChanged(newIndexMeta)); + assertTrue("IndexMetaData that didn't previously exist should be considered changed", + ClusterChangedEvent.indexMetaDataChanged(originalIndexMeta, newIndexMeta)); // test when its the same IndexMetaData - assertFalse("IndexMetaData should be the same", event.indexMetaDataChanged(originalIndexMeta)); + assertFalse("IndexMetaData should be the same", ClusterChangedEvent.indexMetaDataChanged(originalIndexMeta, originalIndexMeta)); } /** diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java index 307df91c302..1f39706e4f4 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java @@ -161,7 +161,7 @@ public class RandomAllocationDeciderTests extends ESAllocationTestCase { } } - private static final class RandomAllocationDecider extends AllocationDecider { + public static final class RandomAllocationDecider extends AllocationDecider { private final Random random; diff --git a/core/src/test/java/org/elasticsearch/common/UUIDTests.java 
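The ClusterChangedEventTests change above, moving randomized UUIDs from static final initializers into @BeforeClass, is worth imitating: static initializers run at class-load time, outside the randomized-testing seed context, so values drawn there are not reproducible from a test seed. The resulting shape:

    private static String INITIAL_CLUSTER_ID;

    @BeforeClass
    public static void beforeClass() {
        // runs under the suite's randomness context, unlike a static initializer
        INITIAL_CLUSTER_ID = UUIDs.randomBase64UUID();
    }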
b/core/src/test/java/org/elasticsearch/common/UUIDTests.java index f82e1a464d9..d963db2d6f6 100644 --- a/core/src/test/java/org/elasticsearch/common/UUIDTests.java +++ b/core/src/test/java/org/elasticsearch/common/UUIDTests.java @@ -20,7 +20,9 @@ package org.elasticsearch.common; import org.elasticsearch.test.ESTestCase; +import java.security.SecureRandom; import java.util.HashSet; +import java.util.Random; import java.util.Set; public class UUIDTests extends ESTestCase { @@ -41,7 +43,18 @@ public class UUIDTests extends ESTestCase { } public void testThreadedRandomUUID() { - testUUIDThreaded(randomUUIDGen); + // we can not use a reproducible source of randomness for this + // test, the test explicitly relies on each thread having a + // unique source of randomness; thus, we fake what production + // code does when using a RandomBasedUUIDGenerator + testUUIDThreaded(new RandomBasedUUIDGenerator() { + private final SecureRandom sr = SecureRandomHolder.INSTANCE; + + @Override + public String getBase64UUID() { + return getBase64UUID(sr); + } + }); } Set verifyUUIDSet(int count, UUIDGenerator uuidSource) { @@ -98,6 +111,6 @@ public class UUIDTests extends ESTestCase { for (UUIDGenRunner runner : runners) { globalSet.addAll(runner.uuidSet); } - assertEquals(count*uuids, globalSet.size()); + assertEquals(count * uuids, globalSet.size()); } } diff --git a/core/src/test/java/org/elasticsearch/http/netty/NettyHttpRequestSizeLimitIT.java b/core/src/test/java/org/elasticsearch/http/netty/NettyHttpRequestSizeLimitIT.java index ba6b4438aaa..e1135f807cf 100644 --- a/core/src/test/java/org/elasticsearch/http/netty/NettyHttpRequestSizeLimitIT.java +++ b/core/src/test/java/org/elasticsearch/http/netty/NettyHttpRequestSizeLimitIT.java @@ -54,7 +54,6 @@ public class NettyHttpRequestSizeLimitIT extends ESIntegTestCase { .build(); } - @TestLogging("_root:DEBUG,org.elasticsearch.common.breaker:TRACE,org.elasticsearch.test:TRACE,org.elasticsearch.transport:TRACE") public void testLimitsInFlightRequests() throws Exception { ensureGreen(); diff --git a/core/src/test/java/org/elasticsearch/index/analysis/CharFilterTests.java b/core/src/test/java/org/elasticsearch/index/analysis/CharFilterTests.java index 3c791e72b5f..2b2c9288f17 100644 --- a/core/src/test/java/org/elasticsearch/index/analysis/CharFilterTests.java +++ b/core/src/test/java/org/elasticsearch/index/analysis/CharFilterTests.java @@ -65,4 +65,22 @@ public class CharFilterTests extends ESTokenStreamTestCase { // Repeat one more time to make sure that char filter is reinitialized correctly assertTokenStreamContents(analyzer1.tokenStream("test", "hello!"), new String[]{"hello"}); } + + public void testPatternReplaceCharFilter() throws Exception { + Settings settings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put("index.analysis.char_filter.my_mapping.type", "pattern_replace") + .put("index.analysis.char_filter.my_mapping.pattern", "ab*") + .put("index.analysis.char_filter.my_mapping.replacement", "oo") + .put("index.analysis.char_filter.my_mapping.flags", "CASE_INSENSITIVE") + .put("index.analysis.analyzer.custom_with_char_filter.tokenizer", "standard") + .putArray("index.analysis.analyzer.custom_with_char_filter.char_filter", "my_mapping") + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("test", settings); + AnalysisService analysisService = new AnalysisRegistry(null, new 
Environment(settings)).build(idxSettings); + NamedAnalyzer analyzer1 = analysisService.analyzer("custom_with_char_filter"); + + assertTokenStreamContents(analyzer1.tokenStream("test", "faBBbBB aBbbbBf"), new String[]{"foo", "oof"}); + } } diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractGeoFieldDataTestCase.java b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractGeoFieldDataTestCase.java index 062774bf2f7..4e4d638d355 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractGeoFieldDataTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractGeoFieldDataTestCase.java @@ -22,9 +22,9 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.StringField; import org.apache.lucene.spatial.geopoint.document.GeoPointField; -import org.apache.lucene.spatial.util.GeoUtils; import org.elasticsearch.Version; import org.elasticsearch.common.geo.GeoPoint; +import org.elasticsearch.common.geo.GeoUtils; import static org.elasticsearch.test.geo.RandomShapeGenerator.randomPoint; import static org.hamcrest.Matchers.allOf; @@ -105,8 +105,8 @@ public abstract class AbstractGeoFieldDataTestCase extends AbstractFieldDataImpl assertThat(docCount, greaterThan(0)); for (int i = 0; i < docCount; ++i) { final GeoPoint point = values.valueAt(i); - assertThat(point.lat(), allOf(greaterThanOrEqualTo(GeoUtils.MIN_LAT_INCL), lessThanOrEqualTo(GeoUtils.MAX_LAT_INCL))); - assertThat(point.lon(), allOf(greaterThanOrEqualTo(GeoUtils.MIN_LON_INCL), lessThanOrEqualTo(GeoUtils.MAX_LON_INCL))); + assertThat(point.lat(), allOf(greaterThanOrEqualTo(GeoUtils.MIN_LAT), lessThanOrEqualTo(GeoUtils.MAX_LAT))); + assertThat(point.lon(), allOf(greaterThanOrEqualTo(GeoUtils.MIN_LON), lessThanOrEqualTo(GeoUtils.MAX_LON))); } } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/DateFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/DateFieldMapperTests.java index ee19d094a3f..a3909637548 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/core/DateFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/DateFieldMapperTests.java @@ -302,4 +302,19 @@ public class DateFieldMapperTests extends ESSingleNodeTestCase { assertEquals(1457654400000L, dvField.numericValue().longValue()); assertFalse(dvField.fieldType().stored()); } + + public void testNullConfigValuesFail() throws MapperParsingException, IOException { + String mapping = XContentFactory.jsonBuilder().startObject() + .startObject("type") + .startObject("properties") + .startObject("field") + .field("type", "date") + .field("format", (String) null) + .endObject() + .endObject() + .endObject().endObject().string(); + + Exception e = expectThrows(MapperParsingException.class, () -> parser.parse("type", new CompressedXContent(mapping))); + assertEquals("[format] must not have a [null] value", e.getMessage()); + } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/TextFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/TextFieldMapperTests.java index de14f38d6a9..224d512cb53 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/core/TextFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/TextFieldMapperTests.java @@ -35,6 +35,7 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.mapper.DocumentMapper; import 
org.elasticsearch.index.mapper.DocumentMapperParser; +import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.MapperService.MergeReason; import org.elasticsearch.index.mapper.core.TextFieldMapper.TextFieldType; import org.elasticsearch.index.mapper.ParsedDocument; @@ -458,4 +459,19 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase { assertThat(fieldType.fielddataMaxFrequency(), equalTo((double) Integer.MAX_VALUE)); assertThat(fieldType.fielddataMinSegmentSize(), equalTo(1000)); } + + public void testNullConfigValuesFail() throws MapperParsingException, IOException { + String mapping = XContentFactory.jsonBuilder().startObject() + .startObject("type") + .startObject("properties") + .startObject("field") + .field("type", "text") + .field("analyzer", (String) null) + .endObject() + .endObject() + .endObject().endObject().string(); + + Exception e = expectThrows(MapperParsingException.class, () -> parser.parse("type", new CompressedXContent(mapping))); + assertEquals("[analyzer] must not have a [null] value", e.getMessage()); + } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/SimpleExternalMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/SimpleExternalMappingTests.java index 177d3b7b0f7..b2e1989454c 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/SimpleExternalMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/SimpleExternalMappingTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.mapper.externalvalues; -import org.apache.lucene.spatial.util.GeoEncodingUtils; +import org.apache.lucene.spatial.geopoint.document.GeoPointField; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.compress.CompressedXContent; @@ -88,7 +88,7 @@ public class SimpleExternalMappingTests extends ESSingleNodeTestCase { if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().getField("field.point").stringValue(), is("42.0,51.0")); } else { - assertThat(Long.parseLong(doc.rootDoc().getField("field.point").stringValue()), is(GeoEncodingUtils.mortonHash(42.0, 51.0))); + assertThat(Long.parseLong(doc.rootDoc().getField("field.point").stringValue()), is(GeoPointField.encodeLatLon(42.0, 51.0))); } assertThat(doc.rootDoc().getField("field.shape"), notNullValue()); @@ -146,7 +146,7 @@ public class SimpleExternalMappingTests extends ESSingleNodeTestCase { if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().getField("field.point").stringValue(), is("42.0,51.0")); } else { - assertThat(Long.parseLong(doc.rootDoc().getField("field.point").stringValue()), is(GeoEncodingUtils.mortonHash(42.0, 51.0))); + assertThat(Long.parseLong(doc.rootDoc().getField("field.point").stringValue()), is(GeoPointField.encodeLatLon(42.0, 51.0))); } assertThat(doc.rootDoc().getField("field.shape"), notNullValue()); @@ -208,7 +208,7 @@ public class SimpleExternalMappingTests extends ESSingleNodeTestCase { if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().getField("field.point").stringValue(), is("42.0,51.0")); } else { - assertThat(Long.parseLong(doc.rootDoc().getField("field.point").stringValue()), is(GeoEncodingUtils.mortonHash(42.0, 51.0))); + assertThat(Long.parseLong(doc.rootDoc().getField("field.point").stringValue()), is(GeoPointField.encodeLatLon(42.0, 51.0))); } assertThat(doc.rootDoc().getField("field.shape"), notNullValue()); diff --git 
a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java index a1fdb7ec60f..202afd7a4b1 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.mapper.geo; import org.apache.lucene.index.IndexableField; +import org.apache.lucene.spatial.geopoint.document.GeoPointField; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; @@ -46,7 +47,6 @@ import java.util.List; import java.util.Map; import java.lang.NumberFormatException; -import static org.apache.lucene.spatial.util.GeoEncodingUtils.mortonHash; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.elasticsearch.common.geo.GeoHashUtils.stringEncode; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; @@ -90,7 +90,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { if (indexCreatedBefore22 == true) { assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3")); } else { - assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(mortonHash(1.2, 1.3))); + assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(GeoPointField.encodeLatLon(1.2, 1.3))); } } @@ -197,7 +197,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().get("point"), equalTo("89.0,1.0")); } else { - assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(mortonHash(89.0, 1.0))); + assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(GeoPointField.encodeLatLon(89.0, 1.0))); } doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() @@ -209,7 +209,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().get("point"), equalTo("-89.0,-1.0")); } else { - assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(mortonHash(-89.0, -1.0))); + assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(GeoPointField.encodeLatLon(-89.0, -1.0))); } doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() @@ -221,7 +221,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().get("point"), equalTo("-1.0,-179.0")); } else { - assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(mortonHash(-1.0, -179.0))); + assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(GeoPointField.encodeLatLon(-1.0, -179.0))); } } @@ -408,7 +408,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3")); } else { - assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(mortonHash(1.2, 1.3))); + assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(GeoPointField.encodeLatLon(1.2, 1.3))); } } @@ -441,7 +441,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { assertThat(doc.rootDoc().getFields("point.lat")[1].numericValue().doubleValue(), equalTo(1.2)); assertThat(doc.rootDoc().getFields("point.lon")[1].numericValue().doubleValue(), 
equalTo(1.3)); // indexed hash - assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(mortonHash(1.2, 1.3))); + assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoPointField.encodeLatLon(1.2, 1.3))); // point field for 2nd value assertThat(doc.rootDoc().getFields("point.lat")[2].numericValue().doubleValue(), equalTo(1.4)); @@ -450,7 +450,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { assertThat(doc.rootDoc().getFields("point.lat")[3].numericValue().doubleValue(), equalTo(1.4)); assertThat(doc.rootDoc().getFields("point.lon")[3].numericValue().doubleValue(), equalTo(1.5)); // indexed hash - assertThat(Long.parseLong(doc.rootDoc().getFields("point")[1].stringValue()), equalTo(mortonHash(1.4, 1.5))); + assertThat(Long.parseLong(doc.rootDoc().getFields("point")[1].stringValue()), equalTo(GeoPointField.encodeLatLon(1.4, 1.5))); } else { assertThat(doc.rootDoc().getFields("point.lat").length, equalTo(2)); assertThat(doc.rootDoc().getFields("point.lon").length, equalTo(2)); @@ -459,14 +459,14 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().getFields("point")[0].stringValue(), equalTo("1.2,1.3")); } else { - assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(mortonHash(1.2, 1.3))); + assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoPointField.encodeLatLon(1.2, 1.3))); } assertThat(doc.rootDoc().getFields("point.lat")[1].numericValue().doubleValue(), equalTo(1.4)); assertThat(doc.rootDoc().getFields("point.lon")[1].numericValue().doubleValue(), equalTo(1.5)); if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().getFields("point")[1].stringValue(), equalTo("1.4,1.5")); } else { - assertThat(Long.parseLong(doc.rootDoc().getFields("point")[1].stringValue()), equalTo(mortonHash(1.4, 1.5))); + assertThat(Long.parseLong(doc.rootDoc().getFields("point")[1].stringValue()), equalTo(GeoPointField.encodeLatLon(1.4, 1.5))); } } } @@ -491,7 +491,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3")); } else { - assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(mortonHash(1.2, 1.3))); + assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoPointField.encodeLatLon(1.2, 1.3))); } } @@ -517,7 +517,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3")); } else { - assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(mortonHash(1.2, 1.3))); + assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoPointField.encodeLatLon(1.2, 1.3))); } } @@ -559,12 +559,12 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().getFields("point")[0].stringValue(), equalTo("1.2,1.3")); } else { - assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(mortonHash(1.2, 1.3))); + assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoPointField.encodeLatLon(1.2, 1.3))); } if (version.before(Version.V_2_2_0)) { 
assertThat(doc.rootDoc().getFields("point")[1].stringValue(), equalTo("1.4,1.5")); } else { - assertThat(Long.parseLong(doc.rootDoc().getFields("point")[1].stringValue()), equalTo(mortonHash(1.4, 1.5))); + assertThat(Long.parseLong(doc.rootDoc().getFields("point")[1].stringValue()), equalTo(GeoPointField.encodeLatLon(1.4, 1.5))); } } @@ -588,7 +588,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3")); } else { - assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(mortonHash(1.2, 1.3))); + assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoPointField.encodeLatLon(1.2, 1.3))); } } @@ -613,7 +613,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3")); } else { - assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(mortonHash(1.2, 1.3))); + assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoPointField.encodeLatLon(1.2, 1.3))); } } @@ -639,7 +639,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3")); } else { - assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(mortonHash(1.2, 1.3))); + assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoPointField.encodeLatLon(1.2, 1.3))); } } @@ -669,14 +669,14 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3")); } else { - assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(mortonHash(1.2, 1.3))); + assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoPointField.encodeLatLon(1.2, 1.3))); } assertThat(doc.rootDoc().getFields("point.lat")[1].numericValue().doubleValue(), equalTo(1.4)); assertThat(doc.rootDoc().getFields("point.lon")[1].numericValue().doubleValue(), equalTo(1.5)); if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3")); } else { - assertThat(Long.parseLong(doc.rootDoc().getFields("point")[1].stringValue()), equalTo(mortonHash(1.4, 1.5))); + assertThat(Long.parseLong(doc.rootDoc().getFields("point")[1].stringValue()), equalTo(GeoPointField.encodeLatLon(1.4, 1.5))); } } else { assertThat(doc.rootDoc().getFields("point.lat").length, equalTo(4)); @@ -685,12 +685,12 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { assertThat(doc.rootDoc().getFields("point.lat")[1].numericValue().doubleValue(), equalTo(1.2)); assertThat(doc.rootDoc().getFields("point.lon")[0].numericValue().doubleValue(), equalTo(1.3)); assertThat(doc.rootDoc().getFields("point.lon")[1].numericValue().doubleValue(), equalTo(1.3)); - assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(mortonHash(1.2, 1.3))); + assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoPointField.encodeLatLon(1.2, 1.3))); assertThat(doc.rootDoc().getFields("point.lat")[2].numericValue().doubleValue(), equalTo(1.4)); assertThat(doc.rootDoc().getFields("point.lat")[3].numericValue().doubleValue(), equalTo(1.4)); 
assertThat(doc.rootDoc().getFields("point.lon")[2].numericValue().doubleValue(), equalTo(1.5)); assertThat(doc.rootDoc().getFields("point.lon")[3].numericValue().doubleValue(), equalTo(1.5)); - assertThat(Long.parseLong(doc.rootDoc().getFields("point")[1].stringValue()), equalTo(mortonHash(1.4, 1.5))); + assertThat(Long.parseLong(doc.rootDoc().getFields("point")[1].stringValue()), equalTo(GeoPointField.encodeLatLon(1.4, 1.5))); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeohashMappingGeoPointTests.java b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeohashMappingGeoPointTests.java index 837cef6a17c..90528c9a8f4 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeohashMappingGeoPointTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeohashMappingGeoPointTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.mapper.geo; +import org.apache.lucene.spatial.geopoint.document.GeoPointField; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.compress.CompressedXContent; @@ -35,7 +36,6 @@ import org.elasticsearch.test.VersionUtils; import java.util.Collection; import static org.elasticsearch.common.geo.GeoHashUtils.stringEncode; -import static org.apache.lucene.spatial.util.GeoEncodingUtils.mortonHash; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; @@ -72,7 +72,7 @@ public class GeohashMappingGeoPointTests extends ESSingleNodeTestCase { if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3")); } else { - assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(mortonHash(1.2, 1.3))); + assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(GeoPointField.encodeLatLon(1.2, 1.3))); } } @@ -96,7 +96,7 @@ public class GeohashMappingGeoPointTests extends ESSingleNodeTestCase { if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3")); } else { - assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(mortonHash(1.2, 1.3))); + assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(GeoPointField.encodeLatLon(1.2, 1.3))); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/ip/IpFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/ip/IpFieldTypeTests.java index 522a35ccd5d..884f52cc0ed 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/ip/IpFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/ip/IpFieldTypeTests.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.mapper.ip; import java.net.InetAddress; import org.apache.lucene.document.InetAddressPoint; -import org.apache.lucene.document.XInetAddressPoint; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.util.BytesRef; @@ -69,11 +68,11 @@ public class IpFieldTypeTests extends FieldTypeTestCase { ip = "2001:db8::2:1"; String prefix = ip + "/64"; - assertEquals(XInetAddressPoint.newPrefixQuery("field", InetAddresses.forString(ip), 64), ft.termQuery(prefix, null)); + assertEquals(InetAddressPoint.newPrefixQuery("field", InetAddresses.forString(ip), 64), ft.termQuery(prefix, null)); ip = "192.168.1.7"; prefix = ip + "/16"; - assertEquals(XInetAddressPoint.newPrefixQuery("field", InetAddresses.forString(ip), 16), ft.termQuery(prefix, null)); + 
assertEquals(InetAddressPoint.newPrefixQuery("field", InetAddresses.forString(ip), 16), ft.termQuery(prefix, null)); ft.setIndexOptions(IndexOptions.NONE); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, @@ -88,7 +87,7 @@ public class IpFieldTypeTests extends FieldTypeTestCase { assertEquals( InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), - XInetAddressPoint.MAX_VALUE), + InetAddressPoint.MAX_VALUE), ft.rangeQuery(null, null, randomBoolean(), randomBoolean())); assertEquals( @@ -106,13 +105,13 @@ public class IpFieldTypeTests extends FieldTypeTestCase { assertEquals( InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::"), - XInetAddressPoint.MAX_VALUE), + InetAddressPoint.MAX_VALUE), ft.rangeQuery("2001:db8::", null, true, randomBoolean())); assertEquals( InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::1"), - XInetAddressPoint.MAX_VALUE), + InetAddressPoint.MAX_VALUE), ft.rangeQuery("2001:db8::", null, false, randomBoolean())); assertEquals( @@ -152,7 +151,7 @@ public class IpFieldTypeTests extends FieldTypeTestCase { assertEquals( InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::1:0:0:0"), - XInetAddressPoint.MAX_VALUE), + InetAddressPoint.MAX_VALUE), // same lo/hi values but inclusive=false so this won't match anything ft.rangeQuery("255.255.255.255", "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", false, true)); diff --git a/core/src/test/java/org/elasticsearch/index/query/GeoDistanceQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/GeoDistanceQueryBuilderTests.java index 7780d218b52..387df7ac3ca 100644 --- a/core/src/test/java/org/elasticsearch/index/query/GeoDistanceQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/GeoDistanceQueryBuilderTests.java @@ -22,10 +22,10 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.spatial.geopoint.search.GeoPointDistanceQuery; -import org.apache.lucene.spatial.util.GeoEncodingUtils; import org.elasticsearch.Version; import org.elasticsearch.common.geo.GeoDistance; import org.elasticsearch.common.geo.GeoPoint; +import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.index.search.geo.GeoDistanceRangeQuery; import org.elasticsearch.test.AbstractQueryTestCase; @@ -213,7 +213,7 @@ public class GeoDistanceQueryBuilderTests extends AbstractQueryTestCase queryBuilderPoints = queryBuilder.points(); - double[] lats = geoQuery.getLats(); - double[] lons = geoQuery.getLons(); + assertEquals(1, geoQuery.getPolygons().length); + double[] lats = geoQuery.getPolygons()[0].getPolyLats(); + double[] lons = geoQuery.getPolygons()[0].getPolyLons(); assertThat(lats.length, equalTo(queryBuilderPoints.size())); assertThat(lons.length, equalTo(queryBuilderPoints.size())); for (int i=0; i < queryBuilderPoints.size(); ++i) { @@ -321,8 +322,9 @@ public class GeoPolygonQueryBuilderTests extends AbstractQueryTestCase mappingConsumer = (type, mapping) -> { @@ -1575,7 +1565,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { } } routing = ShardRoutingHelper.moveToStarted(routing); - newShard.updateRoutingEntry(routing, true); + newShard.updateRoutingEntry(routing); assertHitCount(client().prepareSearch("index_1").get(), 2); } // now check that it's persistent ie. 
that the added shards are committed @@ -1587,7 +1577,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { newShard.markAsRecovering("store", new RecoveryState(newShard.shardId(), routing.primary(), RecoveryState.Type.LOCAL_SHARDS, localNode, localNode)); assertTrue(newShard.recoverFromStore()); routing = ShardRoutingHelper.moveToStarted(routing); - newShard.updateRoutingEntry(routing, true); + newShard.updateRoutingEntry(routing); assertHitCount(client().prepareSearch("index_1").get(), 2); } diff --git a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index 2543668f557..4938f686f60 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -155,10 +155,12 @@ public class RefreshListenersTests extends ESTestCase { DummyRefreshListener forcingListener = new DummyRefreshListener(); listeners.addOrNotify(index.getTranslogLocation(), forcingListener); assertTrue("Forced listener wasn't forced?", forcingListener.forcedRefresh.get()); + forcingListener.assertNoError(); // That forces all the listeners through. It would be on the listener ThreadPool but we've made all of those execute immediately. for (DummyRefreshListener listener : nonForcedListeners) { assertEquals("Expected listener called with unforced refresh!", Boolean.FALSE, listener.forcedRefresh.get()); + listener.assertNoError(); } assertFalse(listeners.refreshNeeded()); } @@ -174,8 +176,9 @@ public class RefreshListenersTests extends ESTestCase { } DummyRefreshListener listener = new DummyRefreshListener(); - listeners.addOrNotify(index.getTranslogLocation(), listener); + assertTrue(listeners.addOrNotify(index.getTranslogLocation(), listener)); assertFalse(listener.forcedRefresh.get()); + listener.assertNoError(); } /** @@ -192,13 +195,17 @@ public class RefreshListenersTests extends ESTestCase { }); refresher.start(); try { - for (int i = 0; i < 100; i++) { + for (int i = 0; i < 1000; i++) { Engine.Index index = index("1"); - DummyRefreshListener listener = new DummyRefreshListener(); - listeners.addOrNotify(index.getTranslogLocation(), listener); - assertBusy(() -> assertNotNull(listener.forcedRefresh.get())); + boolean immediate = listeners.addOrNotify(index.getTranslogLocation(), listener); + if (immediate) { + assertNotNull(listener.forcedRefresh.get()); + } else { + assertBusy(() -> assertNotNull(listener.forcedRefresh.get())); + } assertFalse(listener.forcedRefresh.get()); + listener.assertNoError(); } } finally { run.set(false); @@ -234,6 +241,7 @@ public class RefreshListenersTests extends ESTestCase { if (threadCount < maxListeners) { assertFalse(listener.forcedRefresh.get()); } + listener.assertNoError(); Engine.Get get = new Engine.Get(false, index.uid()); try (Engine.GetResult getResult = engine.get(get)) { @@ -281,13 +289,24 @@ public class RefreshListenersTests extends ESTestCase { /** * When the listener is called this captures it's only argument. 
*/ - private AtomicReference forcedRefresh = new AtomicReference<>(); + AtomicReference forcedRefresh = new AtomicReference<>(); + private volatile Throwable error; @Override public void accept(Boolean forcedRefresh) { - assertNotNull(forcedRefresh); - Boolean oldValue = this.forcedRefresh.getAndSet(forcedRefresh); - assertNull("Listener called twice", oldValue); + try { + assertNotNull(forcedRefresh); + Boolean oldValue = this.forcedRefresh.getAndSet(forcedRefresh); + assertNull("Listener called twice", oldValue); + } catch (Throwable e) { + error = e; + } + } + + public void assertNoError() { + if (error != null) { + throw new RuntimeException(error); + } } } } diff --git a/core/src/test/java/org/elasticsearch/index/shard/StoreRecoveryTests.java b/core/src/test/java/org/elasticsearch/index/shard/StoreRecoveryTests.java index ffb64f991cc..f31733dc477 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/StoreRecoveryTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/StoreRecoveryTests.java @@ -31,7 +31,6 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexOutput; import org.apache.lucene.util.IOUtils; -import org.apache.lucene.util.Version; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.test.ESTestCase; @@ -74,11 +73,9 @@ public class StoreRecoveryTests extends ESTestCase { assertEquals(numFiles, targetNumFiles); assertEquals(indexStats.totalFileCount(), targetNumFiles); if (hardLinksSupported(createTempDir())) { - assertEquals("upgrade to HardlinkCopyDirectoryWrapper in Lucene 6.1", Version.LATEST, Version.LUCENE_6_0_1); - // assertEquals(indexStats.reusedFileCount(), targetNumFiles); -- uncomment this once upgraded to Lucene 6.1 - assertEquals(indexStats.reusedFileCount(), 0); + assertEquals(targetNumFiles, indexStats.reusedFileCount()); } else { - assertEquals(indexStats.reusedFileCount(), 0); + assertEquals(0, indexStats.reusedFileCount(), 0); } DirectoryReader reader = DirectoryReader.open(target); SegmentInfos segmentCommitInfos = SegmentInfos.readLatestCommit(target); diff --git a/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java b/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java index 67c431135a0..70eacaafedb 100644 --- a/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java +++ b/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java @@ -27,7 +27,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo.Fields; +import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo; import org.elasticsearch.index.store.StoreFileMetaData; import org.elasticsearch.test.ESTestCase; @@ -105,11 +105,11 @@ public class FileInfoTests extends ESTestCase { XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); builder.startObject(); - builder.field(Fields.NAME, name); - builder.field(Fields.PHYSICAL_NAME, physicalName); - builder.field(Fields.LENGTH, length); - builder.field(Fields.WRITTEN_BY, Version.LATEST.toString()); - builder.field(Fields.CHECKSUM, "666"); + builder.field(FileInfo.NAME, name); + builder.field(FileInfo.PHYSICAL_NAME, 
diff --git a/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java b/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java
index 67c431135a0..70eacaafedb 100644
--- a/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java
+++ b/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java
@@ -27,7 +27,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
-import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo.Fields;
+import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo;
 import org.elasticsearch.index.store.StoreFileMetaData;
 import org.elasticsearch.test.ESTestCase;
@@ -105,11 +105,11 @@ public class FileInfoTests extends ESTestCase {
             XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
             builder.startObject();
-            builder.field(Fields.NAME, name);
-            builder.field(Fields.PHYSICAL_NAME, physicalName);
-            builder.field(Fields.LENGTH, length);
-            builder.field(Fields.WRITTEN_BY, Version.LATEST.toString());
-            builder.field(Fields.CHECKSUM, "666");
+            builder.field(FileInfo.NAME, name);
+            builder.field(FileInfo.PHYSICAL_NAME, physicalName);
+            builder.field(FileInfo.LENGTH, length);
+            builder.field(FileInfo.WRITTEN_BY, Version.LATEST.toString());
+            builder.field(FileInfo.CHECKSUM, "666");
             builder.endObject();
             byte[] xContent = builder.bytes().toBytes();
diff --git a/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java b/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java
index 2722fc9d9d3..1f1b758f349 100644
--- a/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java
+++ b/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java
@@ -455,7 +455,7 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase {
             assertEquals(1, imc.availableShards().size());
             assertTrue(newShard.recoverFromStore());
             assertTrue("we should have flushed in IMC at least once but did: " + flushes.get(), flushes.get() >= 1);
-            newShard.updateRoutingEntry(routing.moveToStarted(), true);
+            newShard.updateRoutingEntry(routing.moveToStarted());
         } finally {
             newShard.close("simon says", false);
         }
diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java b/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java
index 8d59da7da01..92a411a95de 100644
--- a/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java
+++ b/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java
@@ -102,13 +102,13 @@ public class IndicesLifecycleListenerSingleNodeTests extends ESSingleNodeTestCase {
         newRouting = ShardRoutingHelper.moveToUnassigned(newRouting, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "boom"));
         newRouting = ShardRoutingHelper.initialize(newRouting, nodeId);
         IndexShard shard = index.createShard(newRouting);
-        shard.updateRoutingEntry(newRouting, true);
+        shard.updateRoutingEntry(newRouting);
         final DiscoveryNode localNode = new DiscoveryNode("foo", DummyTransportAddress.INSTANCE,
                 emptyMap(), emptySet(), Version.CURRENT);
         shard.markAsRecovering("store", new RecoveryState(shard.shardId(), newRouting.primary(), RecoveryState.Type.SNAPSHOT,
                 newRouting.restoreSource(), localNode));
         shard.recoverFromStore();
         newRouting = ShardRoutingHelper.moveToStarted(newRouting);
-        shard.updateRoutingEntry(newRouting, true);
+        shard.updateRoutingEntry(newRouting);
     } finally {
         indicesService.deleteIndex(idx, "simon says");
     }
diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesQueryCacheTests.java b/core/src/test/java/org/elasticsearch/indices/IndicesQueryCacheTests.java
index 5a4aa2e6b24..cd94ee0f8e9 100644
--- a/core/src/test/java/org/elasticsearch/indices/IndicesQueryCacheTests.java
+++ b/core/src/test/java/org/elasticsearch/indices/IndicesQueryCacheTests.java
@@ -35,6 +35,7 @@ import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.IndexModule;
 import org.elasticsearch.index.cache.query.QueryCacheStats;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.indices.IndicesQueryCache;
@@ -54,12 +55,12 @@ public class IndicesQueryCacheTests extends ESTestCase {
 
         @Override
         public boolean equals(Object obj) {
-            return super.equals(obj) && id == ((DummyQuery) obj).id;
+            return sameClassAs(obj) && id == ((DummyQuery) obj).id;
         }
 
         @Override
         public int hashCode() {
-            return 31 * super.hashCode() + id;
+            return 31 * classHash() + id;
         }
 
         @Override
@@ -93,6 +94,7 @@ public class IndicesQueryCacheTests extends ESTestCase {
         Settings settings = Settings.builder()
                 .put(IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING.getKey(), 10)
+                .put(IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.getKey(), true)
                 .build();
         IndicesQueryCache cache = new IndicesQueryCache(settings);
         s.setQueryCache(cache);
@@ -173,6 +175,7 @@ public class IndicesQueryCacheTests extends ESTestCase {
         Settings settings = Settings.builder()
                 .put(IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING.getKey(), 10)
+                .put(IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.getKey(), true)
                 .build();
         IndicesQueryCache cache = new IndicesQueryCache(settings);
         s1.setQueryCache(cache);
@@ -298,6 +301,7 @@ public class IndicesQueryCacheTests extends ESTestCase {
         Settings settings = Settings.builder()
                 .put(IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING.getKey(), 10)
+                .put(IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.getKey(), true)
                 .build();
         IndicesQueryCache cache = new IndicesQueryCache(settings);
         s1.setQueryCache(cache);
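The DummyQuery change above follows the Lucene 6.1 Query equality cleanup (LUCENE-7277), which pushed equals/hashCode down to subclasses and added the final helpers sameClassAs() and classHash() to replace the old super.equals()/super.hashCode() calls; the INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING additions presumably keep Lucene's segment-size caching heuristics from making the hit/miss counts in these tests nondeterministic. Reduced to its essentials, a compliant query looks like this (a trimmed illustration, not the full test class):

    import org.apache.lucene.search.Query;

    final class IdQuery extends Query {
        private final int id;

        IdQuery(int id) {
            this.id = id;
        }

        @Override
        public boolean equals(Object obj) {
            // sameClassAs() replaces super.equals(): a null-safe exact-class check
            return sameClassAs(obj) && id == ((IdQuery) obj).id;
        }

        @Override
        public int hashCode() {
            // classHash() replaces super.hashCode(): a stable per-class seed
            return 31 * classHash() + id;
        }

        @Override
        public String toString(String field) {
            return "IdQuery(id=" + id + ")";
        }
    }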
diff --git a/core/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java b/core/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java
new file mode 100644
index 00000000000..69bee510710
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java
@@ -0,0 +1,318 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.cluster;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.Callback;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.NodeServicesProvider;
+import org.elasticsearch.index.shard.IndexEventListener;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.IndexShardState;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndex;
+import org.elasticsearch.indices.cluster.IndicesClusterStateService.Shard;
+import org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices;
+import org.elasticsearch.indices.recovery.RecoveryState;
+import org.elasticsearch.indices.recovery.RecoveryTargetService;
+import org.elasticsearch.repositories.RepositoriesService;
+import org.elasticsearch.test.ESTestCase;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+
+import static java.util.Collections.emptyMap;
+import static java.util.Collections.unmodifiableMap;
+import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Abstract base class for tests against {@link IndicesClusterStateService}
+ */
+public abstract class AbstractIndicesClusterStateServiceTestCase extends ESTestCase {
+
+
+    protected void failRandomly() {
+        if (rarely()) {
+            throw new RuntimeException("dummy test failure");
+        }
+    }
+
+    /**
+     * Checks if cluster state matches internal state of IndicesClusterStateService instance
+     *
+     * @param state cluster state used for matching
+     */
+    public static void assertClusterStateMatchesNodeState(ClusterState state, IndicesClusterStateService indicesClusterStateService) {
+        AllocatedIndices<? extends Shard, ? extends AllocatedIndex<? extends Shard>> indicesService =
+            indicesClusterStateService.indicesService;
+        ConcurrentMap<ShardId, ShardRouting> failedShardsCache = indicesClusterStateService.failedShardsCache;
+        RoutingNode localRoutingNode = state.getRoutingNodes().node(state.getNodes().getLocalNodeId());
+        if (localRoutingNode != null) {
+            // check that all shards in local routing nodes have been allocated
+            for (ShardRouting shardRouting : localRoutingNode) {
+                Index index = shardRouting.index();
+                IndexMetaData indexMetaData = state.metaData().getIndexSafe(index);
+
+                Shard shard = indicesService.getShardOrNull(shardRouting.shardId());
+                ShardRouting failedShard = failedShardsCache.get(shardRouting.shardId());
+                if (shard == null && failedShard == null) {
+                    fail("Shard with id " + shardRouting + " expected but missing in indicesService and failedShardsCache");
+                }
+                if (failedShard != null && failedShard.isSameAllocation(shardRouting) == false) {
+                    fail("Shard cache has not been properly cleaned for " + failedShard);
+                }
+
+                if (shard != null) {
+                    AllocatedIndex<? extends Shard> indexService = indicesService.indexService(index);
+                    assertTrue("Index " + index + " expected but missing in indicesService", indexService != null);
+
+                    // index metadata has been updated
+                    assertThat(indexService.getIndexSettings().getIndexMetaData(), equalTo(indexMetaData));
+                    // shard has been created
+                    if (failedShard == null) {
+                        assertTrue("Shard with id " + shardRouting + " expected but missing in indexService",
+                            shard != null);
+                        // shard has latest shard routing
+                        assertThat(shard.routingEntry(), equalTo(shardRouting));
+                    }
+                }
+            }
+        }
+
+        // all other shards / indices have been cleaned up
+        for (AllocatedIndex<? extends Shard> indexService : indicesService) {
+            assertTrue(state.metaData().getIndexSafe(indexService.index()) != null);
+
+            boolean shardsFound = false;
+            for (Shard shard : indexService) {
+                shardsFound = true;
+                ShardRouting persistedShardRouting = shard.routingEntry();
+                boolean found = false;
+                for (ShardRouting shardRouting : localRoutingNode) {
+                    if (persistedShardRouting.equals(shardRouting)) {
+                        found = true;
+                    }
+                }
+                assertTrue(found);
+            }
+
+            if (shardsFound == false) {
+                // check if we have shards of that index in failedShardsCache
+                // if yes, we might not have cleaned the index as failedShardsCache can be populated by another thread
+                assertFalse(failedShardsCache.keySet().stream().noneMatch(shardId -> shardId.getIndex().equals(indexService.index())));
+            }
+
+        }
+    }
+
+    /**
+     * Mock for {@link IndicesService}
+     */
+    protected class MockIndicesService implements AllocatedIndices<MockIndexShard, MockIndexService> {
+        private volatile Map<String, MockIndexService> indices = emptyMap();
+
+        @Override
+        public synchronized MockIndexService createIndex(NodeServicesProvider nodeServicesProvider, IndexMetaData indexMetaData,
+                                                         List<IndexEventListener> buildInIndexListener) throws IOException {
+            MockIndexService indexService = new MockIndexService(new IndexSettings(indexMetaData, Settings.EMPTY));
+            indices = newMapBuilder(indices).put(indexMetaData.getIndexUUID(), indexService).immutableMap();
+            return indexService;
+        }
+
+        @Override
+        public IndexMetaData verifyIndexIsDeleted(Index index, ClusterState state) {
+            return null;
+        }
+
+        @Override
+        public void deleteUnassignedIndex(String reason, IndexMetaData metaData, ClusterState clusterState) {
+
+        }
+
+        @Override
+        public synchronized void deleteIndex(Index index, String reason) {
+            if (hasIndex(index) == false) {
+                return;
+            }
+            Map<String, MockIndexService> newIndices = new HashMap<>(indices);
+            newIndices.remove(index.getUUID());
+            indices = unmodifiableMap(newIndices);
+        }
+
+        @Override
+        public synchronized void removeIndex(Index index, String reason) {
+            if (hasIndex(index) == false) {
+                return;
+            }
+            Map<String, MockIndexService> newIndices = new HashMap<>(indices);
+            newIndices.remove(index.getUUID());
+            indices = unmodifiableMap(newIndices);
+        }
+
+        @Override
+        public @Nullable MockIndexService indexService(Index index) {
+            return indices.get(index.getUUID());
+        }
+
+        @Override
+        public MockIndexShard createShard(ShardRouting shardRouting, RecoveryState recoveryState,
+                                          RecoveryTargetService recoveryTargetService,
+                                          RecoveryTargetService.RecoveryListener recoveryListener, RepositoriesService repositoriesService,
+                                          NodeServicesProvider nodeServicesProvider, Callback<IndexShard.ShardFailure> onShardFailure)
+            throws IOException {
+            failRandomly();
+            MockIndexService indexService = indexService(recoveryState.getShardId().getIndex());
+            MockIndexShard indexShard = indexService.createShard(shardRouting);
+            indexShard.recoveryState = recoveryState;
+            return indexShard;
+        }
+
+        @Override
+        public void processPendingDeletes(Index index, IndexSettings indexSettings, TimeValue timeValue) throws IOException,
+            InterruptedException {
+
+        }
+
+        private boolean hasIndex(Index index) {
+            return indices.containsKey(index.getUUID());
+        }
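MockIndicesService (like MockIndexService below) publishes its registry through the synchronized-writer / volatile-reader copy-on-write idiom: mutators clone the map under the lock and swap in an immutable copy, so concurrent readers always see a consistent snapshot without taking a lock. The idiom in isolation (names illustrative):

    import java.util.HashMap;
    import java.util.Map;

    import static java.util.Collections.emptyMap;
    import static java.util.Collections.unmodifiableMap;

    /** Registry with lock-free reads; writers copy, mutate, then publish. */
    final class CopyOnWriteRegistry<K, V> {
        private volatile Map<K, V> entries = emptyMap();

        V get(K key) {
            return entries.get(key); // no lock: always a fully built, immutable snapshot
        }

        synchronized void put(K key, V value) {
            Map<K, V> copy = new HashMap<>(entries);
            copy.put(key, value);
            entries = unmodifiableMap(copy); // the single volatile write publishes the snapshot
        }

        synchronized void remove(K key) {
            if (entries.containsKey(key) == false) {
                return;
            }
            Map<K, V> copy = new HashMap<>(entries);
            copy.remove(key);
            entries = unmodifiableMap(copy);
        }
    }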
+
+        @Override
+        public Iterator<MockIndexService> iterator() {
+            return indices.values().iterator();
+        }
+    }
+
+    /**
+     * Mock for {@link IndexService}
+     */
+    protected class MockIndexService implements AllocatedIndex<MockIndexShard> {
+        private volatile Map<Integer, MockIndexShard> shards = emptyMap();
+
+        private final IndexSettings indexSettings;
+
+        public MockIndexService(IndexSettings indexSettings) {
+            this.indexSettings = indexSettings;
+        }
+
+        @Override
+        public IndexSettings getIndexSettings() {
+            return indexSettings;
+        }
+
+        @Override
+        public boolean updateMapping(IndexMetaData indexMetaData) throws IOException {
+            failRandomly();
+            return false;
+        }
+
+        @Override
+        public void updateMetaData(IndexMetaData indexMetaData) {
+            indexSettings.updateIndexMetaData(indexMetaData);
+        }
+
+        @Override
+        public MockIndexShard getShardOrNull(int shardId) {
+            return shards.get(shardId);
+        }
+
+        public synchronized MockIndexShard createShard(ShardRouting routing) throws IOException {
+            failRandomly();
+            MockIndexShard shard = new MockIndexShard(routing);
+            shards = newMapBuilder(shards).put(routing.id(), shard).immutableMap();
+            return shard;
+        }
+
+        @Override
+        public synchronized void removeShard(int shardId, String reason) {
+            if (shards.containsKey(shardId) == false) {
+                return;
+            }
+            HashMap<Integer, MockIndexShard> newShards = new HashMap<>(shards);
+            MockIndexShard indexShard = newShards.remove(shardId);
+            assert indexShard != null;
+            shards = unmodifiableMap(newShards);
+        }
+
+        @Override
+        public Iterator<MockIndexShard> iterator() {
+            return shards.values().iterator();
+        }
+
+        @Override
+        public Index index() {
+            return indexSettings.getIndex();
+        }
+    }
+
+    /**
+     * Mock for {@link IndexShard}
+     */
+    protected class MockIndexShard implements IndicesClusterStateService.Shard {
+        private volatile ShardRouting shardRouting;
+        private volatile RecoveryState recoveryState;
+
+        public MockIndexShard(ShardRouting shardRouting) {
+            this.shardRouting = shardRouting;
+        }
+
+        @Override
+        public ShardId shardId() {
+            return shardRouting.shardId();
+        }
+
+        @Override
+        public RecoveryState recoveryState() {
+            return recoveryState;
+        }
+
+        @Override
+        public ShardRouting routingEntry() {
+            return shardRouting;
+        }
+
+        @Override
+        public IndexShardState state() {
+            return null;
+        }
+
+        @Override
+        public void updateRoutingEntry(ShardRouting shardRouting) throws IOException {
+            failRandomly();
+            assert this.shardId().equals(shardRouting.shardId());
+            assert this.shardRouting.isSameAllocation(shardRouting);
+            this.shardRouting = shardRouting;
+        }
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java
new file mode 100644
index 00000000000..84e83db6d1d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java
@@ -0,0 +1,234 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.indices.cluster; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest; +import org.elasticsearch.action.admin.cluster.reroute.TransportClusterRerouteAction; +import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; +import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; +import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction; +import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; +import org.elasticsearch.action.admin.indices.open.TransportOpenIndexAction; +import org.elasticsearch.action.admin.indices.settings.put.TransportUpdateSettingsAction; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.DestructiveOperations; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.master.MasterNodeRequest; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.action.support.master.TransportMasterNodeActionUtils; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.EmptyClusterInfoService; +import org.elasticsearch.cluster.metadata.AliasValidator; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService; +import org.elasticsearch.cluster.metadata.MetaDataDeleteIndexService; +import org.elasticsearch.cluster.metadata.MetaDataIndexStateService; +import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService; +import org.elasticsearch.cluster.metadata.MetaDataUpdateSettingsService; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation; +import org.elasticsearch.cluster.routing.allocation.RandomAllocationDeciderTests; +import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; +import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.NodeServicesProvider; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.shard.IndexEventListener; +import 
org.elasticsearch.indices.IndicesService; +import org.elasticsearch.test.gateway.NoopGatewayAllocator; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; + +import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom; +import static org.elasticsearch.env.Environment.PATH_HOME_SETTING; +import static org.hamcrest.Matchers.notNullValue; +import static org.junit.Assert.assertThat; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyBoolean; +import static org.mockito.Matchers.anyList; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class ClusterStateChanges { + + private final ClusterService clusterService; + private final AllocationService allocationService; + + // transport actions + private final TransportCloseIndexAction transportCloseIndexAction; + private final TransportOpenIndexAction transportOpenIndexAction; + private final TransportDeleteIndexAction transportDeleteIndexAction; + private final TransportUpdateSettingsAction transportUpdateSettingsAction; + private final TransportClusterRerouteAction transportClusterRerouteAction; + private final TransportCreateIndexAction transportCreateIndexAction; + + public ClusterStateChanges() { + Settings settings = Settings.builder().put(PATH_HOME_SETTING.getKey(), "dummy").build(); + + allocationService = new AllocationService(settings, new AllocationDeciders(settings, + new HashSet<>(Arrays.asList(new SameShardAllocationDecider(settings), + new ReplicaAfterPrimaryActiveAllocationDecider(settings), + new RandomAllocationDeciderTests.RandomAllocationDecider(getRandom())))), + NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(settings), + EmptyClusterInfoService.INSTANCE); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + ActionFilters actionFilters = new ActionFilters(Collections.emptySet()); + IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(settings); + DestructiveOperations destructiveOperations = new DestructiveOperations(settings, clusterSettings); + Environment environment = new Environment(settings); + ThreadPool threadPool = null; // it's not used + Transport transport = null; // it's not used + + // mocks + clusterService = mock(ClusterService.class); + IndicesService indicesService = mock(IndicesService.class); + // MetaDataCreateIndexService creates indices using its IndicesService instance to check mappings -> fake it here + try { + when(indicesService.createIndex(any(NodeServicesProvider.class), any(IndexMetaData.class), anyList())) + .then(invocationOnMock -> { + IndexService indexService = mock(IndexService.class); + IndexMetaData indexMetaData = (IndexMetaData)invocationOnMock.getArguments()[1]; + when(indexService.index()).thenReturn(indexMetaData.getIndex()); + MapperService mapperService = mock(MapperService.class); + when(indexService.mapperService()).thenReturn(mapperService); + when(mapperService.docMappers(anyBoolean())).thenReturn(Collections.emptyList()); + when(indexService.getIndexEventListener()).thenReturn(new IndexEventListener() {}); + return indexService; + }); + } catch (IOException e) { + 
throw new IllegalStateException(e);
+        }
+
+        // services
+        TransportService transportService = new TransportService(settings, transport, threadPool, null);
+        MetaDataIndexUpgradeService metaDataIndexUpgradeService = new MetaDataIndexUpgradeService(settings, null, null) {
+            // metaData upgrader should do nothing
+            @Override
+            public IndexMetaData upgradeIndexMetaData(IndexMetaData indexMetaData) {
+                return indexMetaData;
+            }
+        };
+        NodeServicesProvider nodeServicesProvider = new NodeServicesProvider(threadPool, null, null, null, null, null, clusterService);
+        MetaDataIndexStateService indexStateService = new MetaDataIndexStateService(settings, clusterService, allocationService,
+            metaDataIndexUpgradeService, nodeServicesProvider, indicesService);
+        MetaDataDeleteIndexService deleteIndexService = new MetaDataDeleteIndexService(settings, clusterService, allocationService);
+        MetaDataUpdateSettingsService metaDataUpdateSettingsService = new MetaDataUpdateSettingsService(settings, clusterService,
+            allocationService, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, new IndexNameExpressionResolver(settings));
+        MetaDataCreateIndexService createIndexService = new MetaDataCreateIndexService(settings, clusterService, indicesService,
+            allocationService, Version.CURRENT, new AliasValidator(settings), Collections.emptySet(), environment,
+            nodeServicesProvider, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS);
+
+        transportCloseIndexAction = new TransportCloseIndexAction(settings, transportService, clusterService, threadPool,
+            indexStateService, clusterSettings, actionFilters, indexNameExpressionResolver, destructiveOperations);
+        transportOpenIndexAction = new TransportOpenIndexAction(settings, transportService,
+            clusterService, threadPool, indexStateService, actionFilters, indexNameExpressionResolver, destructiveOperations);
+        transportDeleteIndexAction = new TransportDeleteIndexAction(settings, transportService,
+            clusterService, threadPool, deleteIndexService, actionFilters, indexNameExpressionResolver, destructiveOperations);
+        transportUpdateSettingsAction = new TransportUpdateSettingsAction(settings,
+            transportService, clusterService, threadPool, metaDataUpdateSettingsService, actionFilters, indexNameExpressionResolver);
+        transportClusterRerouteAction = new TransportClusterRerouteAction(settings,
+            transportService, clusterService, threadPool, allocationService, actionFilters, indexNameExpressionResolver);
+        transportCreateIndexAction = new TransportCreateIndexAction(settings,
+            transportService, clusterService, threadPool, createIndexService, actionFilters, indexNameExpressionResolver);
+    }
+
+    public ClusterState createIndex(ClusterState state, CreateIndexRequest request) {
+        return execute(transportCreateIndexAction, request, state);
+    }
+
+    public ClusterState closeIndices(ClusterState state, CloseIndexRequest request) {
+        return execute(transportCloseIndexAction, request, state);
+    }
+
+    public ClusterState openIndices(ClusterState state, OpenIndexRequest request) {
+        return execute(transportOpenIndexAction, request, state);
+    }
+
+    public ClusterState deleteIndices(ClusterState state, DeleteIndexRequest request) {
+        return execute(transportDeleteIndexAction, request, state);
+    }
+
+    public ClusterState updateSettings(ClusterState state, UpdateSettingsRequest request) {
+        return execute(transportUpdateSettingsAction, request, state);
+    }
+
+    public ClusterState reroute(ClusterState state, ClusterRerouteRequest request) {
+        return execute(transportClusterRerouteAction, request, state);
+    }
+
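The execute(...) helper that follows works because clusterService is a Mockito mock: stubbing submitStateUpdateTask lets the test intercept whatever ClusterStateUpdateTask a master-node action submits and run it synchronously against a caller-chosen state, rather than on a real cluster-state thread. The trick in isolation (a sketch under the same Mockito assumptions as this file, not new API):

    import org.elasticsearch.cluster.ClusterState;
    import org.elasticsearch.cluster.ClusterStateUpdateTask;
    import org.elasticsearch.cluster.service.ClusterService;

    import static org.mockito.Matchers.any;
    import static org.mockito.Matchers.anyString;
    import static org.mockito.Mockito.doAnswer;

    final class SyncStateUpdates {
        /** Runs whatever update task {@code submitter} causes to be submitted, inline, against {@code input}. */
        static ClusterState run(ClusterService mockClusterService, ClusterState input, Runnable submitter) {
            ClusterState[] output = new ClusterState[1];
            doAnswer(invocation -> {
                ClusterStateUpdateTask task = (ClusterStateUpdateTask) invocation.getArguments()[1];
                output[0] = task.execute(input); // run inline instead of on the cluster-state thread
                return null;
            }).when(mockClusterService).submitStateUpdateTask(anyString(), any(ClusterStateUpdateTask.class));
            submitter.run(); // triggers the submit, which the answer above intercepts
            return output[0];
        }
    }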
+    public ClusterState applyFailedShards(ClusterState clusterState, List<FailedRerouteAllocation.FailedShard> failedShards) {
+        RoutingAllocation.Result rerouteResult = allocationService.applyFailedShards(clusterState, failedShards);
+        return ClusterState.builder(clusterState).routingResult(rerouteResult).build();
+    }
+
+    public ClusterState applyStartedShards(ClusterState clusterState, List<ShardRouting> startedShards) {
+        RoutingAllocation.Result rerouteResult = allocationService.applyStartedShards(clusterState, startedShards);
+        return ClusterState.builder(clusterState).routingResult(rerouteResult).build();
+    }
+
+    private <Request extends MasterNodeRequest<Request>, Response extends ActionResponse> ClusterState execute(
+            TransportMasterNodeAction<Request, Response> masterNodeAction, Request request, ClusterState clusterState) {
+        return executeClusterStateUpdateTask(clusterState, () -> {
+            try {
+                TransportMasterNodeActionUtils.runMasterOperation(masterNodeAction, request, clusterState, new PlainActionFuture<>());
+            } catch (Exception e) {
+                throw new RuntimeException(e);
+            }
+        });
+    }
+
+    private ClusterState executeClusterStateUpdateTask(ClusterState state, Runnable runnable) {
+        ClusterState[] result = new ClusterState[1];
+        doAnswer(invocationOnMock -> {
+            ClusterStateUpdateTask task = (ClusterStateUpdateTask)invocationOnMock.getArguments()[1];
+            result[0] = task.execute(state);
+            return null;
+        }).when(clusterService).submitStateUpdateTask(anyString(), any(ClusterStateUpdateTask.class));
+        runnable.run();
+        assertThat(result[0], notNullValue());
+        return result[0];
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java
new file mode 100644
index 00000000000..8c63c001a1e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java
@@ -0,0 +1,281 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ + +package org.elasticsearch.indices.cluster; + +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest; +import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; +import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.elasticsearch.action.support.replication.ClusterStateCreationUtils; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation.FailedShard; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.indices.recovery.RecoveryTargetService; +import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.concurrent.Executor; + +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndicesClusterStateServiceTestCase { + + private final ClusterStateChanges cluster = new ClusterStateChanges(); + + public void testRandomClusterStateUpdates() { + // we have an IndicesClusterStateService per node in the cluster + final Map clusterStateServiceMap = new HashMap<>(); + ClusterState state = randomInitialClusterState(clusterStateServiceMap); + + // each of the following iterations represents a new cluster state update processed on all nodes + for (int i = 0; i < 30; i++) { + logger.info("Iteration {}", i); + final ClusterState previousState = state; + + // calculate new cluster state + for (int j = 0; j < randomInt(3); j++) { // multiple iterations to simulate batching of cluster states + state = randomlyUpdateClusterState(state, clusterStateServiceMap); + } + + // apply cluster state to nodes (incl. 
master) + for (DiscoveryNode node : state.nodes()) { + IndicesClusterStateService indicesClusterStateService = clusterStateServiceMap.get(node); + ClusterState localState = adaptClusterStateToLocalNode(state, node); + ClusterState previousLocalState = adaptClusterStateToLocalNode(previousState, node); + indicesClusterStateService.clusterChanged(new ClusterChangedEvent("simulated change " + i, localState, previousLocalState)); + + // check that cluster state has been properly applied to node + assertClusterStateMatchesNodeState(localState, indicesClusterStateService); + } + } + + // TODO: check if we can go to green by starting all shards and finishing all iterations + logger.info("Final cluster state: {}", state.prettyPrint()); + } + + public ClusterState randomInitialClusterState(Map clusterStateServiceMap) { + List allNodes = new ArrayList<>(); + DiscoveryNode localNode = createNode(DiscoveryNode.Role.MASTER); // local node is the master + allNodes.add(localNode); + // at least two nodes that have the data role so that we can allocate shards + allNodes.add(createNode(DiscoveryNode.Role.DATA)); + allNodes.add(createNode(DiscoveryNode.Role.DATA)); + for (int i = 0; i < randomIntBetween(2, 5); i++) { + allNodes.add(createNode()); + } + ClusterState state = ClusterStateCreationUtils.state(localNode, localNode, allNodes.toArray(new DiscoveryNode[allNodes.size()])); + // add nodes to clusterStateServiceMap + updateNodes(state, clusterStateServiceMap); + return state; + } + + private void updateNodes(ClusterState state, Map clusterStateServiceMap) { + for (DiscoveryNode node : state.nodes()) { + clusterStateServiceMap.computeIfAbsent(node, discoveryNode -> { + IndicesClusterStateService ics = createIndicesClusterStateService(); + ics.start(); + return ics; + }); + } + + for (Iterator> it = clusterStateServiceMap.entrySet().iterator(); it.hasNext(); ) { + DiscoveryNode node = it.next().getKey(); + if (state.nodes().nodeExists(node.getId()) == false) { + it.remove(); + } + } + } + + public ClusterState randomlyUpdateClusterState(ClusterState state, + Map clusterStateServiceMap) { + // randomly create new indices (until we have 200 max) + for (int i = 0; i < randomInt(5); i++) { + if (state.metaData().indices().size() > 200) { + break; + } + String name = "index_" + randomAsciiOfLength(15).toLowerCase(Locale.ROOT); + CreateIndexRequest request = new CreateIndexRequest(name, Settings.builder() + .put(SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 3)) + .put(SETTING_NUMBER_OF_REPLICAS, randomInt(2)) + .build()); + state = cluster.createIndex(state, request); + assertTrue(state.metaData().hasIndex(name)); + } + + // randomly delete indices + Set indicesToDelete = new HashSet<>(); + int numberOfIndicesToDelete = randomInt(Math.min(2, state.metaData().indices().size())); + for (String index : randomSubsetOf(numberOfIndicesToDelete, state.metaData().indices().keys().toArray(String.class))) { + indicesToDelete.add(state.metaData().index(index).getIndex().getName()); + } + if (indicesToDelete.isEmpty() == false) { + DeleteIndexRequest deleteRequest = new DeleteIndexRequest(indicesToDelete.toArray(new String[indicesToDelete.size()])); + state = cluster.deleteIndices(state, deleteRequest); + for (String index : indicesToDelete) { + assertFalse(state.metaData().hasIndex(index)); + } + } + + // randomly close indices + int numberOfIndicesToClose = randomInt(Math.min(1, state.metaData().indices().size())); + for (String index : randomSubsetOf(numberOfIndicesToClose, 
state.metaData().indices().keys().toArray(String.class))) { + CloseIndexRequest closeIndexRequest = new CloseIndexRequest(state.metaData().index(index).getIndex().getName()); + state = cluster.closeIndices(state, closeIndexRequest); + } + + // randomly open indices + int numberOfIndicesToOpen = randomInt(Math.min(1, state.metaData().indices().size())); + for (String index : randomSubsetOf(numberOfIndicesToOpen, state.metaData().indices().keys().toArray(String.class))) { + OpenIndexRequest openIndexRequest = new OpenIndexRequest(state.metaData().index(index).getIndex().getName()); + state = cluster.openIndices(state, openIndexRequest); + } + + // randomly update settings + Set indicesToUpdate = new HashSet<>(); + boolean containsClosedIndex = false; + int numberOfIndicesToUpdate = randomInt(Math.min(2, state.metaData().indices().size())); + for (String index : randomSubsetOf(numberOfIndicesToUpdate, state.metaData().indices().keys().toArray(String.class))) { + indicesToUpdate.add(state.metaData().index(index).getIndex().getName()); + if (state.metaData().index(index).getState() == IndexMetaData.State.CLOSE) { + containsClosedIndex = true; + } + } + if (indicesToUpdate.isEmpty() == false) { + UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest( + indicesToUpdate.toArray(new String[indicesToUpdate.size()])); + Settings.Builder settings = Settings.builder(); + if (containsClosedIndex == false) { + settings.put(SETTING_NUMBER_OF_REPLICAS, randomInt(2)); + } + settings.put("index.refresh_interval", randomIntBetween(1, 5) + "s"); + updateSettingsRequest.settings(settings.build()); + state = cluster.updateSettings(state, updateSettingsRequest); + } + + // randomly reroute + if (rarely()) { + state = cluster.reroute(state, new ClusterRerouteRequest()); + } + + // randomly start and fail allocated shards + List startedShards = new ArrayList<>(); + List failedShards = new ArrayList<>(); + for (DiscoveryNode node : state.nodes()) { + IndicesClusterStateService indicesClusterStateService = clusterStateServiceMap.get(node); + MockIndicesService indicesService = (MockIndicesService) indicesClusterStateService.indicesService; + for (MockIndexService indexService : indicesService) { + for (MockIndexShard indexShard : indexService) { + ShardRouting persistedShardRouting = indexShard.routingEntry(); + if (persistedShardRouting.initializing() && randomBoolean()) { + startedShards.add(persistedShardRouting); + } else if (rarely()) { + failedShards.add(new FailedShard(persistedShardRouting, "fake shard failure", new Exception())); + } + } + } + } + state = cluster.applyFailedShards(state, failedShards); + state = cluster.applyStartedShards(state, startedShards); + + // randomly add and remove nodes (except current master) + if (rarely()) { + if (randomBoolean()) { + // add node + if (state.nodes().getSize() < 10) { + DiscoveryNodes newNodes = DiscoveryNodes.builder(state.nodes()).put(createNode()).build(); + state = ClusterState.builder(state).nodes(newNodes).build(); + state = cluster.reroute(state, new ClusterRerouteRequest()); // always reroute after node leave + updateNodes(state, clusterStateServiceMap); + } + } else { + // remove node + if (state.nodes().getDataNodes().size() > 3) { + DiscoveryNode discoveryNode = randomFrom(state.nodes().getNodes().values().toArray(DiscoveryNode.class)); + if (discoveryNode.equals(state.nodes().getMasterNode()) == false) { + DiscoveryNodes newNodes = DiscoveryNodes.builder(state.nodes()).remove(discoveryNode.getId()).build(); + state = 
ClusterState.builder(state).nodes(newNodes).build(); + state = cluster.reroute(state, new ClusterRerouteRequest()); // always reroute after node join + updateNodes(state, clusterStateServiceMap); + } + } + } + } + + // TODO: go masterless? + + return state; + } + + protected DiscoveryNode createNode(DiscoveryNode.Role... mustHaveRoles) { + Set roles = new HashSet<>(randomSubsetOf(Sets.newHashSet(DiscoveryNode.Role.values()))); + for (DiscoveryNode.Role mustHaveRole : mustHaveRoles) { + roles.add(mustHaveRole); + } + return new DiscoveryNode("node_" + randomAsciiOfLength(8), DummyTransportAddress.INSTANCE, Collections.emptyMap(), roles, + Version.CURRENT); + } + + private static ClusterState adaptClusterStateToLocalNode(ClusterState state, DiscoveryNode node) { + return ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(node.getId())).build(); + } + + private IndicesClusterStateService createIndicesClusterStateService() { + final ThreadPool threadPool = mock(ThreadPool.class); + final Executor executor = mock(Executor.class); + when(threadPool.generic()).thenReturn(executor); + final MockIndicesService indicesService = new MockIndicesService(); + final TransportService transportService = new TransportService(Settings.EMPTY, null, threadPool, null); + final ClusterService clusterService = mock(ClusterService.class); + final RepositoriesService repositoriesService = new RepositoriesService(Settings.EMPTY, clusterService, + transportService, null, null); + final RecoveryTargetService recoveryTargetService = new RecoveryTargetService(Settings.EMPTY, threadPool, + transportService, null, clusterService); + final ShardStateAction shardStateAction = mock(ShardStateAction.class); + return new IndicesClusterStateService(Settings.EMPTY, indicesService, clusterService, + threadPool, recoveryTargetService, shardStateAction, null, repositoriesService, null, null, null, null, null); + } + +} diff --git a/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java index 3565cf0147d..a4096fde9da 100644 --- a/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java +++ b/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java @@ -47,6 +47,7 @@ import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.indices.IndicesQueryCache; import org.elasticsearch.indices.IndicesRequestCache; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.search.sort.SortOrder; @@ -78,6 +79,7 @@ public class IndexStatsIT extends ESIntegTestCase { //Filter/Query cache is cleaned periodically, default is 60s, so make sure it runs often. 
Thread.sleep for 60s is bad return Settings.builder().put(super.nodeSettings(nodeOrdinal)) .put(IndicesService.INDICES_CACHE_CLEAN_INTERVAL_SETTING.getKey(), "1ms") + .put(IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.getKey(), true) .build(); } diff --git a/core/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxIT.java b/core/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxIT.java index 7e01f575822..398ef64bc92 100644 --- a/core/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxIT.java +++ b/core/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxIT.java @@ -126,6 +126,8 @@ public class GeoBoundingBoxIT extends ESIntegTestCase { } } + // norelease + @AwaitsFix(bugUrl = "https://issues.apache.org/jira/browse/LUCENE-7325") public void testLimitsBoundingBox() throws Exception { Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); diff --git a/core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java b/core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java index b8b04a8bc33..0debdb263af 100644 --- a/core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java +++ b/core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java @@ -30,7 +30,6 @@ import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree; import org.apache.lucene.spatial.query.SpatialArgs; import org.apache.lucene.spatial.query.SpatialOperation; import org.apache.lucene.spatial.query.UnsupportedSpatialOperation; -import org.apache.lucene.spatial.util.GeoProjectionUtils; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.bulk.BulkItemResponse; @@ -42,6 +41,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.geo.GeoHashUtils; import org.elasticsearch.common.geo.GeoPoint; +import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.geo.builders.CoordinatesBuilder; import org.elasticsearch.common.geo.builders.LineStringBuilder; import org.elasticsearch.common.geo.builders.MultiPolygonBuilder; @@ -540,7 +540,7 @@ public class GeoFilterIT extends ESIntegTestCase { } public static double distance(double lat1, double lon1, double lat2, double lon2) { - return GeoProjectionUtils.SEMIMAJOR_AXIS * DistanceUtils.distHaversineRAD( + return GeoUtils.EARTH_SEMI_MAJOR_AXIS * DistanceUtils.distHaversineRAD( DistanceUtils.toRadians(lat1), DistanceUtils.toRadians(lon1), DistanceUtils.toRadians(lat2), diff --git a/core/src/test/java/org/elasticsearch/search/nested/SimpleNestedIT.java b/core/src/test/java/org/elasticsearch/search/nested/SimpleNestedIT.java index ba378a3c404..e0aec941487 100644 --- a/core/src/test/java/org/elasticsearch/search/nested/SimpleNestedIT.java +++ b/core/src/test/java/org/elasticsearch/search/nested/SimpleNestedIT.java @@ -318,13 +318,7 @@ public class SimpleNestedIT extends ESIntegTestCase { assertThat(searchResponse.getHits().totalHits(), equalTo(1L)); Explanation explanation = searchResponse.getHits().hits()[0].explanation(); assertThat(explanation.getValue(), equalTo(2f)); - assertThat(explanation.toString(), startsWith("2.0 = sum of:\n 2.0 = Score based on child doc range from 0 to 1\n")); - // TODO: Enable when changes from BlockJoinQuery#explain are added to Lucene (Most likely version 4.2) -// 
assertThat(explanation.getDetails().length, equalTo(2)); -// assertThat(explanation.getDetails()[0].getValue(), equalTo(1f)); -// assertThat(explanation.getDetails()[0].getDescription(), equalTo("Child[0]")); -// assertThat(explanation.getDetails()[1].getValue(), equalTo(1f)); -// assertThat(explanation.getDetails()[1].getDescription(), equalTo("Child[1]")); + assertThat(explanation.toString(), startsWith("2.0 = sum of:\n 2.0 = Score based on 2 child docs in range from 0 to 1")); } public void testSimpleNestedSorting() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java b/core/src/test/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java new file mode 100644 index 00000000000..848b230b3fa --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java @@ -0,0 +1,403 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.profile.aggregation; + +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregator; +import org.elasticsearch.search.aggregations.bucket.terms.GlobalOrdinalsStringTermsAggregator; +import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregator; +import org.elasticsearch.search.aggregations.metrics.max.MaxAggregator; +import org.elasticsearch.search.profile.ProfileResult; +import org.elasticsearch.search.profile.ProfileShardResult; +import org.elasticsearch.search.profile.aggregation.AggregationProfileShardResult; +import org.elasticsearch.search.profile.aggregation.AggregationTimingType; +import org.elasticsearch.test.ESIntegTestCase; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; +import static org.elasticsearch.search.aggregations.AggregationBuilders.avg; +import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; +import static org.elasticsearch.search.aggregations.AggregationBuilders.max; +import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.notNullValue; + +@ESIntegTestCase.SuiteScopeTestCase +public class AggregationProfilerIT extends ESIntegTestCase { + + + private static final String NUMBER_FIELD = "number"; + private static final String TAG_FIELD = 
"tag"; + private static final String STRING_FIELD = "string_field"; + + @Override + protected int numberOfShards() { + return 1; + } + + @Override + protected void setupSuiteScopeCluster() throws Exception { + assertAcked(client().admin().indices().prepareCreate("idx") + .addMapping("type", STRING_FIELD, "type=keyword", NUMBER_FIELD, "type=integer", TAG_FIELD, "type=keyword").get()); + List builders = new ArrayList<>(); + + String[] randomStrings = new String[randomIntBetween(2, 10)]; + for (int i = 0; i < randomStrings.length; i++) { + randomStrings[i] = randomAsciiOfLength(10); + } + + for (int i = 0; i < 5; i++) { + builders.add(client().prepareIndex("idx", "type").setSource( + jsonBuilder().startObject() + .field(STRING_FIELD, randomFrom(randomStrings)) + .field(NUMBER_FIELD, randomIntBetween(0, 9)) + .field(TAG_FIELD, randomBoolean() ? "more" : "less") + .endObject())); + } + + indexRandom(true, builders); + createIndex("idx_unmapped"); + ensureSearchable(); + } + + public void testSimpleProfile() { + SearchResponse response = client().prepareSearch("idx").setProfile(true) + .addAggregation(histogram("histo").field(NUMBER_FIELD).interval(1L)).get(); + assertSearchResponse(response); + Map profileResults = response.getProfileResults(); + assertThat(profileResults, notNullValue()); + assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries)); + for (ProfileShardResult profileShardResult : profileResults.values()) { + assertThat(profileShardResult, notNullValue()); + AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); + assertThat(aggProfileResults, notNullValue()); + List aggProfileResultsList = aggProfileResults.getProfileResults(); + assertThat(aggProfileResultsList, notNullValue()); + assertThat(aggProfileResultsList.size(), equalTo(1)); + ProfileResult histoAggResult = aggProfileResultsList.get(0); + assertThat(histoAggResult, notNullValue()); + assertThat(histoAggResult.getQueryName(), equalTo(HistogramAggregator.class.getName())); + assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); + assertThat(histoAggResult.getProfiledChildren().size(), equalTo(0)); + assertThat(histoAggResult.getTime(), greaterThan(0L)); + Map breakdown = histoAggResult.getTimeBreakdown(); + assertThat(breakdown, notNullValue()); + assertThat(breakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue()); + assertThat(breakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L)); + assertThat(breakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue()); + assertThat(breakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L)); + assertThat(breakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue()); + assertThat(breakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L)); + assertThat(breakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue()); + assertThat(breakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L)); + + } + } + + public void testMultiLevelProfile() { + SearchResponse response = client().prepareSearch("idx").setProfile(true) + .addAggregation(histogram("histo").field(NUMBER_FIELD).interval(1L) + .subAggregation(terms("terms").field(TAG_FIELD) + .subAggregation(avg("avg").field(NUMBER_FIELD)))).get(); + assertSearchResponse(response); + Map profileResults = response.getProfileResults(); + assertThat(profileResults, notNullValue()); + assertThat(profileResults.size(), 
equalTo(getNumShards("idx").numPrimaries)); + for (ProfileShardResult profileShardResult : profileResults.values()) { + assertThat(profileShardResult, notNullValue()); + AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); + assertThat(aggProfileResults, notNullValue()); + List aggProfileResultsList = aggProfileResults.getProfileResults(); + assertThat(aggProfileResultsList, notNullValue()); + assertThat(aggProfileResultsList.size(), equalTo(1)); + ProfileResult histoAggResult = aggProfileResultsList.get(0); + assertThat(histoAggResult, notNullValue()); + assertThat(histoAggResult.getQueryName(), equalTo(HistogramAggregator.class.getName())); + assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); + assertThat(histoAggResult.getTime(), greaterThan(0L)); + Map histoBreakdown = histoAggResult.getTimeBreakdown(); + assertThat(histoBreakdown, notNullValue()); + assertThat(histoBreakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue()); + assertThat(histoBreakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L)); + assertThat(histoBreakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue()); + assertThat(histoBreakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L)); + assertThat(histoBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue()); + assertThat(histoBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L)); + assertThat(histoBreakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue()); + assertThat(histoBreakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L)); + assertThat(histoAggResult.getProfiledChildren().size(), equalTo(1)); + + ProfileResult termsAggResult = histoAggResult.getProfiledChildren().get(0); + assertThat(termsAggResult, notNullValue()); + assertThat(termsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.WithHash.class.getName())); + assertThat(termsAggResult.getLuceneDescription(), equalTo("terms")); + assertThat(termsAggResult.getTime(), greaterThan(0L)); + Map termsBreakdown = termsAggResult.getTimeBreakdown(); + assertThat(termsBreakdown, notNullValue()); + assertThat(termsBreakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue()); + assertThat(termsBreakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L)); + assertThat(termsBreakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue()); + assertThat(termsBreakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L)); + assertThat(termsBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue()); + assertThat(termsBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L)); + assertThat(termsBreakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue()); + assertThat(termsBreakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L)); + assertThat(termsAggResult.getProfiledChildren().size(), equalTo(1)); + + ProfileResult avgAggResult = termsAggResult.getProfiledChildren().get(0); + assertThat(avgAggResult, notNullValue()); + assertThat(avgAggResult.getQueryName(), equalTo(AvgAggregator.class.getName())); + assertThat(avgAggResult.getLuceneDescription(), equalTo("avg")); + assertThat(avgAggResult.getTime(), greaterThan(0L)); + Map avgBreakdown = termsAggResult.getTimeBreakdown(); + assertThat(avgBreakdown, notNullValue()); + 
+ + public void testComplexProfile() { + SearchResponse response = client().prepareSearch("idx").setProfile(true) + .addAggregation(histogram("histo").field(NUMBER_FIELD).interval(1L) + .subAggregation(terms("tags").field(TAG_FIELD) + .subAggregation(avg("avg").field(NUMBER_FIELD)) + .subAggregation(max("max").field(NUMBER_FIELD))) + .subAggregation(terms("strings").field(STRING_FIELD) + .subAggregation(avg("avg").field(NUMBER_FIELD)) + .subAggregation(max("max").field(NUMBER_FIELD)) + .subAggregation(terms("tags").field(TAG_FIELD) + .subAggregation(avg("avg").field(NUMBER_FIELD)) + .subAggregation(max("max").field(NUMBER_FIELD))))) + .get(); + assertSearchResponse(response); + Map<String, ProfileShardResult> profileResults = response.getProfileResults(); + assertThat(profileResults, notNullValue()); + assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries)); + for (ProfileShardResult profileShardResult : profileResults.values()) { + assertThat(profileShardResult, notNullValue()); + AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); + assertThat(aggProfileResults, notNullValue()); + List<ProfileResult> aggProfileResultsList = aggProfileResults.getProfileResults(); + assertThat(aggProfileResultsList, notNullValue()); + assertThat(aggProfileResultsList.size(), equalTo(1)); + ProfileResult histoAggResult = aggProfileResultsList.get(0); + assertThat(histoAggResult, notNullValue()); + assertThat(histoAggResult.getQueryName(), equalTo(HistogramAggregator.class.getName())); + assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); + assertThat(histoAggResult.getTime(), greaterThan(0L)); + Map<String, Long> histoBreakdown = histoAggResult.getTimeBreakdown(); + assertThat(histoBreakdown, notNullValue()); + assertThat(histoBreakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue()); + assertThat(histoBreakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L)); + assertThat(histoBreakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue()); + assertThat(histoBreakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L)); + assertThat(histoBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue()); + assertThat(histoBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L)); + assertThat(histoBreakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue()); + assertThat(histoBreakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L)); + assertThat(histoAggResult.getProfiledChildren().size(), equalTo(2)); + + ProfileResult tagsAggResult = histoAggResult.getProfiledChildren().get(0); + assertThat(tagsAggResult, notNullValue()); +
assertThat(tagsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.WithHash.class.getName())); + assertThat(tagsAggResult.getLuceneDescription(), equalTo("tags")); + assertThat(tagsAggResult.getTime(), greaterThan(0L)); + Map<String, Long> tagsBreakdown = tagsAggResult.getTimeBreakdown(); + assertThat(tagsBreakdown, notNullValue()); + assertThat(tagsBreakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue()); + assertThat(tagsBreakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L)); + assertThat(tagsBreakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue()); + assertThat(tagsBreakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L)); + assertThat(tagsBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue()); + assertThat(tagsBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L)); + assertThat(tagsBreakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue()); + assertThat(tagsBreakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L)); + assertThat(tagsAggResult.getProfiledChildren().size(), equalTo(2)); + + ProfileResult avgAggResult = tagsAggResult.getProfiledChildren().get(0); + assertThat(avgAggResult, notNullValue()); + assertThat(avgAggResult.getQueryName(), equalTo(AvgAggregator.class.getName())); + assertThat(avgAggResult.getLuceneDescription(), equalTo("avg")); + assertThat(avgAggResult.getTime(), greaterThan(0L)); + Map<String, Long> avgBreakdown = avgAggResult.getTimeBreakdown(); + assertThat(avgBreakdown, notNullValue()); + assertThat(avgBreakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue()); + assertThat(avgBreakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L)); + assertThat(avgBreakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue()); + assertThat(avgBreakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L)); + assertThat(avgBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue()); + assertThat(avgBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L)); + assertThat(avgBreakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue()); + assertThat(avgBreakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L)); + assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0)); + + ProfileResult maxAggResult = tagsAggResult.getProfiledChildren().get(1); + assertThat(maxAggResult, notNullValue()); + assertThat(maxAggResult.getQueryName(), equalTo(MaxAggregator.class.getName())); + assertThat(maxAggResult.getLuceneDescription(), equalTo("max")); + assertThat(maxAggResult.getTime(), greaterThan(0L)); + Map<String, Long> maxBreakdown = maxAggResult.getTimeBreakdown(); + assertThat(maxBreakdown, notNullValue()); + assertThat(maxBreakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue()); + assertThat(maxBreakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L)); + assertThat(maxBreakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue()); + assertThat(maxBreakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L)); + assertThat(maxBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue()); + assertThat(maxBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L)); + assertThat(maxBreakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue()); + assertThat(maxBreakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L));
+ assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0)); + + ProfileResult stringsAggResult = histoAggResult.getProfiledChildren().get(1); + assertThat(stringsAggResult, notNullValue()); + assertThat(stringsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.WithHash.class.getName())); + assertThat(stringsAggResult.getLuceneDescription(), equalTo("strings")); + assertThat(stringsAggResult.getTime(), greaterThan(0L)); + Map<String, Long> stringsBreakdown = stringsAggResult.getTimeBreakdown(); + assertThat(stringsBreakdown, notNullValue()); + assertThat(stringsBreakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue()); + assertThat(stringsBreakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L)); + assertThat(stringsBreakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue()); + assertThat(stringsBreakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L)); + assertThat(stringsBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue()); + assertThat(stringsBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L)); + assertThat(stringsBreakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue()); + assertThat(stringsBreakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L)); + assertThat(stringsAggResult.getProfiledChildren().size(), equalTo(3)); + + avgAggResult = stringsAggResult.getProfiledChildren().get(0); + assertThat(avgAggResult, notNullValue()); + assertThat(avgAggResult.getQueryName(), equalTo(AvgAggregator.class.getName())); + assertThat(avgAggResult.getLuceneDescription(), equalTo("avg")); + assertThat(avgAggResult.getTime(), greaterThan(0L)); + avgBreakdown = avgAggResult.getTimeBreakdown(); + assertThat(avgBreakdown, notNullValue()); + assertThat(avgBreakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue()); + assertThat(avgBreakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L)); + assertThat(avgBreakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue()); + assertThat(avgBreakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L)); + assertThat(avgBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue()); + assertThat(avgBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L)); + assertThat(avgBreakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue()); + assertThat(avgBreakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L)); + assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0)); + + maxAggResult = stringsAggResult.getProfiledChildren().get(1); + assertThat(maxAggResult, notNullValue()); + assertThat(maxAggResult.getQueryName(), equalTo(MaxAggregator.class.getName())); + assertThat(maxAggResult.getLuceneDescription(), equalTo("max")); + assertThat(maxAggResult.getTime(), greaterThan(0L)); + maxBreakdown = maxAggResult.getTimeBreakdown(); + assertThat(maxBreakdown, notNullValue()); + assertThat(maxBreakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue()); + assertThat(maxBreakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L)); + assertThat(maxBreakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue()); + assertThat(maxBreakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L)); + assertThat(maxBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue()); +
assertThat(maxBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L)); + assertThat(maxBreakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue()); + assertThat(maxBreakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L)); + assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0)); + + tagsAggResult = stringsAggResult.getProfiledChildren().get(2); + assertThat(tagsAggResult, notNullValue()); + assertThat(tagsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.WithHash.class.getName())); + assertThat(tagsAggResult.getLuceneDescription(), equalTo("tags")); + assertThat(tagsAggResult.getTime(), greaterThan(0L)); + tagsBreakdown = tagsAggResult.getTimeBreakdown(); + assertThat(tagsBreakdown, notNullValue()); + assertThat(tagsBreakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue()); + assertThat(tagsBreakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L)); + assertThat(tagsBreakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue()); + assertThat(tagsBreakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L)); + assertThat(tagsBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue()); + assertThat(tagsBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L)); + assertThat(tagsBreakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue()); + assertThat(tagsBreakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L)); + assertThat(tagsAggResult.getProfiledChildren().size(), equalTo(2)); + + avgAggResult = tagsAggResult.getProfiledChildren().get(0); + assertThat(avgAggResult, notNullValue()); + assertThat(avgAggResult.getQueryName(), equalTo(AvgAggregator.class.getName())); + assertThat(avgAggResult.getLuceneDescription(), equalTo("avg")); + assertThat(avgAggResult.getTime(), greaterThan(0L)); + avgBreakdown = avgAggResult.getTimeBreakdown(); + assertThat(avgBreakdown, notNullValue()); + assertThat(avgBreakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue()); + assertThat(avgBreakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L)); + assertThat(avgBreakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue()); + assertThat(avgBreakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L)); + assertThat(avgBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue()); + assertThat(avgBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L)); + assertThat(avgBreakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue()); + assertThat(avgBreakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L)); + assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0)); + + maxAggResult = tagsAggResult.getProfiledChildren().get(1); + assertThat(maxAggResult, notNullValue()); + assertThat(maxAggResult.getQueryName(), equalTo(MaxAggregator.class.getName())); + assertThat(maxAggResult.getLuceneDescription(), equalTo("max")); + assertThat(maxAggResult.getTime(), greaterThan(0L)); + maxBreakdown = maxAggResult.getTimeBreakdown(); + assertThat(maxBreakdown, notNullValue()); + assertThat(maxBreakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue()); + assertThat(maxBreakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L)); + assertThat(maxBreakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue()); +
assertThat(maxBreakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L)); + assertThat(maxBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue()); + assertThat(maxBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L)); + assertThat(maxBreakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue()); + assertThat(maxBreakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L)); + assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0)); + } + } + + public void testNoProfile() { + SearchResponse response = client().prepareSearch("idx").setProfile(false) + .addAggregation(histogram("histo").field(NUMBER_FIELD).interval(1L) + .subAggregation(terms("tags").field(TAG_FIELD) + .subAggregation(avg("avg").field(NUMBER_FIELD)) + .subAggregation(max("max").field(NUMBER_FIELD))) + .subAggregation(terms("strings").field(STRING_FIELD) + .subAggregation(avg("avg").field(NUMBER_FIELD)) + .subAggregation(max("max").field(NUMBER_FIELD)) + .subAggregation(terms("tags").field(TAG_FIELD) + .subAggregation(avg("avg").field(NUMBER_FIELD)) + .subAggregation(max("max").field(NUMBER_FIELD))))) + .get(); + assertSearchResponse(response); + Map<String, ProfileShardResult> profileResults = response.getProfileResults(); + assertThat(profileResults, notNullValue()); + assertThat(profileResults.size(), equalTo(0)); + } +}
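Taken together, these tests pin down the shape of a per-shard aggregation profile: a tree of ProfileResult nodes whose breakdown keys are the AggregationTimingType names. A client-side walk of that tree could look like the sketch below; it uses only the getters exercised in the tests above, while the printing, indentation trick, and standard-library imports (java.util.Collections, java.util.List, java.util.Map) are illustrative:

    // Sketch: recursively print one shard's aggregation profile tree.
    static void printAggProfile(ProfileResult result, int depth) {
        String indent = String.join("", Collections.nCopies(depth, "  "));
        System.out.println(indent + result.getLuceneDescription()
                + " [" + result.getQueryName() + "]: " + result.getTime() + "ns");
        for (Map.Entry<String, Long> timing : result.getTimeBreakdown().entrySet()) {
            System.out.println(indent + "  " + timing.getKey() + " = " + timing.getValue());
        }
        for (ProfileResult child : result.getProfiledChildren()) {
            printAggProfile(child, depth + 1);   // children are nested sub-aggregations
        }
    }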
diff --git a/core/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java b/core/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java index 371aaadd3a7..b6935f021d4 100644 --- a/core/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java +++ b/core/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java @@ -85,8 +85,8 @@ public class QueryProfilerIT extends ESIntegTestCase { assertNotNull("Profile response element should not be null", resp.getProfileResults()); assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); - for (Map.Entry<String, List<ProfileShardResult>> shard : resp.getProfileResults().entrySet()) { - for (ProfileShardResult searchProfiles : shard.getValue()) { + for (Map.Entry<String, ProfileShardResult> shard : resp.getProfileResults().entrySet()) { + for (QueryProfileShardResult searchProfiles : shard.getValue().getQueryProfileResults()) { for (ProfileResult result : searchProfiles.getQueryResults()) { assertNotNull(result.getQueryName()); assertNotNull(result.getLuceneDescription()); @@ -163,8 +163,9 @@ public class QueryProfilerIT extends ESIntegTestCase { nearlyEqual(vanillaMaxScore, profileMaxScore, 0.001)); } - assertThat("Profile totalHits of [" + profileResponse.getHits().totalHits() + "] is not close to Vanilla totalHits [" - + vanillaResponse.getHits().totalHits() + "]", + assertThat( + "Profile totalHits of [" + profileResponse.getHits().totalHits() + "] is not close to Vanilla totalHits [" + + vanillaResponse.getHits().totalHits() + "]", vanillaResponse.getHits().getTotalHits(), equalTo(profileResponse.getHits().getTotalHits())); SearchHit[] vanillaHits = vanillaResponse.getHits().getHits(); @@ -203,12 +204,12 @@ public class QueryProfilerIT extends ESIntegTestCase { .setSearchType(SearchType.QUERY_THEN_FETCH) .execute().actionGet(); - Map<String, List<ProfileShardResult>> p = resp.getProfileResults(); + Map<String, ProfileShardResult> p = resp.getProfileResults(); assertNotNull(p); assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); - for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) { - for (ProfileShardResult searchProfiles : shardResult.getValue()) { + for (Map.Entry<String, ProfileShardResult> shardResult : resp.getProfileResults().entrySet()) { + for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { for (ProfileResult result : searchProfiles.getQueryResults()) { assertEquals(result.getQueryName(), "TermQuery"); assertEquals(result.getLuceneDescription(), "field1:one"); @@ -250,12 +251,12 @@ public class QueryProfilerIT extends ESIntegTestCase { .setSearchType(SearchType.QUERY_THEN_FETCH) .execute().actionGet(); - Map<String, List<ProfileShardResult>> p = resp.getProfileResults(); + Map<String, ProfileShardResult> p = resp.getProfileResults(); assertNotNull(p); assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); - for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) { - for (ProfileShardResult searchProfiles : shardResult.getValue()) { + for (Map.Entry<String, ProfileShardResult> shardResult : resp.getProfileResults().entrySet()) { + for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { for (ProfileResult result : searchProfiles.getQueryResults()) { assertEquals(result.getQueryName(), "BooleanQuery"); assertEquals(result.getLuceneDescription(), "+field1:one +field1:two"); @@ -322,8 +323,8 @@ public class QueryProfilerIT extends ESIntegTestCase { assertNotNull("Profile response element should not be null", resp.getProfileResults()); assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); - for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) { - for (ProfileShardResult searchProfiles : shardResult.getValue()) { + for (Map.Entry<String, ProfileShardResult> shardResult : resp.getProfileResults().entrySet()) { + for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { for (ProfileResult result : searchProfiles.getQueryResults()) { assertNotNull(result.getQueryName()); assertNotNull(result.getLuceneDescription()); @@ -374,8 +375,8 @@ public class QueryProfilerIT extends ESIntegTestCase { assertNotNull("Profile response element should not be null", resp.getProfileResults()); assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); - for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) { - for (ProfileShardResult searchProfiles : shardResult.getValue()) { + for (Map.Entry<String, ProfileShardResult> shardResult : resp.getProfileResults().entrySet()) { + for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { for (ProfileResult result : searchProfiles.getQueryResults()) { assertNotNull(result.getQueryName()); assertNotNull(result.getLuceneDescription()); @@ -421,8 +422,8 @@ public class QueryProfilerIT extends ESIntegTestCase { assertNotNull("Profile response element should not be null", resp.getProfileResults()); assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); - for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) { - for (ProfileShardResult searchProfiles : shardResult.getValue()) { + for (Map.Entry<String, ProfileShardResult> shardResult : resp.getProfileResults().entrySet()) { + for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { for (ProfileResult result : searchProfiles.getQueryResults()) { assertNotNull(result.getQueryName()); assertNotNull(result.getLuceneDescription()); @@ -468,8 +469,8 @@ public class QueryProfilerIT extends ESIntegTestCase { assertNotNull("Profile response element should not be null", resp.getProfileResults()); assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); - for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) { - for (ProfileShardResult searchProfiles : shardResult.getValue()) { + for (Map.Entry<String, ProfileShardResult> shardResult : resp.getProfileResults().entrySet()) { + for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { for (ProfileResult result : searchProfiles.getQueryResults()) { assertNotNull(result.getQueryName()); assertNotNull(result.getLuceneDescription()); @@ -514,8 +515,8 @@ public class QueryProfilerIT extends ESIntegTestCase { assertNotNull("Profile response element should not be null", resp.getProfileResults()); assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); - for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) { - for (ProfileShardResult searchProfiles : shardResult.getValue()) { + for (Map.Entry<String, ProfileShardResult> shardResult : resp.getProfileResults().entrySet()) { + for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { for (ProfileResult result : searchProfiles.getQueryResults()) { assertNotNull(result.getQueryName()); assertNotNull(result.getLuceneDescription()); @@ -569,8 +570,8 @@ public class QueryProfilerIT extends ESIntegTestCase { assertNotNull("Profile response element should not be null", resp.getProfileResults()); assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); - for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) { - for (ProfileShardResult searchProfiles : shardResult.getValue()) { + for (Map.Entry<String, ProfileShardResult> shardResult : resp.getProfileResults().entrySet()) { + for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { for (ProfileResult result : searchProfiles.getQueryResults()) { assertNotNull(result.getQueryName()); assertNotNull(result.getLuceneDescription());
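Every hunk above makes the same mechanical change: the response-level type moves from Map<String, List<ProfileShardResult>> to Map<String, ProfileShardResult>, and the per-shard query profiles are now reached through a getter so that aggregation profiles can live alongside them. A minimal before/after sketch of consuming code, where resp and shardKey stand in for a SearchResponse and a shard key from the map:

    // Before this change: one flat list of ProfileShardResult per shard key.
    //   for (ProfileShardResult r : resp.getProfileResults().get(shardKey)) { ... }

    // After: one ProfileShardResult per shard, holding query and aggregation sections.
    ProfileShardResult shardProfile = resp.getProfileResults().get(shardKey);
    for (QueryProfileShardResult queryProfile : shardProfile.getQueryProfileResults()) {
        for (ProfileResult result : queryProfile.getQueryResults()) {
            // e.g. result.getQueryName(), result.getLuceneDescription(), result.getTimeBreakdown()
        }
    }
    AggregationProfileShardResult aggProfile = shardProfile.getAggregationProfileResults();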
diff --git a/core/src/test/java/org/elasticsearch/search/profile/query/ProfileTests.java b/core/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerTests.java similarity index 96% rename from core/src/test/java/org/elasticsearch/search/profile/query/ProfileTests.java rename to core/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerTests.java index 7488dbceee7..ffad39bc3f2 100644 --- a/core/src/test/java/org/elasticsearch/search/profile/query/ProfileTests.java +++ b/core/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerTests.java @@ -51,7 +51,7 @@ import java.util.Map; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; -public class ProfileTests extends ESTestCase { +public class QueryProfilerTests extends ESTestCase { static Directory dir; static IndexReader reader; @@ -90,7 +90,7 @@ searcher.setProfiler(profiler); Query query = new TermQuery(new Term("foo", "bar")); searcher.search(query, 1); - List<ProfileResult> results = profiler.getQueryTree(); + List<ProfileResult> results = profiler.getTree(); assertEquals(1, results.size()); Map<String, Long> breakdown = results.get(0).getTimeBreakdown(); assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT.toString()).longValue(), greaterThan(0L)); @@ -109,7 +109,7 @@ searcher.setProfiler(profiler); Query query = new TermQuery(new Term("foo", "bar")); searcher.search(query, 1, Sort.INDEXORDER); // scores are not needed - List<ProfileResult> results = profiler.getQueryTree(); + List<ProfileResult> results = profiler.getTree(); assertEquals(1, results.size()); Map<String, Long> breakdown = results.get(0).getTimeBreakdown(); assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT.toString()).longValue(), greaterThan(0L)); @@ -128,7 +128,7 @@ searcher.setProfiler(profiler); Query query = new TermQuery(new Term("foo", "bar")); searcher.count(query); // will use index stats - List<ProfileResult> results = profiler.getQueryTree(); + List<ProfileResult> results = profiler.getTree(); assertEquals(0, results.size()); long rewriteTime = profiler.getRewriteTime(); @@ -144,7 +144,7 @@ searcher.setProfiler(profiler); Query query = new RandomApproximationQuery(new TermQuery(new Term("foo", "bar")), random()); searcher.count(query); - List<ProfileResult> results = profiler.getQueryTree(); + List<ProfileResult> results = profiler.getTree(); assertEquals(1, results.size()); Map<String, Long> breakdown = results.get(0).getTimeBreakdown(); assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT.toString()).longValue(), greaterThan(0L));
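The renamed tests also document the low-level profiler workflow: attach a QueryProfiler to the searcher, run a search or count, then read the profile tree and the rewrite time. A condensed sketch, noting that the profiler and searcher are constructed above the quoted hunks so the no-arg constructor here is an assumption:

    QueryProfiler profiler = new QueryProfiler();   // construction site is outside these hunks
    searcher.setProfiler(profiler);
    searcher.search(new TermQuery(new Term("foo", "bar")), 1);
    List<ProfileResult> results = profiler.getTree();   // was getQueryTree() before this rename
    long rewriteTime = profiler.getRewriteTime();       // rewrite time is tracked outside the tree
    Map<String, Long> breakdown = results.get(0).getTimeBreakdown();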
diff --git a/core/src/test/java/org/elasticsearch/search/simple/SimpleSearchIT.java b/core/src/test/java/org/elasticsearch/search/simple/SimpleSearchIT.java index 9751df26329..e94fe21a170 100644 --- a/core/src/test/java/org/elasticsearch/search/simple/SimpleSearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/simple/SimpleSearchIT.java @@ -443,6 +443,6 @@ public class SimpleSearchIT extends ESIntegTestCase { assertThat(e.toString(), containsString("Rescore window [" + windowSize + "] is too large. It must " + "be less than [" + IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY))); assertThat(e.toString(), containsString( - "This limit can be set by chaining the [" + IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey() + "] index level setting.")); + "This limit can be set by changing the [" + IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey() + "] index level setting.")); } } diff --git a/core/src/test/java/org/elasticsearch/search/slice/SearchSliceIT.java b/core/src/test/java/org/elasticsearch/search/slice/SearchSliceIT.java index ad93d14f21f..4a27dcb5964 100644 --- a/core/src/test/java/org/elasticsearch/search/slice/SearchSliceIT.java +++ b/core/src/test/java/org/elasticsearch/search/slice/SearchSliceIT.java @@ -35,7 +35,6 @@ import org.elasticsearch.test.ESIntegTestCase; import java.io.IOException; import java.util.List; import java.util.ArrayList; -import java.util.Set; import java.util.HashSet; import java.util.concurrent.ExecutionException; @@ -43,7 +42,6 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.startsWith; public class SearchSliceIT extends ESIntegTestCase { @@ -71,7 +69,8 @@ public class SearchSliceIT extends ESIntegTestCase { .endObject().string(); int numberOfShards = randomIntBetween(1, 7); assertAcked(client().admin().indices().prepareCreate("test") - .setSettings("number_of_shards", numberOfShards) + .setSettings("number_of_shards", numberOfShards, + "index.max_slices_per_scroll", 10000) .addMapping("type", mapping)); ensureGreen(); diff --git a/core/src/test/resources/indices/bwc/missing-checksum-repo-2.3.4.zip b/core/src/test/resources/indices/bwc/missing-checksum-repo-2.3.4.zip new file mode 100644 index
00000000000..9590f8dbd66 Binary files /dev/null and b/core/src/test/resources/indices/bwc/missing-checksum-repo-2.3.4.zip differ diff --git a/distribution/licenses/lucene-analyzers-common-6.0.1.jar.sha1 b/distribution/licenses/lucene-analyzers-common-6.0.1.jar.sha1 deleted file mode 100644 index b581809a004..00000000000 --- a/distribution/licenses/lucene-analyzers-common-6.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -53953c1a9b097f83209c84a422cf8f9d271f47c1 \ No newline at end of file diff --git a/distribution/licenses/lucene-analyzers-common-6.1.0-snapshot-3a57bea.jar.sha1 b/distribution/licenses/lucene-analyzers-common-6.1.0-snapshot-3a57bea.jar.sha1 new file mode 100644 index 00000000000..bb4cc98e068 --- /dev/null +++ b/distribution/licenses/lucene-analyzers-common-6.1.0-snapshot-3a57bea.jar.sha1 @@ -0,0 +1 @@ +bf73c03e6b83f8e696133f40b9b1fc3381750149 \ No newline at end of file diff --git a/distribution/licenses/lucene-backward-codecs-6.0.1.jar.sha1 b/distribution/licenses/lucene-backward-codecs-6.0.1.jar.sha1 deleted file mode 100644 index 5433f09c993..00000000000 --- a/distribution/licenses/lucene-backward-codecs-6.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3647088603be84b8f4916ef86954e3336b98d254 \ No newline at end of file diff --git a/distribution/licenses/lucene-backward-codecs-6.1.0-snapshot-3a57bea.jar.sha1 b/distribution/licenses/lucene-backward-codecs-6.1.0-snapshot-3a57bea.jar.sha1 new file mode 100644 index 00000000000..a8eb4883e15 --- /dev/null +++ b/distribution/licenses/lucene-backward-codecs-6.1.0-snapshot-3a57bea.jar.sha1 @@ -0,0 +1 @@ +8bc384f55faf99b6d6cee6f34df4fbd3145afb4d \ No newline at end of file diff --git a/distribution/licenses/lucene-core-6.0.1.jar.sha1 b/distribution/licenses/lucene-core-6.0.1.jar.sha1 deleted file mode 100644 index 6bcd7fc87f4..00000000000 --- a/distribution/licenses/lucene-core-6.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -40ccd40bec54266a10aa1f81c565914ede8c0ca0 \ No newline at end of file diff --git a/distribution/licenses/lucene-core-6.1.0-snapshot-3a57bea.jar.sha1 b/distribution/licenses/lucene-core-6.1.0-snapshot-3a57bea.jar.sha1 new file mode 100644 index 00000000000..012fea19fdc --- /dev/null +++ b/distribution/licenses/lucene-core-6.1.0-snapshot-3a57bea.jar.sha1 @@ -0,0 +1 @@ +fe19e7558440e10db4bd7150931dff6a7cf73243 \ No newline at end of file diff --git a/distribution/licenses/lucene-grouping-6.0.1.jar.sha1 b/distribution/licenses/lucene-grouping-6.0.1.jar.sha1 deleted file mode 100644 index b132acc9112..00000000000 --- a/distribution/licenses/lucene-grouping-6.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -010daaae60227fbe719ca95e9b6fcdb5c38d4eba \ No newline at end of file diff --git a/distribution/licenses/lucene-grouping-6.1.0-snapshot-3a57bea.jar.sha1 b/distribution/licenses/lucene-grouping-6.1.0-snapshot-3a57bea.jar.sha1 new file mode 100644 index 00000000000..3b3338ffd2f --- /dev/null +++ b/distribution/licenses/lucene-grouping-6.1.0-snapshot-3a57bea.jar.sha1 @@ -0,0 +1 @@ +c3f0de4cdd185d23bce66c580d9c12adb98182a5 \ No newline at end of file diff --git a/distribution/licenses/lucene-highlighter-6.0.1.jar.sha1 b/distribution/licenses/lucene-highlighter-6.0.1.jar.sha1 deleted file mode 100644 index 95d4b3edab9..00000000000 --- a/distribution/licenses/lucene-highlighter-6.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -65d74c3642e6a86ba905045473b17cc84826527e \ No newline at end of file diff --git a/distribution/licenses/lucene-highlighter-6.1.0-snapshot-3a57bea.jar.sha1 
b/distribution/licenses/lucene-highlighter-6.1.0-snapshot-3a57bea.jar.sha1 new file mode 100644 index 00000000000..255812ed5fb --- /dev/null +++ b/distribution/licenses/lucene-highlighter-6.1.0-snapshot-3a57bea.jar.sha1 @@ -0,0 +1 @@ +ffb7087267bb6076b00c90f97ee36ebe23ea0662 \ No newline at end of file diff --git a/distribution/licenses/lucene-join-6.0.1.jar.sha1 b/distribution/licenses/lucene-join-6.0.1.jar.sha1 deleted file mode 100644 index 07392cf260f..00000000000 --- a/distribution/licenses/lucene-join-6.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2105e2826ce93d1f764e5a0a3afa9ee461d556c1 \ No newline at end of file diff --git a/distribution/licenses/lucene-join-6.1.0-snapshot-3a57bea.jar.sha1 b/distribution/licenses/lucene-join-6.1.0-snapshot-3a57bea.jar.sha1 new file mode 100644 index 00000000000..4231857b4e7 --- /dev/null +++ b/distribution/licenses/lucene-join-6.1.0-snapshot-3a57bea.jar.sha1 @@ -0,0 +1 @@ +d071ad17bed58b3267f6fa0b2a8211f8fe18c912 \ No newline at end of file diff --git a/distribution/licenses/lucene-memory-6.0.1.jar.sha1 b/distribution/licenses/lucene-memory-6.0.1.jar.sha1 deleted file mode 100644 index b9820103d3f..00000000000 --- a/distribution/licenses/lucene-memory-6.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e2cde0688e487a27d08df0c2d81d492b1f4cdc2a \ No newline at end of file diff --git a/distribution/licenses/lucene-memory-6.1.0-snapshot-3a57bea.jar.sha1 b/distribution/licenses/lucene-memory-6.1.0-snapshot-3a57bea.jar.sha1 new file mode 100644 index 00000000000..19aa64ccd80 --- /dev/null +++ b/distribution/licenses/lucene-memory-6.1.0-snapshot-3a57bea.jar.sha1 @@ -0,0 +1 @@ +f5e9b6eefe580a7f65276aca3192ca5796332509 \ No newline at end of file diff --git a/distribution/licenses/lucene-misc-6.0.1.jar.sha1 b/distribution/licenses/lucene-misc-6.0.1.jar.sha1 deleted file mode 100644 index 2670ab628df..00000000000 --- a/distribution/licenses/lucene-misc-6.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e6e59996fc324319d695e41cf25e30e5f1e4c182 \ No newline at end of file diff --git a/distribution/licenses/lucene-misc-6.1.0-snapshot-3a57bea.jar.sha1 b/distribution/licenses/lucene-misc-6.1.0-snapshot-3a57bea.jar.sha1 new file mode 100644 index 00000000000..8480fcc3a49 --- /dev/null +++ b/distribution/licenses/lucene-misc-6.1.0-snapshot-3a57bea.jar.sha1 @@ -0,0 +1 @@ +6b84a79c37b01197130cceb65e5573794f073df1 \ No newline at end of file diff --git a/distribution/licenses/lucene-queries-6.0.1.jar.sha1 b/distribution/licenses/lucene-queries-6.0.1.jar.sha1 deleted file mode 100644 index acaa53f1f8e..00000000000 --- a/distribution/licenses/lucene-queries-6.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -09b0e5862a676ff9e55a1bc6ca37ad578a25cb38 \ No newline at end of file diff --git a/distribution/licenses/lucene-queries-6.1.0-snapshot-3a57bea.jar.sha1 b/distribution/licenses/lucene-queries-6.1.0-snapshot-3a57bea.jar.sha1 new file mode 100644 index 00000000000..a3ed70c12af --- /dev/null +++ b/distribution/licenses/lucene-queries-6.1.0-snapshot-3a57bea.jar.sha1 @@ -0,0 +1 @@ +a9d51b77395dfdd7e6c4cf8c8506ebca5e1bb374 \ No newline at end of file diff --git a/distribution/licenses/lucene-queryparser-6.0.1.jar.sha1 b/distribution/licenses/lucene-queryparser-6.0.1.jar.sha1 deleted file mode 100644 index 48c91d68f44..00000000000 --- a/distribution/licenses/lucene-queryparser-6.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -24f7ba0707aa01be2dd7749adff1659262be8f33 \ No newline at end of file diff --git a/distribution/licenses/lucene-queryparser-6.1.0-snapshot-3a57bea.jar.sha1 
b/distribution/licenses/lucene-queryparser-6.1.0-snapshot-3a57bea.jar.sha1 new file mode 100644 index 00000000000..6bf59bc00e7 --- /dev/null +++ b/distribution/licenses/lucene-queryparser-6.1.0-snapshot-3a57bea.jar.sha1 @@ -0,0 +1 @@ +e322f004e574df119ba08dd8751a743422a46724 \ No newline at end of file diff --git a/distribution/licenses/lucene-sandbox-6.0.1.jar.sha1 b/distribution/licenses/lucene-sandbox-6.0.1.jar.sha1 deleted file mode 100644 index ef843328aa0..00000000000 --- a/distribution/licenses/lucene-sandbox-6.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0faf4c0d7e0adb6fccd830a2d5797d4176b579fe \ No newline at end of file diff --git a/distribution/licenses/lucene-sandbox-6.1.0-snapshot-3a57bea.jar.sha1 b/distribution/licenses/lucene-sandbox-6.1.0-snapshot-3a57bea.jar.sha1 new file mode 100644 index 00000000000..79567427105 --- /dev/null +++ b/distribution/licenses/lucene-sandbox-6.1.0-snapshot-3a57bea.jar.sha1 @@ -0,0 +1 @@ +c7cb119652c906adcdf7fe64445c76d057329d63 \ No newline at end of file diff --git a/distribution/licenses/lucene-spatial-6.0.1.jar.sha1 b/distribution/licenses/lucene-spatial-6.0.1.jar.sha1 deleted file mode 100644 index 25e7232ac14..00000000000 --- a/distribution/licenses/lucene-spatial-6.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4d94d006251c904de3f1503c64746400877d6fa3 \ No newline at end of file diff --git a/distribution/licenses/lucene-spatial-6.1.0-snapshot-3a57bea.jar.sha1 b/distribution/licenses/lucene-spatial-6.1.0-snapshot-3a57bea.jar.sha1 new file mode 100644 index 00000000000..2b06178aaad --- /dev/null +++ b/distribution/licenses/lucene-spatial-6.1.0-snapshot-3a57bea.jar.sha1 @@ -0,0 +1 @@ +ca6c17fe31884e968ae63fd475ce6532b767c7fa \ No newline at end of file diff --git a/distribution/licenses/lucene-spatial-extras-6.0.1.jar.sha1 b/distribution/licenses/lucene-spatial-extras-6.0.1.jar.sha1 deleted file mode 100644 index d421e1b053c..00000000000 --- a/distribution/licenses/lucene-spatial-extras-6.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3de19dbdb889fe87791dae291ac3b340586854c4 \ No newline at end of file diff --git a/distribution/licenses/lucene-spatial-extras-6.1.0-snapshot-3a57bea.jar.sha1 b/distribution/licenses/lucene-spatial-extras-6.1.0-snapshot-3a57bea.jar.sha1 new file mode 100644 index 00000000000..9c487b7746f --- /dev/null +++ b/distribution/licenses/lucene-spatial-extras-6.1.0-snapshot-3a57bea.jar.sha1 @@ -0,0 +1 @@ +49235405e40757474aaa9e8e54946b67fe2a01d9 \ No newline at end of file diff --git a/distribution/licenses/lucene-spatial3d-6.0.1.jar.sha1 b/distribution/licenses/lucene-spatial3d-6.0.1.jar.sha1 deleted file mode 100644 index 348f501bb52..00000000000 --- a/distribution/licenses/lucene-spatial3d-6.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5b1b7a754e83e2d58a819afa279b20b08b48c9c1 \ No newline at end of file diff --git a/distribution/licenses/lucene-spatial3d-6.1.0-snapshot-3a57bea.jar.sha1 b/distribution/licenses/lucene-spatial3d-6.1.0-snapshot-3a57bea.jar.sha1 new file mode 100644 index 00000000000..1eaab9f9955 --- /dev/null +++ b/distribution/licenses/lucene-spatial3d-6.1.0-snapshot-3a57bea.jar.sha1 @@ -0,0 +1 @@ +39f6b29c428327860c1a342bd57800e79ad92ef5 \ No newline at end of file diff --git a/distribution/licenses/lucene-suggest-6.0.1.jar.sha1 b/distribution/licenses/lucene-suggest-6.0.1.jar.sha1 deleted file mode 100644 index 2cb6272d826..00000000000 --- a/distribution/licenses/lucene-suggest-6.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -55886bdaf16ecc6948e94b527837eaa1f16fe988 \ No newline at end of file diff --git 
a/distribution/licenses/lucene-suggest-6.1.0-snapshot-3a57bea.jar.sha1 b/distribution/licenses/lucene-suggest-6.1.0-snapshot-3a57bea.jar.sha1 new file mode 100644 index 00000000000..948dfc4c2b9 --- /dev/null +++ b/distribution/licenses/lucene-suggest-6.1.0-snapshot-3a57bea.jar.sha1 @@ -0,0 +1 @@ +4e9f713d34fd4208bf308ac59132216f96521f13 \ No newline at end of file diff --git a/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc b/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc index 2d5741c2b9e..bd6000c3de7 100644 --- a/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc @@ -148,7 +148,7 @@ The `pattern` analyzer accepts the following parameters: `flags`:: Java regular expression http://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html#field.summary[flags]. - lags should be pipe-separated, eg `"CASE_INSENSITIVE|COMMENTS"`. + Flags should be pipe-separated, eg `"CASE_INSENSITIVE|COMMENTS"`. `lowercase`:: diff --git a/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc b/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc index 72adefa5aec..24cd203ae26 100644 --- a/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc +++ b/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc @@ -21,6 +21,11 @@ The `pattern_replace` character filter accepts the following parameters: `$1`..`$9` syntax, as explained http://docs.oracle.com/javase/8/docs/api/java/util/regex/Matcher.html#appendReplacement-java.lang.StringBuffer-java.lang.String-[here]. +`flags`:: + + Java regular expression http://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html#field.summary[flags]. + Flags should be pipe-separated, eg `"CASE_INSENSITIVE|COMMENTS"`. + [float] === Example configuration diff --git a/docs/reference/docs/refresh.asciidoc b/docs/reference/docs/refresh.asciidoc index 3e5153341c8..dd829e19bc3 100644 --- a/docs/reference/docs/refresh.asciidoc +++ b/docs/reference/docs/refresh.asciidoc @@ -28,6 +28,7 @@ APIs that support it. Take no refresh related actions. The changes made by this request will be made visible at some point after the request returns. +[float] === Choosing which setting to use Unless you have a good reason to wait for the change to become visible always @@ -60,6 +61,7 @@ refresh immediately, `refresh=true` will affect other ongoing request. In general, if you have a running system you don't wish to disturb then `refresh=wait_for` is a smaller modification. +[float] === `refresh=wait_for` Can Force a Refresh If a `refresh=wait_for` request comes in when there are already @@ -74,6 +76,7 @@ contain `"forced_refresh": true`. Bulk requests only take up one slot on each shard that they touch no matter how many times they modify the shard. +[float] === Examples These will create a document and immediately refresh the index so it is visible: diff --git a/docs/reference/search/request/scroll.asciidoc b/docs/reference/search/request/scroll.asciidoc index 9f9558aa979..a082bf3ba4c 100644 --- a/docs/reference/search/request/scroll.asciidoc +++ b/docs/reference/search/request/scroll.asciidoc @@ -263,4 +263,7 @@ curl -XGET 'localhost:9200/twitter/tweet/_search?scroll=1m' -d ' ' -------------------------------------------------- -For append only time-based indices, the `timestamp` field can be used safely. \ No newline at end of file +For append only time-based indices, the `timestamp` field can be used safely. 
+ +NOTE: By default the maximum number of slices allowed per scroll is limited to 1024. +You can update the `index.max_slices_per_scroll` index setting to bypass this limit. \ No newline at end of file diff --git a/docs/reference/search/request/sort.asciidoc b/docs/reference/search/request/sort.asciidoc index 2f2e8882032..fd3dbffc1d6 100644 --- a/docs/reference/search/request/sort.asciidoc +++ b/docs/reference/search/request/sort.asciidoc @@ -26,6 +26,8 @@ PUT /my_index } } } + +GET /_cluster/health?wait_for_status=yellow -------------------------------------------------- // CONSOLE diff --git a/modules/lang-expression/licenses/lucene-expressions-6.0.1.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-6.0.1.jar.sha1 deleted file mode 100644 index 7b3f5a1cef9..00000000000 --- a/modules/lang-expression/licenses/lucene-expressions-6.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2b76056dbd40fb51dc5e8ef71e1919ad23e635a1 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-6.1.0-snapshot-3a57bea.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-6.1.0-snapshot-3a57bea.jar.sha1 new file mode 100644 index 00000000000..271088e86c9 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-6.1.0-snapshot-3a57bea.jar.sha1 @@ -0,0 +1 @@ +e5a4b673918f448006c0531799706abebe9a1db0 \ No newline at end of file diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java index 11f0e4c191a..78d95edecff 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java @@ -23,6 +23,7 @@ package org.elasticsearch.messy.tests; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; @@ -41,7 +42,6 @@ import java.util.Collections; import java.util.List; import java.util.Random; -import static org.apache.lucene.spatial.util.GeoEncodingUtils.TOLERANCE; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; @@ -218,7 +218,7 @@ public class SimpleSortTests extends ESIntegTestCase { assertThat(searchResponse.getHits().getTotalHits(), equalTo(20L)); for (int i = 0; i < 10; i++) { - assertThat("res: " + i + " id: " + searchResponse.getHits().getAt(i).getId(), (Double) searchResponse.getHits().getAt(i).field("min").value(), closeTo(i, TOLERANCE)); + assertThat("res: " + i + " id: " + searchResponse.getHits().getAt(i).getId(), (Double) searchResponse.getHits().getAt(i).field("min").value(), closeTo(i, GeoUtils.TOLERANCE)); } } diff --git a/modules/lang-painless/src/main/antlr/PainlessParser.g4 b/modules/lang-painless/src/main/antlr/PainlessParser.g4 index bf141b53cbd..2e22cd21a2e 100644 --- a/modules/lang-painless/src/main/antlr/PainlessParser.g4 +++ b/modules/lang-painless/src/main/antlr/PainlessParser.g4 @@ -22,7 +22,15 @@ parser grammar PainlessParser; options { tokenVocab=PainlessLexer; } source - : statement* EOF + : function* statement* EOF + ; 
+ +function + : decltype ID parameters block + ; + +parameters + : LP ( decltype ID ( COMMA decltype ID )* )? RP ; // Note we use a predicate on the if/else case here to prevent the @@ -144,6 +152,7 @@ primary[boolean c] returns [boolean s = true] | { $c }? LP unary[true] RP # chainprec | STRING # string | ID # variable + | ID arguments # calllocal | NEW TYPE arguments # newobject ; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java index d0a17f64a48..8fa9f5d583e 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java @@ -20,7 +20,6 @@ package org.elasticsearch.painless; import org.elasticsearch.bootstrap.BootstrapInfo; -import org.elasticsearch.painless.Variables.Reserved; import org.elasticsearch.painless.antlr.Walker; import org.elasticsearch.painless.node.SSource; @@ -36,8 +35,7 @@ import static org.elasticsearch.painless.WriterConstants.CLASS_NAME; /** * The Compiler is the entry point for generating a Painless script. The compiler will receive a Painless * tree based on the type of input passed in (currently only ANTLR). Two passes will then be run over the tree, - * one for analysis using the {@link Analyzer} and another to generate the actual byte code using ASM in - * the {@link Writer}. + * one for analysis and another to generate the actual byte code using ASM using the root of the tree {@link SSource}. */ final class Compiler { @@ -100,18 +98,17 @@ final class Compiler { " plugin if a script longer than this length is a requirement."); } - Reserved reserved = new Reserved(); - SSource root = Walker.buildPainlessTree(name, source, reserved, settings); - Variables variables = Analyzer.analyze(reserved, root); - BitSet expressions = new BitSet(source.length()); - byte[] bytes = Writer.write(settings, name, source, variables, root, expressions); + SSource root = Walker.buildPainlessTree(name, source, settings); + + root.analyze(); + root.write(); try { - Class clazz = loader.define(CLASS_NAME, bytes); + Class clazz = loader.define(CLASS_NAME, root.getBytes()); java.lang.reflect.Constructor constructor = clazz.getConstructor(String.class, String.class, BitSet.class); - return constructor.newInstance(name, source, expressions); + return constructor.newInstance(name, source, root.getExpressions()); } catch (Exception exception) { // Catch everything to let the user know this is something caused internally. 
throw new IllegalStateException("An internal error occurred attempting to define the script [" + name + "].", exception); } @@ -130,11 +127,12 @@ final class Compiler { " plugin if a script longer than this length is a requirement."); } - Reserved reserved = new Reserved(); - SSource root = Walker.buildPainlessTree(name, source, reserved, settings); - Variables variables = Analyzer.analyze(reserved, root); + SSource root = Walker.buildPainlessTree(name, source, settings); - return Writer.write(settings, name, source, variables, root, new BitSet(source.length())); + root.analyze(); + root.write(); + + return root.getBytes(); } /** diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java index 1285b4cfb5a..8a91bd7d7fd 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java @@ -45,9 +45,9 @@ import java.util.Spliterator; * methods and fields during at both compile-time and runtime. */ public final class Definition { - + private static final List DEFINITION_FILES = Collections.unmodifiableList( - Arrays.asList("org.elasticsearch.txt", + Arrays.asList("org.elasticsearch.txt", "java.lang.txt", "java.math.txt", "java.text.txt", @@ -188,8 +188,8 @@ public final class Definition { public final int modifiers; public final MethodHandle handle; - private Method(String name, Struct owner, Type rtn, List arguments, - org.objectweb.asm.commons.Method method, int modifiers, MethodHandle handle) { + public Method(String name, Struct owner, Type rtn, List arguments, + org.objectweb.asm.commons.Method method, int modifiers, MethodHandle handle) { this.name = name; this.owner = owner; this.rtn = rtn; @@ -286,7 +286,7 @@ public final class Definition { public final Map staticMembers; public final Map members; - + private final SetOnce functionalMethod; private Struct(final String name, final Class clazz, final org.objectweb.asm.Type type) { @@ -300,8 +300,8 @@ public final class Definition { staticMembers = new HashMap<>(); members = new HashMap<>(); - - functionalMethod = new SetOnce(); + + functionalMethod = new SetOnce<>(); } private Struct(final Struct struct) { @@ -315,7 +315,7 @@ public final class Definition { staticMembers = Collections.unmodifiableMap(struct.staticMembers); members = Collections.unmodifiableMap(struct.members); - + functionalMethod = struct.functionalMethod; } @@ -342,8 +342,8 @@ public final class Definition { public int hashCode() { return name.hashCode(); } - - /** + + /** * If this class is a functional interface according to JLS, returns its method. * Otherwise returns null. 
*/ @@ -637,7 +637,7 @@ public final class Definition { final org.objectweb.asm.commons.Method asm = org.objectweb.asm.commons.Method.getMethod(reflect); final Type returnType = getTypeInternal("void"); final MethodHandle handle; - + try { handle = MethodHandles.publicLookup().in(owner.clazz).unreflectConstructor(reflect); } catch (final IllegalAccessException exception) { @@ -645,7 +645,7 @@ public final class Definition { " not found for class [" + owner.clazz.getName() + "]" + " with arguments " + Arrays.toString(classes) + "."); } - + final Method constructor = new Method(name, owner, returnType, Arrays.asList(args), asm, reflect.getModifiers(), handle); owner.constructors.put(methodKey, constructor); @@ -755,7 +755,7 @@ public final class Definition { " method [" + name + "]" + " within the struct [" + owner.name + "]."); } - + final org.objectweb.asm.commons.Method asm = org.objectweb.asm.commons.Method.getMethod(reflect); MethodHandle handle; @@ -856,7 +856,7 @@ public final class Definition { throw new ClassCastException("Child struct [" + child.name + "]" + " is not a super type of owner struct [" + owner.name + "] in copy."); } - + for (Map.Entry kvPair : child.methods.entrySet()) { MethodKey methodKey = kvPair.getKey(); Method method = kvPair.getValue(); @@ -953,7 +953,7 @@ public final class Definition { runtimeMap.put(struct.clazz, new RuntimeClass(methods, getters, setters)); } - + /** computes the functional interface method for a class, or returns null */ private Method computeFunctionalInterfaceMethod(Struct clazz) { if (!clazz.clazz.isInterface()) { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Variables.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Locals.java similarity index 52% rename from modules/lang-painless/src/main/java/org/elasticsearch/painless/Variables.java rename to modules/lang-painless/src/main/java/org/elasticsearch/painless/Locals.java index 4905011520a..d102114bdeb 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Variables.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Locals.java @@ -19,16 +19,21 @@ package org.elasticsearch.painless; +import org.elasticsearch.painless.Definition.Method; +import org.elasticsearch.painless.Definition.MethodKey; import org.elasticsearch.painless.Definition.Type; import java.util.ArrayDeque; +import java.util.Collections; import java.util.Deque; import java.util.Iterator; +import java.util.List; +import java.util.Map; /** - * Tracks variables across compilation phases. + * Tracks user defined methods and variables across compilation phases. */ -public final class Variables { +public final class Locals { /** * Tracks reserved variables. Must be given to any source of input @@ -36,7 +41,15 @@ public final class Variables { * are known ahead of time to assign appropriate slots without * being wasteful. 
*/ - public static final class Reserved { + public interface Reserved { + void markReserved(String name); + boolean isReserved(String name); + + void setMaxLoopCounter(int max); + int getMaxLoopCounter(); + } + + public static final class ExecuteReserved implements Reserved { public static final String THIS = "#this"; public static final String PARAMS = "params"; public static final String SCORER = "#scorer"; @@ -46,10 +59,11 @@ public static final String CTX = "ctx"; public static final String LOOP = "#loop"; - boolean score = false; - boolean ctx = false; - boolean loop = false; + private boolean score = false; + private boolean ctx = false; + private int maxLoopCounter = 0; + @Override public void markReserved(String name) { if (SCORE.equals(name)) { score = true; @@ -58,13 +72,53 @@ } } + @Override public boolean isReserved(String name) { return name.equals(THIS) || name.equals(PARAMS) || name.equals(SCORER) || name.equals(DOC) || name.equals(VALUE) || name.equals(SCORE) || name.equals(CTX) || name.equals(LOOP); - } + } - public void usesLoop() { - loop = true; + public boolean usesScore() { + return score; + } + + public boolean usesCtx() { + return ctx; + } + + @Override + public void setMaxLoopCounter(int max) { + maxLoopCounter = max; + } + + @Override + public int getMaxLoopCounter() { + return maxLoopCounter; + } + } + + public static final class FunctionReserved implements Reserved { + public static final String THIS = "#this"; + public static final String LOOP = "#loop"; + + private int maxLoopCounter = 0; + + public void markReserved(String name) { + // Do nothing. + } + + public boolean isReserved(String name) { + return name.equals(THIS) || name.equals(LOOP); + } + + @Override + public void setMaxLoopCounter(int max) { + maxLoopCounter = max; + } + + @Override + public int getMaxLoopCounter() { + return maxLoopCounter; } } @@ -86,52 +140,97 @@ } } - final Reserved reserved; + public static final class Parameter { + public final Location location; + public final String name; + public final Type type; + + public Parameter(Location location, String name, Type type) { + this.location = location; + this.name = name; + this.type = type; + } + } + + private final Reserved reserved; + private final Map<MethodKey, Method> methods; + private final Type rtnType; // TODO: this datastructure runs in linear time for nearly all operations. use linkedhashset instead? private final Deque<Integer> scopes = new ArrayDeque<>(); private final Deque<Variable> variables = new ArrayDeque<>(); - public Variables(Reserved reserved) { + public Locals(ExecuteReserved reserved, Map<MethodKey, Method> methods) { + this.reserved = reserved; + this.methods = Collections.unmodifiableMap(methods); + this.rtnType = Definition.OBJECT_TYPE; + incrementScope(); + // Method variables. + // This reference. Internal use only. - addVariable(null, Definition.getType("Object"), Reserved.THIS, true, true); + addVariable(null, Definition.getType("Object"), ExecuteReserved.THIS, true, true); // Input map of variables passed to the script. - addVariable(null, Definition.getType("Map"), Reserved.PARAMS, true, true); + addVariable(null, Definition.getType("Map"), ExecuteReserved.PARAMS, true, true); // Scorer parameter passed to the script. Internal use only. - addVariable(null, Definition.DEF_TYPE, Reserved.SCORER, true, true); + addVariable(null, Definition.DEF_TYPE, ExecuteReserved.SCORER, true, true); // Doc parameter passed to the script. TODO: Currently working as a Map, we can do better?
- addVariable(null, Definition.getType("Map"), Reserved.DOC, true, true); + addVariable(null, Definition.getType("Map"), ExecuteReserved.DOC, true, true); // Aggregation _value parameter passed to the script. - addVariable(null, Definition.DEF_TYPE, Reserved.VALUE, true, true); + addVariable(null, Definition.DEF_TYPE, ExecuteReserved.VALUE, true, true); // Shortcut variables. // Document's score as a read-only double. - if (reserved.score) { - addVariable(null, Definition.DOUBLE_TYPE, Reserved.SCORE, true, true); + if (reserved.usesScore()) { + addVariable(null, Definition.DOUBLE_TYPE, ExecuteReserved.SCORE, true, true); } // The ctx map set by executable scripts as a read-only map. - if (reserved.ctx) { - addVariable(null, Definition.getType("Map"), Reserved.CTX, true, true); + if (reserved.usesCtx()) { + addVariable(null, Definition.getType("Map"), ExecuteReserved.CTX, true, true); } // Loop counter to catch infinite loops. Internal use only. - if (reserved.loop) { - addVariable(null, Definition.INT_TYPE, Reserved.LOOP, true, true); + if (reserved.getMaxLoopCounter() > 0) { + addVariable(null, Definition.INT_TYPE, ExecuteReserved.LOOP, true, true); } } + public Locals(FunctionReserved reserved, Locals locals, Type rtnType, List parameters) { + this.reserved = reserved; + this.methods = locals.methods; + this.rtnType = rtnType; + + incrementScope(); + + for (Parameter parameter : parameters) { + addVariable(parameter.location, parameter.type, parameter.name, false, false); + } + + // Loop counter to catch infinite loops. Internal use only. + if (reserved.getMaxLoopCounter() > 0) { + addVariable(null, Definition.INT_TYPE, ExecuteReserved.LOOP, true, true); + } + } + + public int getMaxLoopCounter() { + return reserved.getMaxLoopCounter(); + } + + public Method getMethod(MethodKey key) { + return methods.get(key); + } + + public Type getReturnType() { + return rtnType; + } + public void incrementScope() { scopes.push(0); } @@ -142,9 +241,9 @@ public final class Variables { while (remove > 0) { Variable variable = variables.pop(); - // TODO: is this working? the code reads backwards... + // This checks whether or not a variable is used when exiting a local scope. 
if (variable.read) { - throw variable.location.createError(new IllegalArgumentException("Variable [" + variable.name + "] never used.")); + throw variable.location.createError(new IllegalArgumentException("Variable [" + variable.name + "] is never used.")); } --remove; @@ -162,26 +261,30 @@ public final class Variables { } } - throw location.createError(new IllegalArgumentException("Variable [" + name + "] not defined.")); + throw location.createError(new IllegalArgumentException("Variable [" + name + "] is not defined.")); } - private boolean variableExists(String name) { - return variables.contains(name); + public boolean isVariable(String name) { + Iterator itr = variables.iterator(); + + while (itr.hasNext()) { + Variable variable = itr.next(); + + if (variable.name.equals(name)) { + return true; + } + } + + return false; } public Variable addVariable(Location location, Type type, String name, boolean readonly, boolean reserved) { if (!reserved && this.reserved.isReserved(name)) { - throw location.createError(new IllegalArgumentException("Variable name [" + name + "] is reserved.")); + throw location.createError(new IllegalArgumentException("Variable [" + name + "] is reserved.")); } - if (variableExists(name)) { - throw new IllegalArgumentException("Variable name [" + name + "] already defined."); - } - - try { - Definition.getType(name); - } catch (IllegalArgumentException exception) { - // Do nothing. + if (isVariable(name)) { + throw location.createError(new IllegalArgumentException("Variable [" + name + "] is already defined.")); } Variable previous = variables.peekFirst(); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java index d7c09a9c122..641a5582ab8 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java @@ -90,10 +90,10 @@ public final class MethodWriter extends GeneratorAdapter { private final ClassWriter parent; private final BitSet statements; - private final Deque> stringConcatArgs = (INDY_STRING_CONCAT_BOOTSTRAP_HANDLE == null) ? - null : new ArrayDeque<>(); + private final Deque> stringConcatArgs = + (INDY_STRING_CONCAT_BOOTSTRAP_HANDLE == null) ? null : new ArrayDeque<>(); - MethodWriter(int access, Method method, ClassWriter cw, BitSet statements) { + public MethodWriter(int access, Method method, ClassWriter cw, BitSet statements) { super(Opcodes.ASM5, cw.visitMethod(access, method.getName(), method.getDescriptor(), null, null), access, method.getName(), method.getDescriptor()); @@ -104,7 +104,7 @@ public final class MethodWriter extends GeneratorAdapter { /** * @return A new {@link MethodWriter} with the specified access and signature. */ - MethodWriter newMethodWriter(int access, Method method) { + public MethodWriter newMethodWriter(int access, Method method) { return new MethodWriter(access, method, parent, statements); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Writer.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Writer.java deleted file mode 100644 index 4d4a778d558..00000000000 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Writer.java +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.painless; - -import org.elasticsearch.painless.Variables.Reserved; -import org.elasticsearch.painless.Variables.Variable; -import org.elasticsearch.painless.node.SSource; -import org.objectweb.asm.ClassWriter; -import org.objectweb.asm.Opcodes; -import org.objectweb.asm.commons.GeneratorAdapter; - -import static org.elasticsearch.painless.WriterConstants.BASE_CLASS_TYPE; -import static org.elasticsearch.painless.WriterConstants.CLASS_TYPE; -import static org.elasticsearch.painless.WriterConstants.CONSTRUCTOR; -import static org.elasticsearch.painless.WriterConstants.EXECUTE; -import static org.elasticsearch.painless.WriterConstants.MAP_GET; -import static org.elasticsearch.painless.WriterConstants.MAP_TYPE; - -import java.util.BitSet; - -/** - * Runs the writing phase of compilation using the Painless AST. - */ -final class Writer { - - static byte[] write(CompilerSettings settings, String name, String source, Variables variables, SSource root, BitSet expressions) { - return new Writer(settings, name, source, variables, root, expressions).getBytes(); - } - - private final CompilerSettings settings; - private final String scriptName; - private final String source; - private final Variables variables; - private final SSource root; - - private final ClassWriter writer; - private final MethodWriter adapter; - - private Writer(CompilerSettings settings, String name, String source, Variables variables, SSource root, BitSet expressions) { - this.settings = settings; - this.scriptName = name; - this.source = source; - this.variables = variables; - this.root = root; - - writer = new ClassWriter(ClassWriter.COMPUTE_FRAMES | ClassWriter.COMPUTE_MAXS); - - writeBegin(); - writeConstructor(); - - adapter = new MethodWriter(Opcodes.ACC_PUBLIC, EXECUTE, writer, expressions); - - writeExecute(); - writeEnd(); - } - - private void writeBegin() { - final int version = Opcodes.V1_8; - final int access = Opcodes.ACC_PUBLIC | Opcodes.ACC_SUPER | Opcodes.ACC_FINAL; - final String base = BASE_CLASS_TYPE.getInternalName(); - final String name = CLASS_TYPE.getInternalName(); - - // apply marker interface NeedsScore if we use the score! - final String interfaces[] = variables.reserved.score ? 
- new String[] { WriterConstants.NEEDS_SCORE_TYPE.getInternalName() } : null; - - writer.visit(version, access, name, null, base, interfaces); - writer.visitSource(Location.computeSourceName(scriptName,source), null); - } - - private void writeConstructor() { - final GeneratorAdapter constructor = new GeneratorAdapter(Opcodes.ACC_PUBLIC, CONSTRUCTOR, null, null, writer); - constructor.loadThis(); - constructor.loadArgs(); - constructor.invokeConstructor(org.objectweb.asm.Type.getType(Executable.class), CONSTRUCTOR); - constructor.returnValue(); - constructor.endMethod(); - } - - private void writeExecute() { - if (variables.reserved.score) { - // if the _score value is used, we do this once: - // final double _score = scorer.score(); - final Variable scorer = variables.getVariable(null, Reserved.SCORER); - final Variable score = variables.getVariable(null, Reserved.SCORE); - - adapter.visitVarInsn(Opcodes.ALOAD, scorer.slot); - adapter.invokeVirtual(WriterConstants.SCORER_TYPE, WriterConstants.SCORER_SCORE); - adapter.visitInsn(Opcodes.F2D); - adapter.visitVarInsn(Opcodes.DSTORE, score.slot); - } - - if (variables.reserved.ctx) { - // if the _ctx value is used, we do this once: - // final Map ctx = input.get("ctx"); - - final Variable input = variables.getVariable(null, Reserved.PARAMS); - final Variable ctx = variables.getVariable(null, Reserved.CTX); - - adapter.visitVarInsn(Opcodes.ALOAD, input.slot); - adapter.push(Reserved.CTX); - adapter.invokeInterface(MAP_TYPE, MAP_GET); - adapter.visitVarInsn(Opcodes.ASTORE, ctx.slot); - } - - if (variables.reserved.loop) { - // if there is infinite loop protection, we do this once: - // int #loop = settings.getMaxLoopCounter() - - final Variable loop = variables.getVariable(null, Reserved.LOOP); - - adapter.push(settings.getMaxLoopCounter()); - adapter.visitVarInsn(Opcodes.ISTORE, loop.slot); - } - - root.write(adapter); - adapter.endMethod(); - } - - private void writeEnd() { - writer.visitEnd(); - } - - private byte[] getBytes() { - return writer.toByteArray(); - } -} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java index 7129df80a08..4ec835b02c5 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java @@ -107,15 +107,15 @@ public final class WriterConstants { public final static Method DEF_LTE_CALL = getAsmMethod(boolean.class, "lte", Object.class, Object.class); public final static Method DEF_GT_CALL = getAsmMethod(boolean.class, "gt" , Object.class, Object.class); public final static Method DEF_GTE_CALL = getAsmMethod(boolean.class, "gte", Object.class, Object.class); - + /** invokedynamic bootstrap for lambda expression/method references */ public final static MethodType LAMBDA_BOOTSTRAP_TYPE = - MethodType.methodType(CallSite.class, MethodHandles.Lookup.class, String.class, + MethodType.methodType(CallSite.class, MethodHandles.Lookup.class, String.class, MethodType.class, Object[].class); public final static Handle LAMBDA_BOOTSTRAP_HANDLE = new Handle(Opcodes.H_INVOKESTATIC, Type.getInternalName(LambdaMetafactory.class), "altMetafactory", LAMBDA_BOOTSTRAP_TYPE.toMethodDescriptorString()); - + /** dynamic invokedynamic bootstrap for indy string concats (Java 9+) */ public final static Handle INDY_STRING_CONCAT_BOOTSTRAP_HANDLE; static { @@ -152,7 +152,7 @@ public final class 
WriterConstants { public final static Method CHECKEQUALS = getAsmMethod(boolean.class, "checkEquals", Object.class, Object.class); - public static Method getAsmMethod(final Class rtype, final String name, final Class... ptypes) { + private static Method getAsmMethod(final Class rtype, final String name, final Class... ptypes) { return new Method(name, MethodType.methodType(rtype, ptypes).toMethodDescriptorString()); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java index a3ccc415028..5eca024e812 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java @@ -28,17 +28,17 @@ class PainlessParser extends Parser { STRING=68, TRUE=69, FALSE=70, NULL=71, TYPE=72, ID=73, DOTINTEGER=74, DOTID=75; public static final int - RULE_source = 0, RULE_statement = 1, RULE_trailer = 2, RULE_block = 3, - RULE_empty = 4, RULE_initializer = 5, RULE_afterthought = 6, RULE_declaration = 7, - RULE_decltype = 8, RULE_funcref = 9, RULE_declvar = 10, RULE_trap = 11, - RULE_delimiter = 12, RULE_expression = 13, RULE_unary = 14, RULE_chain = 15, - RULE_primary = 16, RULE_secondary = 17, RULE_dot = 18, RULE_brace = 19, - RULE_arguments = 20, RULE_argument = 21; + RULE_source = 0, RULE_function = 1, RULE_parameters = 2, RULE_statement = 3, + RULE_trailer = 4, RULE_block = 5, RULE_empty = 6, RULE_initializer = 7, + RULE_afterthought = 8, RULE_declaration = 9, RULE_decltype = 10, RULE_funcref = 11, + RULE_declvar = 12, RULE_trap = 13, RULE_delimiter = 14, RULE_expression = 15, + RULE_unary = 16, RULE_chain = 17, RULE_primary = 18, RULE_secondary = 19, + RULE_dot = 20, RULE_brace = 21, RULE_arguments = 22, RULE_argument = 23; public static final String[] ruleNames = { - "source", "statement", "trailer", "block", "empty", "initializer", "afterthought", - "declaration", "decltype", "funcref", "declvar", "trap", "delimiter", - "expression", "unary", "chain", "primary", "secondary", "dot", "brace", - "arguments", "argument" + "source", "function", "parameters", "statement", "trailer", "block", "empty", + "initializer", "afterthought", "declaration", "decltype", "funcref", "declvar", + "trap", "delimiter", "expression", "unary", "chain", "primary", "secondary", + "dot", "brace", "arguments", "argument" }; private static final String[] _LITERAL_NAMES = { @@ -113,6 +113,12 @@ class PainlessParser extends Parser { } public static class SourceContext extends ParserRuleContext { public TerminalNode EOF() { return getToken(PainlessParser.EOF, 0); } + public List function() { + return getRuleContexts(FunctionContext.class); + } + public FunctionContext function(int i) { + return getRuleContext(FunctionContext.class,i); + } public List statement() { return getRuleContexts(StatementContext.class); } @@ -137,23 +143,39 @@ class PainlessParser extends Parser { int _alt; enterOuterAlt(_localctx, 1); { - setState(47); + setState(51); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,0,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(44); + setState(48); + function(); + } + } + } + setState(53); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,0,_ctx); + } + setState(57); + _errHandler.sync(this); + _alt = 
getInterpreter().adaptivePredict(_input,1,_ctx); + while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { + if ( _alt==1 ) { + { + { + setState(54); statement(); } } } - setState(49); + setState(59); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,0,_ctx); + _alt = getInterpreter().adaptivePredict(_input,1,_ctx); } - setState(50); + setState(60); match(EOF); } } @@ -168,6 +190,136 @@ class PainlessParser extends Parser { return _localctx; } + public static class FunctionContext extends ParserRuleContext { + public DecltypeContext decltype() { + return getRuleContext(DecltypeContext.class,0); + } + public TerminalNode ID() { return getToken(PainlessParser.ID, 0); } + public ParametersContext parameters() { + return getRuleContext(ParametersContext.class,0); + } + public BlockContext block() { + return getRuleContext(BlockContext.class,0); + } + public FunctionContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_function; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitFunction(this); + else return visitor.visitChildren(this); + } + } + + public final FunctionContext function() throws RecognitionException { + FunctionContext _localctx = new FunctionContext(_ctx, getState()); + enterRule(_localctx, 2, RULE_function); + try { + enterOuterAlt(_localctx, 1); + { + setState(62); + decltype(); + setState(63); + match(ID); + setState(64); + parameters(); + setState(65); + block(); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class ParametersContext extends ParserRuleContext { + public TerminalNode LP() { return getToken(PainlessParser.LP, 0); } + public TerminalNode RP() { return getToken(PainlessParser.RP, 0); } + public List decltype() { + return getRuleContexts(DecltypeContext.class); + } + public DecltypeContext decltype(int i) { + return getRuleContext(DecltypeContext.class,i); + } + public List ID() { return getTokens(PainlessParser.ID); } + public TerminalNode ID(int i) { + return getToken(PainlessParser.ID, i); + } + public List COMMA() { return getTokens(PainlessParser.COMMA); } + public TerminalNode COMMA(int i) { + return getToken(PainlessParser.COMMA, i); + } + public ParametersContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_parameters; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitParameters(this); + else return visitor.visitChildren(this); + } + } + + public final ParametersContext parameters() throws RecognitionException { + ParametersContext _localctx = new ParametersContext(_ctx, getState()); + enterRule(_localctx, 4, RULE_parameters); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(67); + match(LP); + setState(79); + _la = _input.LA(1); + if (_la==TYPE) { + { + setState(68); + decltype(); + setState(69); + match(ID); + setState(76); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==COMMA) { + { + { + setState(70); + match(COMMA); + setState(71); + decltype(); + setState(72); + match(ID); + } + } + setState(78); + _errHandler.sync(this); + _la 
= _input.LA(1); + } + } + } + + setState(81); + match(RP); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class StatementContext extends ParserRuleContext { public StatementContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); @@ -396,38 +548,38 @@ class PainlessParser extends Parser { public final StatementContext statement() throws RecognitionException { StatementContext _localctx = new StatementContext(_ctx, getState()); - enterRule(_localctx, 2, RULE_statement); + enterRule(_localctx, 6, RULE_statement); try { int _alt; - setState(130); - switch ( getInterpreter().adaptivePredict(_input,8,_ctx) ) { + setState(161); + switch ( getInterpreter().adaptivePredict(_input,11,_ctx) ) { case 1: _localctx = new IfContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(52); + setState(83); match(IF); - setState(53); + setState(84); match(LP); - setState(54); + setState(85); expression(0); - setState(55); + setState(86); match(RP); - setState(56); + setState(87); trailer(); - setState(60); - switch ( getInterpreter().adaptivePredict(_input,1,_ctx) ) { + setState(91); + switch ( getInterpreter().adaptivePredict(_input,4,_ctx) ) { case 1: { - setState(57); + setState(88); match(ELSE); - setState(58); + setState(89); trailer(); } break; case 2: { - setState(59); + setState(90); if (!( _input.LA(1) != ELSE )) throw new FailedPredicateException(this, " _input.LA(1) != ELSE "); } break; @@ -438,25 +590,25 @@ class PainlessParser extends Parser { _localctx = new WhileContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(62); + setState(93); match(WHILE); - setState(63); + setState(94); match(LP); - setState(64); + setState(95); expression(0); - setState(65); + setState(96); match(RP); - setState(68); - switch ( getInterpreter().adaptivePredict(_input,2,_ctx) ) { + setState(99); + switch ( getInterpreter().adaptivePredict(_input,5,_ctx) ) { case 1: { - setState(66); + setState(97); trailer(); } break; case 2: { - setState(67); + setState(98); empty(); } break; @@ -467,19 +619,19 @@ class PainlessParser extends Parser { _localctx = new DoContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(70); + setState(101); match(DO); - setState(71); + setState(102); block(); - setState(72); + setState(103); match(WHILE); - setState(73); + setState(104); match(LP); - setState(74); + setState(105); expression(0); - setState(75); + setState(106); match(RP); - setState(76); + setState(107); delimiter(); } break; @@ -487,54 +639,54 @@ class PainlessParser extends Parser { _localctx = new ForContext(_localctx); enterOuterAlt(_localctx, 4); { - setState(78); + setState(109); match(FOR); - setState(79); + setState(110); match(LP); - setState(81); - switch ( getInterpreter().adaptivePredict(_input,3,_ctx) ) { + setState(112); + switch ( getInterpreter().adaptivePredict(_input,6,_ctx) ) { case 1: { - setState(80); + setState(111); initializer(); } break; } - setState(83); + setState(114); match(SEMICOLON); - setState(85); - switch ( getInterpreter().adaptivePredict(_input,4,_ctx) ) { + setState(116); + switch ( getInterpreter().adaptivePredict(_input,7,_ctx) ) { case 1: { - setState(84); + setState(115); expression(0); } break; } - setState(87); + setState(118); match(SEMICOLON); - setState(89); - switch ( getInterpreter().adaptivePredict(_input,5,_ctx) ) { + setState(120); + switch ( 
getInterpreter().adaptivePredict(_input,8,_ctx) ) { case 1: { - setState(88); + setState(119); afterthought(); } break; } - setState(91); + setState(122); match(RP); - setState(94); - switch ( getInterpreter().adaptivePredict(_input,6,_ctx) ) { + setState(125); + switch ( getInterpreter().adaptivePredict(_input,9,_ctx) ) { case 1: { - setState(92); + setState(123); trailer(); } break; case 2: { - setState(93); + setState(124); empty(); } break; @@ -545,21 +697,21 @@ class PainlessParser extends Parser { _localctx = new EachContext(_localctx); enterOuterAlt(_localctx, 5); { - setState(96); + setState(127); match(FOR); - setState(97); + setState(128); match(LP); - setState(98); + setState(129); decltype(); - setState(99); + setState(130); match(ID); - setState(100); + setState(131); match(COLON); - setState(101); + setState(132); expression(0); - setState(102); + setState(133); match(RP); - setState(103); + setState(134); trailer(); } break; @@ -567,9 +719,9 @@ class PainlessParser extends Parser { _localctx = new DeclContext(_localctx); enterOuterAlt(_localctx, 6); { - setState(105); + setState(136); declaration(); - setState(106); + setState(137); delimiter(); } break; @@ -577,9 +729,9 @@ class PainlessParser extends Parser { _localctx = new ContinueContext(_localctx); enterOuterAlt(_localctx, 7); { - setState(108); + setState(139); match(CONTINUE); - setState(109); + setState(140); delimiter(); } break; @@ -587,9 +739,9 @@ class PainlessParser extends Parser { _localctx = new BreakContext(_localctx); enterOuterAlt(_localctx, 8); { - setState(110); + setState(141); match(BREAK); - setState(111); + setState(142); delimiter(); } break; @@ -597,11 +749,11 @@ class PainlessParser extends Parser { _localctx = new ReturnContext(_localctx); enterOuterAlt(_localctx, 9); { - setState(112); + setState(143); match(RETURN); - setState(113); + setState(144); expression(0); - setState(114); + setState(145); delimiter(); } break; @@ -609,11 +761,11 @@ class PainlessParser extends Parser { _localctx = new TryContext(_localctx); enterOuterAlt(_localctx, 10); { - setState(116); + setState(147); match(TRY); - setState(117); + setState(148); block(); - setState(119); + setState(150); _errHandler.sync(this); _alt = 1; do { @@ -621,7 +773,7 @@ class PainlessParser extends Parser { case 1: { { - setState(118); + setState(149); trap(); } } @@ -629,9 +781,9 @@ class PainlessParser extends Parser { default: throw new NoViableAltException(this); } - setState(121); + setState(152); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,7,_ctx); + _alt = getInterpreter().adaptivePredict(_input,10,_ctx); } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); } break; @@ -639,11 +791,11 @@ class PainlessParser extends Parser { _localctx = new ThrowContext(_localctx); enterOuterAlt(_localctx, 11); { - setState(123); + setState(154); match(THROW); - setState(124); + setState(155); expression(0); - setState(125); + setState(156); delimiter(); } break; @@ -651,9 +803,9 @@ class PainlessParser extends Parser { _localctx = new ExprContext(_localctx); enterOuterAlt(_localctx, 12); { - setState(127); + setState(158); expression(0); - setState(128); + setState(159); delimiter(); } break; @@ -690,21 +842,21 @@ class PainlessParser extends Parser { public final TrailerContext trailer() throws RecognitionException { TrailerContext _localctx = new TrailerContext(_ctx, getState()); - enterRule(_localctx, 4, RULE_trailer); + enterRule(_localctx, 8, RULE_trailer); try { - setState(134); - switch 
( getInterpreter().adaptivePredict(_input,9,_ctx) ) { + setState(165); + switch ( getInterpreter().adaptivePredict(_input,12,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(132); + setState(163); block(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(133); + setState(164); statement(); } break; @@ -743,30 +895,30 @@ class PainlessParser extends Parser { public final BlockContext block() throws RecognitionException { BlockContext _localctx = new BlockContext(_ctx, getState()); - enterRule(_localctx, 6, RULE_block); + enterRule(_localctx, 10, RULE_block); try { int _alt; enterOuterAlt(_localctx, 1); { - setState(136); + setState(167); match(LBRACK); - setState(140); + setState(171); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,10,_ctx); + _alt = getInterpreter().adaptivePredict(_input,13,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(137); + setState(168); statement(); } } } - setState(142); + setState(173); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,10,_ctx); + _alt = getInterpreter().adaptivePredict(_input,13,_ctx); } - setState(143); + setState(174); match(RBRACK); } } @@ -796,11 +948,11 @@ class PainlessParser extends Parser { public final EmptyContext empty() throws RecognitionException { EmptyContext _localctx = new EmptyContext(_ctx, getState()); - enterRule(_localctx, 8, RULE_empty); + enterRule(_localctx, 12, RULE_empty); try { enterOuterAlt(_localctx, 1); { - setState(145); + setState(176); match(SEMICOLON); } } @@ -835,21 +987,21 @@ class PainlessParser extends Parser { public final InitializerContext initializer() throws RecognitionException { InitializerContext _localctx = new InitializerContext(_ctx, getState()); - enterRule(_localctx, 10, RULE_initializer); + enterRule(_localctx, 14, RULE_initializer); try { - setState(149); - switch ( getInterpreter().adaptivePredict(_input,11,_ctx) ) { + setState(180); + switch ( getInterpreter().adaptivePredict(_input,14,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(147); + setState(178); declaration(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(148); + setState(179); expression(0); } break; @@ -883,11 +1035,11 @@ class PainlessParser extends Parser { public final AfterthoughtContext afterthought() throws RecognitionException { AfterthoughtContext _localctx = new AfterthoughtContext(_ctx, getState()); - enterRule(_localctx, 12, RULE_afterthought); + enterRule(_localctx, 16, RULE_afterthought); try { enterOuterAlt(_localctx, 1); { - setState(151); + setState(182); expression(0); } } @@ -929,28 +1081,28 @@ class PainlessParser extends Parser { public final DeclarationContext declaration() throws RecognitionException { DeclarationContext _localctx = new DeclarationContext(_ctx, getState()); - enterRule(_localctx, 14, RULE_declaration); + enterRule(_localctx, 18, RULE_declaration); int _la; try { enterOuterAlt(_localctx, 1); { - setState(153); + setState(184); decltype(); - setState(154); + setState(185); declvar(); - setState(159); + setState(190); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(155); + setState(186); match(COMMA); - setState(156); + setState(187); declvar(); } } - setState(161); + setState(192); _errHandler.sync(this); _la = _input.LA(1); } @@ -990,26 +1142,26 @@ class PainlessParser extends Parser { public final DecltypeContext decltype() throws RecognitionException { DecltypeContext _localctx = new 
DecltypeContext(_ctx, getState()); - enterRule(_localctx, 16, RULE_decltype); + enterRule(_localctx, 20, RULE_decltype); int _la; try { enterOuterAlt(_localctx, 1); { - setState(162); + setState(193); match(TYPE); - setState(167); + setState(198); _errHandler.sync(this); _la = _input.LA(1); while (_la==LBRACE) { { { - setState(163); + setState(194); match(LBRACE); - setState(164); + setState(195); match(RBRACE); } } - setState(169); + setState(200); _errHandler.sync(this); _la = _input.LA(1); } @@ -1047,19 +1199,19 @@ class PainlessParser extends Parser { public final FuncrefContext funcref() throws RecognitionException { FuncrefContext _localctx = new FuncrefContext(_ctx, getState()); - enterRule(_localctx, 18, RULE_funcref); + enterRule(_localctx, 22, RULE_funcref); int _la; try { - setState(176); + setState(207); switch (_input.LA(1)) { case TYPE: enterOuterAlt(_localctx, 1); { - setState(170); + setState(201); match(TYPE); - setState(171); + setState(202); match(REF); - setState(172); + setState(203); _la = _input.LA(1); if ( !(_la==NEW || _la==ID) ) { _errHandler.recoverInline(this); @@ -1071,11 +1223,11 @@ class PainlessParser extends Parser { case ID: enterOuterAlt(_localctx, 2); { - setState(173); + setState(204); match(ID); - setState(174); + setState(205); match(REF); - setState(175); + setState(206); match(ID); } break; @@ -1113,20 +1265,20 @@ class PainlessParser extends Parser { public final DeclvarContext declvar() throws RecognitionException { DeclvarContext _localctx = new DeclvarContext(_ctx, getState()); - enterRule(_localctx, 20, RULE_declvar); + enterRule(_localctx, 24, RULE_declvar); int _la; try { enterOuterAlt(_localctx, 1); { - setState(178); + setState(209); match(ID); - setState(181); + setState(212); _la = _input.LA(1); if (_la==ASSIGN) { { - setState(179); + setState(210); match(ASSIGN); - setState(180); + setState(211); expression(0); } } @@ -1166,21 +1318,21 @@ class PainlessParser extends Parser { public final TrapContext trap() throws RecognitionException { TrapContext _localctx = new TrapContext(_ctx, getState()); - enterRule(_localctx, 22, RULE_trap); + enterRule(_localctx, 26, RULE_trap); try { enterOuterAlt(_localctx, 1); { - setState(183); + setState(214); match(CATCH); - setState(184); + setState(215); match(LP); - setState(185); + setState(216); match(TYPE); - setState(186); + setState(217); match(ID); - setState(187); + setState(218); match(RP); - setState(188); + setState(219); block(); } } @@ -1211,12 +1363,12 @@ class PainlessParser extends Parser { public final DelimiterContext delimiter() throws RecognitionException { DelimiterContext _localctx = new DelimiterContext(_ctx, getState()); - enterRule(_localctx, 24, RULE_delimiter); + enterRule(_localctx, 28, RULE_delimiter); int _la; try { enterOuterAlt(_localctx, 1); { - setState(190); + setState(221); _la = _input.LA(1); if ( !(_la==EOF || _la==SEMICOLON) ) { _errHandler.recoverInline(this); @@ -1378,31 +1530,31 @@ class PainlessParser extends Parser { int _parentState = getState(); ExpressionContext _localctx = new ExpressionContext(_ctx, _parentState); ExpressionContext _prevctx = _localctx; - int _startState = 26; - enterRecursionRule(_localctx, 26, RULE_expression, _p); + int _startState = 30; + enterRecursionRule(_localctx, 30, RULE_expression, _p); int _la; try { int _alt; enterOuterAlt(_localctx, 1); { - setState(201); - switch ( getInterpreter().adaptivePredict(_input,16,_ctx) ) { + setState(232); + switch ( getInterpreter().adaptivePredict(_input,19,_ctx) ) { case 1: { _localctx = new 
AssignmentContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(193); + setState(224); chain(true); - setState(194); + setState(225); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ASSIGN) | (1L << AADD) | (1L << ASUB) | (1L << AMUL) | (1L << ADIV) | (1L << AREM) | (1L << AAND) | (1L << AXOR) | (1L << AOR) | (1L << ALSH) | (1L << ARSH) | (1L << AUSH))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(195); + setState(226); expression(1); ((AssignmentContext)_localctx).s = false; } @@ -1412,37 +1564,37 @@ class PainlessParser extends Parser { _localctx = new SingleContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(198); + setState(229); ((SingleContext)_localctx).u = unary(false); ((SingleContext)_localctx).s = ((SingleContext)_localctx).u.s; } break; } _ctx.stop = _input.LT(-1); - setState(262); + setState(293); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,18,_ctx); + _alt = getInterpreter().adaptivePredict(_input,21,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { if ( _parseListeners!=null ) triggerExitRuleEvent(); _prevctx = _localctx; { - setState(260); - switch ( getInterpreter().adaptivePredict(_input,17,_ctx) ) { + setState(291); + switch ( getInterpreter().adaptivePredict(_input,20,_ctx) ) { case 1: { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(203); + setState(234); if (!(precpred(_ctx, 12))) throw new FailedPredicateException(this, "precpred(_ctx, 12)"); - setState(204); + setState(235); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << MUL) | (1L << DIV) | (1L << REM))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(205); + setState(236); expression(13); ((BinaryContext)_localctx).s = false; } @@ -1451,16 +1603,16 @@ class PainlessParser extends Parser { { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(208); + setState(239); if (!(precpred(_ctx, 11))) throw new FailedPredicateException(this, "precpred(_ctx, 11)"); - setState(209); + setState(240); _la = _input.LA(1); if ( !(_la==ADD || _la==SUB) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(210); + setState(241); expression(12); ((BinaryContext)_localctx).s = false; } @@ -1469,16 +1621,16 @@ class PainlessParser extends Parser { { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(213); + setState(244); if (!(precpred(_ctx, 10))) throw new FailedPredicateException(this, "precpred(_ctx, 10)"); - setState(214); + setState(245); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LSH) | (1L << RSH) | (1L << USH))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(215); + setState(246); expression(11); ((BinaryContext)_localctx).s = false; } @@ -1487,16 +1639,16 @@ class PainlessParser extends Parser { { _localctx = new CompContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(218); + setState(249); if (!(precpred(_ctx, 9))) throw new FailedPredicateException(this, "precpred(_ctx, 9)"); - setState(219); + setState(250); _la = 
_input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LT) | (1L << LTE) | (1L << GT) | (1L << GTE))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(220); + setState(251); expression(10); ((CompContext)_localctx).s = false; } @@ -1505,16 +1657,16 @@ class PainlessParser extends Parser { { _localctx = new CompContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(223); + setState(254); if (!(precpred(_ctx, 8))) throw new FailedPredicateException(this, "precpred(_ctx, 8)"); - setState(224); + setState(255); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << EQ) | (1L << EQR) | (1L << NE) | (1L << NER))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(225); + setState(256); expression(9); ((CompContext)_localctx).s = false; } @@ -1523,11 +1675,11 @@ class PainlessParser extends Parser { { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(228); + setState(259); if (!(precpred(_ctx, 7))) throw new FailedPredicateException(this, "precpred(_ctx, 7)"); - setState(229); + setState(260); match(BWAND); - setState(230); + setState(261); expression(8); ((BinaryContext)_localctx).s = false; } @@ -1536,11 +1688,11 @@ class PainlessParser extends Parser { { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(233); + setState(264); if (!(precpred(_ctx, 6))) throw new FailedPredicateException(this, "precpred(_ctx, 6)"); - setState(234); + setState(265); match(XOR); - setState(235); + setState(266); expression(7); ((BinaryContext)_localctx).s = false; } @@ -1549,11 +1701,11 @@ class PainlessParser extends Parser { { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(238); + setState(269); if (!(precpred(_ctx, 5))) throw new FailedPredicateException(this, "precpred(_ctx, 5)"); - setState(239); + setState(270); match(BWOR); - setState(240); + setState(271); expression(6); ((BinaryContext)_localctx).s = false; } @@ -1562,11 +1714,11 @@ class PainlessParser extends Parser { { _localctx = new BoolContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(243); + setState(274); if (!(precpred(_ctx, 4))) throw new FailedPredicateException(this, "precpred(_ctx, 4)"); - setState(244); + setState(275); match(BOOLAND); - setState(245); + setState(276); expression(5); ((BoolContext)_localctx).s = false; } @@ -1575,11 +1727,11 @@ class PainlessParser extends Parser { { _localctx = new BoolContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(248); + setState(279); if (!(precpred(_ctx, 3))) throw new FailedPredicateException(this, "precpred(_ctx, 3)"); - setState(249); + setState(280); match(BOOLOR); - setState(250); + setState(281); expression(4); ((BoolContext)_localctx).s = false; } @@ -1588,15 +1740,15 @@ class PainlessParser extends Parser { { _localctx = new ConditionalContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(253); + setState(284); if (!(precpred(_ctx, 2))) throw new 
FailedPredicateException(this, "precpred(_ctx, 2)"); - setState(254); + setState(285); match(COND); - setState(255); + setState(286); ((ConditionalContext)_localctx).e0 = expression(0); - setState(256); + setState(287); match(COLON); - setState(257); + setState(288); ((ConditionalContext)_localctx).e1 = expression(2); ((ConditionalContext)_localctx).s = ((ConditionalContext)_localctx).e0.s && ((ConditionalContext)_localctx).e1.s; } @@ -1604,9 +1756,9 @@ class PainlessParser extends Parser { } } } - setState(264); + setState(295); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,18,_ctx); + _alt = getInterpreter().adaptivePredict(_input,21,_ctx); } } } @@ -1748,25 +1900,25 @@ class PainlessParser extends Parser { public final UnaryContext unary(boolean c) throws RecognitionException { UnaryContext _localctx = new UnaryContext(_ctx, getState(), c); - enterRule(_localctx, 28, RULE_unary); + enterRule(_localctx, 32, RULE_unary); int _la; try { - setState(294); - switch ( getInterpreter().adaptivePredict(_input,19,_ctx) ) { + setState(325); + switch ( getInterpreter().adaptivePredict(_input,22,_ctx) ) { case 1: _localctx = new PreContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(265); + setState(296); if (!( !_localctx.c )) throw new FailedPredicateException(this, " !$c "); - setState(266); + setState(297); _la = _input.LA(1); if ( !(_la==INCR || _la==DECR) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(267); + setState(298); chain(true); } break; @@ -1774,11 +1926,11 @@ class PainlessParser extends Parser { _localctx = new PostContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(268); + setState(299); if (!( !_localctx.c )) throw new FailedPredicateException(this, " !$c "); - setState(269); + setState(300); chain(true); - setState(270); + setState(301); _la = _input.LA(1); if ( !(_la==INCR || _la==DECR) ) { _errHandler.recoverInline(this); @@ -1791,9 +1943,9 @@ class PainlessParser extends Parser { _localctx = new ReadContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(272); + setState(303); if (!( !_localctx.c )) throw new FailedPredicateException(this, " !$c "); - setState(273); + setState(304); chain(false); } break; @@ -1801,9 +1953,9 @@ class PainlessParser extends Parser { _localctx = new NumericContext(_localctx); enterOuterAlt(_localctx, 4); { - setState(274); + setState(305); if (!( !_localctx.c )) throw new FailedPredicateException(this, " !$c "); - setState(275); + setState(306); _la = _input.LA(1); if ( !(((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (OCTAL - 64)) | (1L << (HEX - 64)) | (1L << (INTEGER - 64)) | (1L << (DECIMAL - 64)))) != 0)) ) { _errHandler.recoverInline(this); @@ -1817,9 +1969,9 @@ class PainlessParser extends Parser { _localctx = new TrueContext(_localctx); enterOuterAlt(_localctx, 5); { - setState(277); + setState(308); if (!( !_localctx.c )) throw new FailedPredicateException(this, " !$c "); - setState(278); + setState(309); match(TRUE); ((TrueContext)_localctx).s = false; } @@ -1828,9 +1980,9 @@ class PainlessParser extends Parser { _localctx = new FalseContext(_localctx); enterOuterAlt(_localctx, 6); { - setState(280); + setState(311); if (!( !_localctx.c )) throw new FailedPredicateException(this, " !$c "); - setState(281); + setState(312); match(FALSE); ((FalseContext)_localctx).s = false; } @@ -1839,9 +1991,9 @@ class PainlessParser extends Parser { _localctx = new NullContext(_localctx); enterOuterAlt(_localctx, 7); { - setState(283); + setState(314); if (!( 
!_localctx.c )) throw new FailedPredicateException(this, " !$c "); - setState(284); + setState(315); match(NULL); ((NullContext)_localctx).s = false; } @@ -1850,16 +2002,16 @@ class PainlessParser extends Parser { _localctx = new OperatorContext(_localctx); enterOuterAlt(_localctx, 8); { - setState(286); + setState(317); if (!( !_localctx.c )) throw new FailedPredicateException(this, " !$c "); - setState(287); + setState(318); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(288); + setState(319); unary(false); } break; @@ -1867,13 +2019,13 @@ class PainlessParser extends Parser { _localctx = new CastContext(_localctx); enterOuterAlt(_localctx, 9); { - setState(289); + setState(320); match(LP); - setState(290); + setState(321); decltype(); - setState(291); + setState(322); match(RP); - setState(292); + setState(323); unary(_localctx.c); } break; @@ -1979,32 +2131,32 @@ class PainlessParser extends Parser { public final ChainContext chain(boolean c) throws RecognitionException { ChainContext _localctx = new ChainContext(_ctx, getState(), c); - enterRule(_localctx, 30, RULE_chain); + enterRule(_localctx, 34, RULE_chain); try { int _alt; - setState(330); - switch ( getInterpreter().adaptivePredict(_input,25,_ctx) ) { + setState(361); + switch ( getInterpreter().adaptivePredict(_input,28,_ctx) ) { case 1: _localctx = new DynamicContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(296); + setState(327); ((DynamicContext)_localctx).p = primary(_localctx.c); - setState(300); + setState(331); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,20,_ctx); + _alt = getInterpreter().adaptivePredict(_input,23,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(297); + setState(328); secondary(((DynamicContext)_localctx).p.s); } } } - setState(302); + setState(333); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,20,_ctx); + _alt = getInterpreter().adaptivePredict(_input,23,_ctx); } } break; @@ -2012,25 +2164,25 @@ class PainlessParser extends Parser { _localctx = new StaticContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(303); + setState(334); decltype(); - setState(304); + setState(335); dot(); - setState(308); + setState(339); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,21,_ctx); + _alt = getInterpreter().adaptivePredict(_input,24,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(305); + setState(336); secondary(true); } } } - setState(310); + setState(341); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,21,_ctx); + _alt = getInterpreter().adaptivePredict(_input,24,_ctx); } } break; @@ -2038,11 +2190,11 @@ class PainlessParser extends Parser { _localctx = new NewarrayContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(311); + setState(342); match(NEW); - setState(312); + setState(343); match(TYPE); - setState(317); + setState(348); _errHandler.sync(this); _alt = 1; do { @@ -2050,11 +2202,11 @@ class PainlessParser extends Parser { case 1: { { - setState(313); + setState(344); match(LBRACE); - setState(314); + setState(345); expression(0); - setState(315); + setState(346); match(RBRACE); } } @@ -2062,31 +2214,31 @@ class PainlessParser extends Parser { default: throw new 
NoViableAltException(this); } - setState(319); + setState(350); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,22,_ctx); + _alt = getInterpreter().adaptivePredict(_input,25,_ctx); } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); - setState(328); - switch ( getInterpreter().adaptivePredict(_input,24,_ctx) ) { + setState(359); + switch ( getInterpreter().adaptivePredict(_input,27,_ctx) ) { case 1: { - setState(321); + setState(352); dot(); - setState(325); + setState(356); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,23,_ctx); + _alt = getInterpreter().adaptivePredict(_input,26,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(322); + setState(353); secondary(true); } } } - setState(327); + setState(358); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,23,_ctx); + _alt = getInterpreter().adaptivePredict(_input,26,_ctx); } } break; @@ -2132,6 +2284,18 @@ class PainlessParser extends Parser { else return visitor.visitChildren(this); } } + public static class CalllocalContext extends PrimaryContext { + public TerminalNode ID() { return getToken(PainlessParser.ID, 0); } + public ArgumentsContext arguments() { + return getRuleContext(ArgumentsContext.class,0); + } + public CalllocalContext(PrimaryContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitCalllocal(this); + else return visitor.visitChildren(this); + } + } public static class VariableContext extends PrimaryContext { public TerminalNode ID() { return getToken(PainlessParser.ID, 0); } public VariableContext(PrimaryContext ctx) { copyFrom(ctx); } @@ -2184,21 +2348,21 @@ class PainlessParser extends Parser { public final PrimaryContext primary(boolean c) throws RecognitionException { PrimaryContext _localctx = new PrimaryContext(_ctx, getState(), c); - enterRule(_localctx, 32, RULE_primary); + enterRule(_localctx, 36, RULE_primary); try { - setState(348); - switch ( getInterpreter().adaptivePredict(_input,26,_ctx) ) { + setState(381); + switch ( getInterpreter().adaptivePredict(_input,29,_ctx) ) { case 1: _localctx = new ExprprecContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(332); + setState(363); if (!( !_localctx.c )) throw new FailedPredicateException(this, " !$c "); - setState(333); + setState(364); match(LP); - setState(334); + setState(365); ((ExprprecContext)_localctx).e = expression(0); - setState(335); + setState(366); match(RP); ((ExprprecContext)_localctx).s = ((ExprprecContext)_localctx).e.s; } @@ -2207,13 +2371,13 @@ class PainlessParser extends Parser { _localctx = new ChainprecContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(338); + setState(369); if (!( _localctx.c )) throw new FailedPredicateException(this, " $c "); - setState(339); + setState(370); match(LP); - setState(340); + setState(371); unary(true); - setState(341); + setState(372); match(RP); } break; @@ -2221,7 +2385,7 @@ class PainlessParser extends Parser { _localctx = new StringContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(343); + setState(374); match(STRING); } break; @@ -2229,19 +2393,29 @@ class PainlessParser extends Parser { _localctx = new VariableContext(_localctx); enterOuterAlt(_localctx, 4); { - setState(344); + setState(375); match(ID); } break; case 5: - _localctx = new NewobjectContext(_localctx); + 
_localctx = new CalllocalContext(_localctx); enterOuterAlt(_localctx, 5); { - setState(345); + setState(376); + match(ID); + setState(377); + arguments(); + } + break; + case 6: + _localctx = new NewobjectContext(_localctx); + enterOuterAlt(_localctx, 6); + { + setState(378); match(NEW); - setState(346); + setState(379); match(TYPE); - setState(347); + setState(380); arguments(); } break; @@ -2281,25 +2455,25 @@ class PainlessParser extends Parser { public final SecondaryContext secondary(boolean s) throws RecognitionException { SecondaryContext _localctx = new SecondaryContext(_ctx, getState(), s); - enterRule(_localctx, 34, RULE_secondary); + enterRule(_localctx, 38, RULE_secondary); try { - setState(354); - switch ( getInterpreter().adaptivePredict(_input,27,_ctx) ) { + setState(387); + switch ( getInterpreter().adaptivePredict(_input,30,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(350); + setState(383); if (!( _localctx.s )) throw new FailedPredicateException(this, " $s "); - setState(351); + setState(384); dot(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(352); + setState(385); if (!( _localctx.s )) throw new FailedPredicateException(this, " $s "); - setState(353); + setState(386); brace(); } break; @@ -2354,20 +2528,20 @@ class PainlessParser extends Parser { public final DotContext dot() throws RecognitionException { DotContext _localctx = new DotContext(_ctx, getState()); - enterRule(_localctx, 36, RULE_dot); + enterRule(_localctx, 40, RULE_dot); int _la; try { - setState(361); - switch ( getInterpreter().adaptivePredict(_input,28,_ctx) ) { + setState(394); + switch ( getInterpreter().adaptivePredict(_input,31,_ctx) ) { case 1: _localctx = new CallinvokeContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(356); + setState(389); match(DOT); - setState(357); + setState(390); match(DOTID); - setState(358); + setState(391); arguments(); } break; @@ -2375,9 +2549,9 @@ class PainlessParser extends Parser { _localctx = new FieldaccessContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(359); + setState(392); match(DOT); - setState(360); + setState(393); _la = _input.LA(1); if ( !(_la==DOTINTEGER || _la==DOTID) ) { _errHandler.recoverInline(this); @@ -2426,16 +2600,16 @@ class PainlessParser extends Parser { public final BraceContext brace() throws RecognitionException { BraceContext _localctx = new BraceContext(_ctx, getState()); - enterRule(_localctx, 38, RULE_brace); + enterRule(_localctx, 42, RULE_brace); try { _localctx = new BraceaccessContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(363); + setState(396); match(LBRACE); - setState(364); + setState(397); expression(0); - setState(365); + setState(398); match(RBRACE); } } @@ -2476,40 +2650,40 @@ class PainlessParser extends Parser { public final ArgumentsContext arguments() throws RecognitionException { ArgumentsContext _localctx = new ArgumentsContext(_ctx, getState()); - enterRule(_localctx, 40, RULE_arguments); + enterRule(_localctx, 44, RULE_arguments); int _la; try { enterOuterAlt(_localctx, 1); { { - setState(367); + setState(400); match(LP); - setState(376); - switch ( getInterpreter().adaptivePredict(_input,30,_ctx) ) { + setState(409); + switch ( getInterpreter().adaptivePredict(_input,33,_ctx) ) { case 1: { - setState(368); + setState(401); argument(); - setState(373); + setState(406); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(369); + setState(402); match(COMMA); - setState(370); + setState(403); argument(); } } - 
setState(375); + setState(408); _errHandler.sync(this); _la = _input.LA(1); } } break; } - setState(378); + setState(411); match(RP); } } @@ -2545,21 +2719,21 @@ class PainlessParser extends Parser { public final ArgumentContext argument() throws RecognitionException { ArgumentContext _localctx = new ArgumentContext(_ctx, getState()); - enterRule(_localctx, 42, RULE_argument); + enterRule(_localctx, 46, RULE_argument); try { - setState(382); - switch ( getInterpreter().adaptivePredict(_input,31,_ctx) ) { + setState(415); + switch ( getInterpreter().adaptivePredict(_input,34,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(380); + setState(413); expression(0); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(381); + setState(414); funcref(); } break; @@ -2578,15 +2752,15 @@ class PainlessParser extends Parser { public boolean sempred(RuleContext _localctx, int ruleIndex, int predIndex) { switch (ruleIndex) { - case 1: + case 3: return statement_sempred((StatementContext)_localctx, predIndex); - case 13: + case 15: return expression_sempred((ExpressionContext)_localctx, predIndex); - case 14: - return unary_sempred((UnaryContext)_localctx, predIndex); case 16: + return unary_sempred((UnaryContext)_localctx, predIndex); + case 18: return primary_sempred((PrimaryContext)_localctx, predIndex); - case 17: + case 19: return secondary_sempred((SecondaryContext)_localctx, predIndex); } return true; @@ -2666,148 +2840,160 @@ class PainlessParser extends Parser { } public static final String _serializedATN = - "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3M\u0183\4\2\t\2\4"+ + "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3M\u01a4\4\2\t\2\4"+ "\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t"+ "\13\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22"+ - "\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\3\2\7\2\60\n\2\f\2"+ - "\16\2\63\13\2\3\2\3\2\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\5\3?\n\3\3\3\3\3"+ - "\3\3\3\3\3\3\3\3\5\3G\n\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3"+ - "\5\3T\n\3\3\3\3\3\5\3X\n\3\3\3\3\3\5\3\\\n\3\3\3\3\3\3\3\5\3a\n\3\3\3"+ - "\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3"+ - "\3\3\3\3\3\3\3\3\3\6\3z\n\3\r\3\16\3{\3\3\3\3\3\3\3\3\3\3\3\3\3\3\5\3"+ - "\u0085\n\3\3\4\3\4\5\4\u0089\n\4\3\5\3\5\7\5\u008d\n\5\f\5\16\5\u0090"+ - "\13\5\3\5\3\5\3\6\3\6\3\7\3\7\5\7\u0098\n\7\3\b\3\b\3\t\3\t\3\t\3\t\7"+ - "\t\u00a0\n\t\f\t\16\t\u00a3\13\t\3\n\3\n\3\n\7\n\u00a8\n\n\f\n\16\n\u00ab"+ - "\13\n\3\13\3\13\3\13\3\13\3\13\3\13\5\13\u00b3\n\13\3\f\3\f\3\f\5\f\u00b8"+ - "\n\f\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\16\3\16\3\17\3\17\3\17\3\17\3\17\3"+ - "\17\3\17\3\17\3\17\5\17\u00cc\n\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17"+ - "\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17"+ - "\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17"+ - "\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17"+ - "\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\7\17\u0107\n\17\f\17\16\17\u010a"+ - "\13\17\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20"+ - "\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20"+ - "\3\20\3\20\5\20\u0129\n\20\3\21\3\21\7\21\u012d\n\21\f\21\16\21\u0130"+ - "\13\21\3\21\3\21\3\21\7\21\u0135\n\21\f\21\16\21\u0138\13\21\3\21\3\21"+ - "\3\21\3\21\3\21\3\21\6\21\u0140\n\21\r\21\16\21\u0141\3\21\3\21\7\21\u0146"+ - "\n\21\f\21\16\21\u0149\13\21\5\21\u014b\n\21\5\21\u014d\n\21\3\22\3\22"+ + 
"\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31\t\31"+ + "\3\2\7\2\64\n\2\f\2\16\2\67\13\2\3\2\7\2:\n\2\f\2\16\2=\13\2\3\2\3\2\3"+ + "\3\3\3\3\3\3\3\3\3\3\4\3\4\3\4\3\4\3\4\3\4\3\4\7\4M\n\4\f\4\16\4P\13\4"+ + "\5\4R\n\4\3\4\3\4\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\5\5^\n\5\3\5\3\5\3\5"+ + "\3\5\3\5\3\5\5\5f\n\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\5\5"+ + "s\n\5\3\5\3\5\5\5w\n\5\3\5\3\5\5\5{\n\5\3\5\3\5\3\5\5\5\u0080\n\5\3\5"+ + "\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3"+ + "\5\3\5\3\5\3\5\3\5\6\5\u0099\n\5\r\5\16\5\u009a\3\5\3\5\3\5\3\5\3\5\3"+ + "\5\3\5\5\5\u00a4\n\5\3\6\3\6\5\6\u00a8\n\6\3\7\3\7\7\7\u00ac\n\7\f\7\16"+ + "\7\u00af\13\7\3\7\3\7\3\b\3\b\3\t\3\t\5\t\u00b7\n\t\3\n\3\n\3\13\3\13"+ + "\3\13\3\13\7\13\u00bf\n\13\f\13\16\13\u00c2\13\13\3\f\3\f\3\f\7\f\u00c7"+ + "\n\f\f\f\16\f\u00ca\13\f\3\r\3\r\3\r\3\r\3\r\3\r\5\r\u00d2\n\r\3\16\3"+ + "\16\3\16\5\16\u00d7\n\16\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\20\3\20"+ + "\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\5\21\u00eb\n\21\3\21\3\21"+ + "\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21"+ + "\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21"+ + "\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21"+ + "\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\7\21"+ + "\u0126\n\21\f\21\16\21\u0129\13\21\3\22\3\22\3\22\3\22\3\22\3\22\3\22"+ "\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22"+ - "\5\22\u015f\n\22\3\23\3\23\3\23\3\23\5\23\u0165\n\23\3\24\3\24\3\24\3"+ - "\24\3\24\5\24\u016c\n\24\3\25\3\25\3\25\3\25\3\26\3\26\3\26\3\26\7\26"+ - "\u0176\n\26\f\26\16\26\u0179\13\26\5\26\u017b\n\26\3\26\3\26\3\27\3\27"+ - "\5\27\u0181\n\27\3\27\2\3\34\30\2\4\6\b\n\f\16\20\22\24\26\30\32\34\36"+ - " \"$&(*,\2\16\4\2\26\26KK\3\3\r\r\3\2\66A\3\2\34\36\3\2\37 \3\2!#\3\2"+ - "$\'\3\2(+\3\2\64\65\3\2BE\4\2\32\33\37 \3\2LM\u01aa\2\61\3\2\2\2\4\u0084"+ - "\3\2\2\2\6\u0088\3\2\2\2\b\u008a\3\2\2\2\n\u0093\3\2\2\2\f\u0097\3\2\2"+ - "\2\16\u0099\3\2\2\2\20\u009b\3\2\2\2\22\u00a4\3\2\2\2\24\u00b2\3\2\2\2"+ - "\26\u00b4\3\2\2\2\30\u00b9\3\2\2\2\32\u00c0\3\2\2\2\34\u00cb\3\2\2\2\36"+ - "\u0128\3\2\2\2 \u014c\3\2\2\2\"\u015e\3\2\2\2$\u0164\3\2\2\2&\u016b\3"+ - "\2\2\2(\u016d\3\2\2\2*\u0171\3\2\2\2,\u0180\3\2\2\2.\60\5\4\3\2/.\3\2"+ - "\2\2\60\63\3\2\2\2\61/\3\2\2\2\61\62\3\2\2\2\62\64\3\2\2\2\63\61\3\2\2"+ - "\2\64\65\7\2\2\3\65\3\3\2\2\2\66\67\7\16\2\2\678\7\t\2\289\5\34\17\29"+ - ":\7\n\2\2:>\5\6\4\2;<\7\17\2\2;\3\2\2\2>=\3\2\2\2"+ - "?\u0085\3\2\2\2@A\7\20\2\2AB\7\t\2\2BC\5\34\17\2CF\7\n\2\2DG\5\6\4\2E"+ - "G\5\n\6\2FD\3\2\2\2FE\3\2\2\2G\u0085\3\2\2\2HI\7\21\2\2IJ\5\b\5\2JK\7"+ - "\20\2\2KL\7\t\2\2LM\5\34\17\2MN\7\n\2\2NO\5\32\16\2O\u0085\3\2\2\2PQ\7"+ - "\22\2\2QS\7\t\2\2RT\5\f\7\2SR\3\2\2\2ST\3\2\2\2TU\3\2\2\2UW\7\r\2\2VX"+ - "\5\34\17\2WV\3\2\2\2WX\3\2\2\2XY\3\2\2\2Y[\7\r\2\2Z\\\5\16\b\2[Z\3\2\2"+ - "\2[\\\3\2\2\2\\]\3\2\2\2]`\7\n\2\2^a\5\6\4\2_a\5\n\6\2`^\3\2\2\2`_\3\2"+ - "\2\2a\u0085\3\2\2\2bc\7\22\2\2cd\7\t\2\2de\5\22\n\2ef\7K\2\2fg\7\62\2"+ - "\2gh\5\34\17\2hi\7\n\2\2ij\5\6\4\2j\u0085\3\2\2\2kl\5\20\t\2lm\5\32\16"+ - "\2m\u0085\3\2\2\2no\7\23\2\2o\u0085\5\32\16\2pq\7\24\2\2q\u0085\5\32\16"+ - "\2rs\7\25\2\2st\5\34\17\2tu\5\32\16\2u\u0085\3\2\2\2vw\7\27\2\2wy\5\b"+ - "\5\2xz\5\30\r\2yx\3\2\2\2z{\3\2\2\2{y\3\2\2\2{|\3\2\2\2|\u0085\3\2\2\2"+ - "}~\7\31\2\2~\177\5\34\17\2\177\u0080\5\32\16\2\u0080\u0085\3\2\2\2\u0081"+ - "\u0082\5\34\17\2\u0082\u0083\5\32\16\2\u0083\u0085\3\2\2\2\u0084\66\3"+ - 
"\2\2\2\u0084@\3\2\2\2\u0084H\3\2\2\2\u0084P\3\2\2\2\u0084b\3\2\2\2\u0084"+ - "k\3\2\2\2\u0084n\3\2\2\2\u0084p\3\2\2\2\u0084r\3\2\2\2\u0084v\3\2\2\2"+ - "\u0084}\3\2\2\2\u0084\u0081\3\2\2\2\u0085\5\3\2\2\2\u0086\u0089\5\b\5"+ - "\2\u0087\u0089\5\4\3\2\u0088\u0086\3\2\2\2\u0088\u0087\3\2\2\2\u0089\7"+ - "\3\2\2\2\u008a\u008e\7\5\2\2\u008b\u008d\5\4\3\2\u008c\u008b\3\2\2\2\u008d"+ - "\u0090\3\2\2\2\u008e\u008c\3\2\2\2\u008e\u008f\3\2\2\2\u008f\u0091\3\2"+ - "\2\2\u0090\u008e\3\2\2\2\u0091\u0092\7\6\2\2\u0092\t\3\2\2\2\u0093\u0094"+ - "\7\r\2\2\u0094\13\3\2\2\2\u0095\u0098\5\20\t\2\u0096\u0098\5\34\17\2\u0097"+ - "\u0095\3\2\2\2\u0097\u0096\3\2\2\2\u0098\r\3\2\2\2\u0099\u009a\5\34\17"+ - "\2\u009a\17\3\2\2\2\u009b\u009c\5\22\n\2\u009c\u00a1\5\26\f\2\u009d\u009e"+ - "\7\f\2\2\u009e\u00a0\5\26\f\2\u009f\u009d\3\2\2\2\u00a0\u00a3\3\2\2\2"+ - "\u00a1\u009f\3\2\2\2\u00a1\u00a2\3\2\2\2\u00a2\21\3\2\2\2\u00a3\u00a1"+ - "\3\2\2\2\u00a4\u00a9\7J\2\2\u00a5\u00a6\7\7\2\2\u00a6\u00a8\7\b\2\2\u00a7"+ - "\u00a5\3\2\2\2\u00a8\u00ab\3\2\2\2\u00a9\u00a7\3\2\2\2\u00a9\u00aa\3\2"+ - "\2\2\u00aa\23\3\2\2\2\u00ab\u00a9\3\2\2\2\u00ac\u00ad\7J\2\2\u00ad\u00ae"+ - "\7\63\2\2\u00ae\u00b3\t\2\2\2\u00af\u00b0\7K\2\2\u00b0\u00b1\7\63\2\2"+ - "\u00b1\u00b3\7K\2\2\u00b2\u00ac\3\2\2\2\u00b2\u00af\3\2\2\2\u00b3\25\3"+ - "\2\2\2\u00b4\u00b7\7K\2\2\u00b5\u00b6\7\66\2\2\u00b6\u00b8\5\34\17\2\u00b7"+ - "\u00b5\3\2\2\2\u00b7\u00b8\3\2\2\2\u00b8\27\3\2\2\2\u00b9\u00ba\7\30\2"+ - "\2\u00ba\u00bb\7\t\2\2\u00bb\u00bc\7J\2\2\u00bc\u00bd\7K\2\2\u00bd\u00be"+ - "\7\n\2\2\u00be\u00bf\5\b\5\2\u00bf\31\3\2\2\2\u00c0\u00c1\t\3\2\2\u00c1"+ - "\33\3\2\2\2\u00c2\u00c3\b\17\1\2\u00c3\u00c4\5 \21\2\u00c4\u00c5\t\4\2"+ - "\2\u00c5\u00c6\5\34\17\3\u00c6\u00c7\b\17\1\2\u00c7\u00cc\3\2\2\2\u00c8"+ - "\u00c9\5\36\20\2\u00c9\u00ca\b\17\1\2\u00ca\u00cc\3\2\2\2\u00cb\u00c2"+ - "\3\2\2\2\u00cb\u00c8\3\2\2\2\u00cc\u0108\3\2\2\2\u00cd\u00ce\f\16\2\2"+ - "\u00ce\u00cf\t\5\2\2\u00cf\u00d0\5\34\17\17\u00d0\u00d1\b\17\1\2\u00d1"+ - "\u0107\3\2\2\2\u00d2\u00d3\f\r\2\2\u00d3\u00d4\t\6\2\2\u00d4\u00d5\5\34"+ - "\17\16\u00d5\u00d6\b\17\1\2\u00d6\u0107\3\2\2\2\u00d7\u00d8\f\f\2\2\u00d8"+ - "\u00d9\t\7\2\2\u00d9\u00da\5\34\17\r\u00da\u00db\b\17\1\2\u00db\u0107"+ - "\3\2\2\2\u00dc\u00dd\f\13\2\2\u00dd\u00de\t\b\2\2\u00de\u00df\5\34\17"+ - "\f\u00df\u00e0\b\17\1\2\u00e0\u0107\3\2\2\2\u00e1\u00e2\f\n\2\2\u00e2"+ - "\u00e3\t\t\2\2\u00e3\u00e4\5\34\17\13\u00e4\u00e5\b\17\1\2\u00e5\u0107"+ - "\3\2\2\2\u00e6\u00e7\f\t\2\2\u00e7\u00e8\7,\2\2\u00e8\u00e9\5\34\17\n"+ - "\u00e9\u00ea\b\17\1\2\u00ea\u0107\3\2\2\2\u00eb\u00ec\f\b\2\2\u00ec\u00ed"+ - "\7-\2\2\u00ed\u00ee\5\34\17\t\u00ee\u00ef\b\17\1\2\u00ef\u0107\3\2\2\2"+ - "\u00f0\u00f1\f\7\2\2\u00f1\u00f2\7.\2\2\u00f2\u00f3\5\34\17\b\u00f3\u00f4"+ - "\b\17\1\2\u00f4\u0107\3\2\2\2\u00f5\u00f6\f\6\2\2\u00f6\u00f7\7/\2\2\u00f7"+ - "\u00f8\5\34\17\7\u00f8\u00f9\b\17\1\2\u00f9\u0107\3\2\2\2\u00fa\u00fb"+ - "\f\5\2\2\u00fb\u00fc\7\60\2\2\u00fc\u00fd\5\34\17\6\u00fd\u00fe\b\17\1"+ - "\2\u00fe\u0107\3\2\2\2\u00ff\u0100\f\4\2\2\u0100\u0101\7\61\2\2\u0101"+ - "\u0102\5\34\17\2\u0102\u0103\7\62\2\2\u0103\u0104\5\34\17\4\u0104\u0105"+ - "\b\17\1\2\u0105\u0107\3\2\2\2\u0106\u00cd\3\2\2\2\u0106\u00d2\3\2\2\2"+ - "\u0106\u00d7\3\2\2\2\u0106\u00dc\3\2\2\2\u0106\u00e1\3\2\2\2\u0106\u00e6"+ - "\3\2\2\2\u0106\u00eb\3\2\2\2\u0106\u00f0\3\2\2\2\u0106\u00f5\3\2\2\2\u0106"+ - "\u00fa\3\2\2\2\u0106\u00ff\3\2\2\2\u0107\u010a\3\2\2\2\u0108\u0106\3\2"+ - "\2\2\u0108\u0109\3\2\2\2\u0109\35\3\2\2\2\u010a\u0108\3\2\2\2\u010b\u010c"+ - 
"\6\20\16\3\u010c\u010d\t\n\2\2\u010d\u0129\5 \21\2\u010e\u010f\6\20\17"+ - "\3\u010f\u0110\5 \21\2\u0110\u0111\t\n\2\2\u0111\u0129\3\2\2\2\u0112\u0113"+ - "\6\20\20\3\u0113\u0129\5 \21\2\u0114\u0115\6\20\21\3\u0115\u0116\t\13"+ - "\2\2\u0116\u0129\b\20\1\2\u0117\u0118\6\20\22\3\u0118\u0119\7G\2\2\u0119"+ - "\u0129\b\20\1\2\u011a\u011b\6\20\23\3\u011b\u011c\7H\2\2\u011c\u0129\b"+ - "\20\1\2\u011d\u011e\6\20\24\3\u011e\u011f\7I\2\2\u011f\u0129\b\20\1\2"+ - "\u0120\u0121\6\20\25\3\u0121\u0122\t\f\2\2\u0122\u0129\5\36\20\2\u0123"+ - "\u0124\7\t\2\2\u0124\u0125\5\22\n\2\u0125\u0126\7\n\2\2\u0126\u0127\5"+ - "\36\20\2\u0127\u0129\3\2\2\2\u0128\u010b\3\2\2\2\u0128\u010e\3\2\2\2\u0128"+ - "\u0112\3\2\2\2\u0128\u0114\3\2\2\2\u0128\u0117\3\2\2\2\u0128\u011a\3\2"+ - "\2\2\u0128\u011d\3\2\2\2\u0128\u0120\3\2\2\2\u0128\u0123\3\2\2\2\u0129"+ - "\37\3\2\2\2\u012a\u012e\5\"\22\2\u012b\u012d\5$\23\2\u012c\u012b\3\2\2"+ - "\2\u012d\u0130\3\2\2\2\u012e\u012c\3\2\2\2\u012e\u012f\3\2\2\2\u012f\u014d"+ - "\3\2\2\2\u0130\u012e\3\2\2\2\u0131\u0132\5\22\n\2\u0132\u0136\5&\24\2"+ - "\u0133\u0135\5$\23\2\u0134\u0133\3\2\2\2\u0135\u0138\3\2\2\2\u0136\u0134"+ - "\3\2\2\2\u0136\u0137\3\2\2\2\u0137\u014d\3\2\2\2\u0138\u0136\3\2\2\2\u0139"+ - "\u013a\7\26\2\2\u013a\u013f\7J\2\2\u013b\u013c\7\7\2\2\u013c\u013d\5\34"+ - "\17\2\u013d\u013e\7\b\2\2\u013e\u0140\3\2\2\2\u013f\u013b\3\2\2\2\u0140"+ - "\u0141\3\2\2\2\u0141\u013f\3\2\2\2\u0141\u0142\3\2\2\2\u0142\u014a\3\2"+ - "\2\2\u0143\u0147\5&\24\2\u0144\u0146\5$\23\2\u0145\u0144\3\2\2\2\u0146"+ - "\u0149\3\2\2\2\u0147\u0145\3\2\2\2\u0147\u0148\3\2\2\2\u0148\u014b\3\2"+ - "\2\2\u0149\u0147\3\2\2\2\u014a\u0143\3\2\2\2\u014a\u014b\3\2\2\2\u014b"+ - "\u014d\3\2\2\2\u014c\u012a\3\2\2\2\u014c\u0131\3\2\2\2\u014c\u0139\3\2"+ - "\2\2\u014d!\3\2\2\2\u014e\u014f\6\22\26\3\u014f\u0150\7\t\2\2\u0150\u0151"+ - "\5\34\17\2\u0151\u0152\7\n\2\2\u0152\u0153\b\22\1\2\u0153\u015f\3\2\2"+ - "\2\u0154\u0155\6\22\27\3\u0155\u0156\7\t\2\2\u0156\u0157\5\36\20\2\u0157"+ - "\u0158\7\n\2\2\u0158\u015f\3\2\2\2\u0159\u015f\7F\2\2\u015a\u015f\7K\2"+ - "\2\u015b\u015c\7\26\2\2\u015c\u015d\7J\2\2\u015d\u015f\5*\26\2\u015e\u014e"+ - "\3\2\2\2\u015e\u0154\3\2\2\2\u015e\u0159\3\2\2\2\u015e\u015a\3\2\2\2\u015e"+ - "\u015b\3\2\2\2\u015f#\3\2\2\2\u0160\u0161\6\23\30\3\u0161\u0165\5&\24"+ - "\2\u0162\u0163\6\23\31\3\u0163\u0165\5(\25\2\u0164\u0160\3\2\2\2\u0164"+ - "\u0162\3\2\2\2\u0165%\3\2\2\2\u0166\u0167\7\13\2\2\u0167\u0168\7M\2\2"+ - "\u0168\u016c\5*\26\2\u0169\u016a\7\13\2\2\u016a\u016c\t\r\2\2\u016b\u0166"+ - "\3\2\2\2\u016b\u0169\3\2\2\2\u016c\'\3\2\2\2\u016d\u016e\7\7\2\2\u016e"+ - "\u016f\5\34\17\2\u016f\u0170\7\b\2\2\u0170)\3\2\2\2\u0171\u017a\7\t\2"+ - "\2\u0172\u0177\5,\27\2\u0173\u0174\7\f\2\2\u0174\u0176\5,\27\2\u0175\u0173"+ - "\3\2\2\2\u0176\u0179\3\2\2\2\u0177\u0175\3\2\2\2\u0177\u0178\3\2\2\2\u0178"+ - "\u017b\3\2\2\2\u0179\u0177\3\2\2\2\u017a\u0172\3\2\2\2\u017a\u017b\3\2"+ - "\2\2\u017b\u017c\3\2\2\2\u017c\u017d\7\n\2\2\u017d+\3\2\2\2\u017e\u0181"+ - "\5\34\17\2\u017f\u0181\5\24\13\2\u0180\u017e\3\2\2\2\u0180\u017f\3\2\2"+ - "\2\u0181-\3\2\2\2\"\61>FSW[`{\u0084\u0088\u008e\u0097\u00a1\u00a9\u00b2"+ - "\u00b7\u00cb\u0106\u0108\u0128\u012e\u0136\u0141\u0147\u014a\u014c\u015e"+ - "\u0164\u016b\u0177\u017a\u0180"; + "\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\5\22\u0148\n\22\3\23\3\23\7\23"+ + "\u014c\n\23\f\23\16\23\u014f\13\23\3\23\3\23\3\23\7\23\u0154\n\23\f\23"+ + "\16\23\u0157\13\23\3\23\3\23\3\23\3\23\3\23\3\23\6\23\u015f\n\23\r\23"+ + 
"\16\23\u0160\3\23\3\23\7\23\u0165\n\23\f\23\16\23\u0168\13\23\5\23\u016a"+ + "\n\23\5\23\u016c\n\23\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24"+ + "\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\5\24\u0180\n\24\3\25\3\25\3\25"+ + "\3\25\5\25\u0186\n\25\3\26\3\26\3\26\3\26\3\26\5\26\u018d\n\26\3\27\3"+ + "\27\3\27\3\27\3\30\3\30\3\30\3\30\7\30\u0197\n\30\f\30\16\30\u019a\13"+ + "\30\5\30\u019c\n\30\3\30\3\30\3\31\3\31\5\31\u01a2\n\31\3\31\2\3 \32\2"+ + "\4\6\b\n\f\16\20\22\24\26\30\32\34\36 \"$&(*,.\60\2\16\4\2\26\26KK\3\3"+ + "\r\r\3\2\66A\3\2\34\36\3\2\37 \3\2!#\3\2$\'\3\2(+\3\2\64\65\3\2BE\4\2"+ + "\32\33\37 \3\2LM\u01cd\2\65\3\2\2\2\4@\3\2\2\2\6E\3\2\2\2\b\u00a3\3\2"+ + "\2\2\n\u00a7\3\2\2\2\f\u00a9\3\2\2\2\16\u00b2\3\2\2\2\20\u00b6\3\2\2\2"+ + "\22\u00b8\3\2\2\2\24\u00ba\3\2\2\2\26\u00c3\3\2\2\2\30\u00d1\3\2\2\2\32"+ + "\u00d3\3\2\2\2\34\u00d8\3\2\2\2\36\u00df\3\2\2\2 \u00ea\3\2\2\2\"\u0147"+ + "\3\2\2\2$\u016b\3\2\2\2&\u017f\3\2\2\2(\u0185\3\2\2\2*\u018c\3\2\2\2,"+ + "\u018e\3\2\2\2.\u0192\3\2\2\2\60\u01a1\3\2\2\2\62\64\5\4\3\2\63\62\3\2"+ + "\2\2\64\67\3\2\2\2\65\63\3\2\2\2\65\66\3\2\2\2\66;\3\2\2\2\67\65\3\2\2"+ + "\28:\5\b\5\298\3\2\2\2:=\3\2\2\2;9\3\2\2\2;<\3\2\2\2<>\3\2\2\2=;\3\2\2"+ + "\2>?\7\2\2\3?\3\3\2\2\2@A\5\26\f\2AB\7K\2\2BC\5\6\4\2CD\5\f\7\2D\5\3\2"+ + "\2\2EQ\7\t\2\2FG\5\26\f\2GN\7K\2\2HI\7\f\2\2IJ\5\26\f\2JK\7K\2\2KM\3\2"+ + "\2\2LH\3\2\2\2MP\3\2\2\2NL\3\2\2\2NO\3\2\2\2OR\3\2\2\2PN\3\2\2\2QF\3\2"+ + "\2\2QR\3\2\2\2RS\3\2\2\2ST\7\n\2\2T\7\3\2\2\2UV\7\16\2\2VW\7\t\2\2WX\5"+ + " \21\2XY\7\n\2\2Y]\5\n\6\2Z[\7\17\2\2[^\5\n\6\2\\^\6\5\2\2]Z\3\2\2\2]"+ + "\\\3\2\2\2^\u00a4\3\2\2\2_`\7\20\2\2`a\7\t\2\2ab\5 \21\2be\7\n\2\2cf\5"+ + "\n\6\2df\5\16\b\2ec\3\2\2\2ed\3\2\2\2f\u00a4\3\2\2\2gh\7\21\2\2hi\5\f"+ + "\7\2ij\7\20\2\2jk\7\t\2\2kl\5 \21\2lm\7\n\2\2mn\5\36\20\2n\u00a4\3\2\2"+ + "\2op\7\22\2\2pr\7\t\2\2qs\5\20\t\2rq\3\2\2\2rs\3\2\2\2st\3\2\2\2tv\7\r"+ + "\2\2uw\5 \21\2vu\3\2\2\2vw\3\2\2\2wx\3\2\2\2xz\7\r\2\2y{\5\22\n\2zy\3"+ + "\2\2\2z{\3\2\2\2{|\3\2\2\2|\177\7\n\2\2}\u0080\5\n\6\2~\u0080\5\16\b\2"+ + "\177}\3\2\2\2\177~\3\2\2\2\u0080\u00a4\3\2\2\2\u0081\u0082\7\22\2\2\u0082"+ + "\u0083\7\t\2\2\u0083\u0084\5\26\f\2\u0084\u0085\7K\2\2\u0085\u0086\7\62"+ + "\2\2\u0086\u0087\5 \21\2\u0087\u0088\7\n\2\2\u0088\u0089\5\n\6\2\u0089"+ + "\u00a4\3\2\2\2\u008a\u008b\5\24\13\2\u008b\u008c\5\36\20\2\u008c\u00a4"+ + "\3\2\2\2\u008d\u008e\7\23\2\2\u008e\u00a4\5\36\20\2\u008f\u0090\7\24\2"+ + "\2\u0090\u00a4\5\36\20\2\u0091\u0092\7\25\2\2\u0092\u0093\5 \21\2\u0093"+ + "\u0094\5\36\20\2\u0094\u00a4\3\2\2\2\u0095\u0096\7\27\2\2\u0096\u0098"+ + "\5\f\7\2\u0097\u0099\5\34\17\2\u0098\u0097\3\2\2\2\u0099\u009a\3\2\2\2"+ + "\u009a\u0098\3\2\2\2\u009a\u009b\3\2\2\2\u009b\u00a4\3\2\2\2\u009c\u009d"+ + "\7\31\2\2\u009d\u009e\5 \21\2\u009e\u009f\5\36\20\2\u009f\u00a4\3\2\2"+ + "\2\u00a0\u00a1\5 \21\2\u00a1\u00a2\5\36\20\2\u00a2\u00a4\3\2\2\2\u00a3"+ + "U\3\2\2\2\u00a3_\3\2\2\2\u00a3g\3\2\2\2\u00a3o\3\2\2\2\u00a3\u0081\3\2"+ + "\2\2\u00a3\u008a\3\2\2\2\u00a3\u008d\3\2\2\2\u00a3\u008f\3\2\2\2\u00a3"+ + "\u0091\3\2\2\2\u00a3\u0095\3\2\2\2\u00a3\u009c\3\2\2\2\u00a3\u00a0\3\2"+ + "\2\2\u00a4\t\3\2\2\2\u00a5\u00a8\5\f\7\2\u00a6\u00a8\5\b\5\2\u00a7\u00a5"+ + "\3\2\2\2\u00a7\u00a6\3\2\2\2\u00a8\13\3\2\2\2\u00a9\u00ad\7\5\2\2\u00aa"+ + "\u00ac\5\b\5\2\u00ab\u00aa\3\2\2\2\u00ac\u00af\3\2\2\2\u00ad\u00ab\3\2"+ + "\2\2\u00ad\u00ae\3\2\2\2\u00ae\u00b0\3\2\2\2\u00af\u00ad\3\2\2\2\u00b0"+ + "\u00b1\7\6\2\2\u00b1\r\3\2\2\2\u00b2\u00b3\7\r\2\2\u00b3\17\3\2\2\2\u00b4"+ + "\u00b7\5\24\13\2\u00b5\u00b7\5 
\21\2\u00b6\u00b4\3\2\2\2\u00b6\u00b5\3"+ + "\2\2\2\u00b7\21\3\2\2\2\u00b8\u00b9\5 \21\2\u00b9\23\3\2\2\2\u00ba\u00bb"+ + "\5\26\f\2\u00bb\u00c0\5\32\16\2\u00bc\u00bd\7\f\2\2\u00bd\u00bf\5\32\16"+ + "\2\u00be\u00bc\3\2\2\2\u00bf\u00c2\3\2\2\2\u00c0\u00be\3\2\2\2\u00c0\u00c1"+ + "\3\2\2\2\u00c1\25\3\2\2\2\u00c2\u00c0\3\2\2\2\u00c3\u00c8\7J\2\2\u00c4"+ + "\u00c5\7\7\2\2\u00c5\u00c7\7\b\2\2\u00c6\u00c4\3\2\2\2\u00c7\u00ca\3\2"+ + "\2\2\u00c8\u00c6\3\2\2\2\u00c8\u00c9\3\2\2\2\u00c9\27\3\2\2\2\u00ca\u00c8"+ + "\3\2\2\2\u00cb\u00cc\7J\2\2\u00cc\u00cd\7\63\2\2\u00cd\u00d2\t\2\2\2\u00ce"+ + "\u00cf\7K\2\2\u00cf\u00d0\7\63\2\2\u00d0\u00d2\7K\2\2\u00d1\u00cb\3\2"+ + "\2\2\u00d1\u00ce\3\2\2\2\u00d2\31\3\2\2\2\u00d3\u00d6\7K\2\2\u00d4\u00d5"+ + "\7\66\2\2\u00d5\u00d7\5 \21\2\u00d6\u00d4\3\2\2\2\u00d6\u00d7\3\2\2\2"+ + "\u00d7\33\3\2\2\2\u00d8\u00d9\7\30\2\2\u00d9\u00da\7\t\2\2\u00da\u00db"+ + "\7J\2\2\u00db\u00dc\7K\2\2\u00dc\u00dd\7\n\2\2\u00dd\u00de\5\f\7\2\u00de"+ + "\35\3\2\2\2\u00df\u00e0\t\3\2\2\u00e0\37\3\2\2\2\u00e1\u00e2\b\21\1\2"+ + "\u00e2\u00e3\5$\23\2\u00e3\u00e4\t\4\2\2\u00e4\u00e5\5 \21\3\u00e5\u00e6"+ + "\b\21\1\2\u00e6\u00eb\3\2\2\2\u00e7\u00e8\5\"\22\2\u00e8\u00e9\b\21\1"+ + "\2\u00e9\u00eb\3\2\2\2\u00ea\u00e1\3\2\2\2\u00ea\u00e7\3\2\2\2\u00eb\u0127"+ + "\3\2\2\2\u00ec\u00ed\f\16\2\2\u00ed\u00ee\t\5\2\2\u00ee\u00ef\5 \21\17"+ + "\u00ef\u00f0\b\21\1\2\u00f0\u0126\3\2\2\2\u00f1\u00f2\f\r\2\2\u00f2\u00f3"+ + "\t\6\2\2\u00f3\u00f4\5 \21\16\u00f4\u00f5\b\21\1\2\u00f5\u0126\3\2\2\2"+ + "\u00f6\u00f7\f\f\2\2\u00f7\u00f8\t\7\2\2\u00f8\u00f9\5 \21\r\u00f9\u00fa"+ + "\b\21\1\2\u00fa\u0126\3\2\2\2\u00fb\u00fc\f\13\2\2\u00fc\u00fd\t\b\2\2"+ + "\u00fd\u00fe\5 \21\f\u00fe\u00ff\b\21\1\2\u00ff\u0126\3\2\2\2\u0100\u0101"+ + "\f\n\2\2\u0101\u0102\t\t\2\2\u0102\u0103\5 \21\13\u0103\u0104\b\21\1\2"+ + "\u0104\u0126\3\2\2\2\u0105\u0106\f\t\2\2\u0106\u0107\7,\2\2\u0107\u0108"+ + "\5 \21\n\u0108\u0109\b\21\1\2\u0109\u0126\3\2\2\2\u010a\u010b\f\b\2\2"+ + "\u010b\u010c\7-\2\2\u010c\u010d\5 \21\t\u010d\u010e\b\21\1\2\u010e\u0126"+ + "\3\2\2\2\u010f\u0110\f\7\2\2\u0110\u0111\7.\2\2\u0111\u0112\5 \21\b\u0112"+ + "\u0113\b\21\1\2\u0113\u0126\3\2\2\2\u0114\u0115\f\6\2\2\u0115\u0116\7"+ + "/\2\2\u0116\u0117\5 \21\7\u0117\u0118\b\21\1\2\u0118\u0126\3\2\2\2\u0119"+ + "\u011a\f\5\2\2\u011a\u011b\7\60\2\2\u011b\u011c\5 \21\6\u011c\u011d\b"+ + "\21\1\2\u011d\u0126\3\2\2\2\u011e\u011f\f\4\2\2\u011f\u0120\7\61\2\2\u0120"+ + "\u0121\5 \21\2\u0121\u0122\7\62\2\2\u0122\u0123\5 \21\4\u0123\u0124\b"+ + "\21\1\2\u0124\u0126\3\2\2\2\u0125\u00ec\3\2\2\2\u0125\u00f1\3\2\2\2\u0125"+ + "\u00f6\3\2\2\2\u0125\u00fb\3\2\2\2\u0125\u0100\3\2\2\2\u0125\u0105\3\2"+ + "\2\2\u0125\u010a\3\2\2\2\u0125\u010f\3\2\2\2\u0125\u0114\3\2\2\2\u0125"+ + "\u0119\3\2\2\2\u0125\u011e\3\2\2\2\u0126\u0129\3\2\2\2\u0127\u0125\3\2"+ + "\2\2\u0127\u0128\3\2\2\2\u0128!\3\2\2\2\u0129\u0127\3\2\2\2\u012a\u012b"+ + "\6\22\16\3\u012b\u012c\t\n\2\2\u012c\u0148\5$\23\2\u012d\u012e\6\22\17"+ + "\3\u012e\u012f\5$\23\2\u012f\u0130\t\n\2\2\u0130\u0148\3\2\2\2\u0131\u0132"+ + "\6\22\20\3\u0132\u0148\5$\23\2\u0133\u0134\6\22\21\3\u0134\u0135\t\13"+ + "\2\2\u0135\u0148\b\22\1\2\u0136\u0137\6\22\22\3\u0137\u0138\7G\2\2\u0138"+ + "\u0148\b\22\1\2\u0139\u013a\6\22\23\3\u013a\u013b\7H\2\2\u013b\u0148\b"+ + "\22\1\2\u013c\u013d\6\22\24\3\u013d\u013e\7I\2\2\u013e\u0148\b\22\1\2"+ + "\u013f\u0140\6\22\25\3\u0140\u0141\t\f\2\2\u0141\u0148\5\"\22\2\u0142"+ + "\u0143\7\t\2\2\u0143\u0144\5\26\f\2\u0144\u0145\7\n\2\2\u0145\u0146\5"+ + 
"\"\22\2\u0146\u0148\3\2\2\2\u0147\u012a\3\2\2\2\u0147\u012d\3\2\2\2\u0147"+ + "\u0131\3\2\2\2\u0147\u0133\3\2\2\2\u0147\u0136\3\2\2\2\u0147\u0139\3\2"+ + "\2\2\u0147\u013c\3\2\2\2\u0147\u013f\3\2\2\2\u0147\u0142\3\2\2\2\u0148"+ + "#\3\2\2\2\u0149\u014d\5&\24\2\u014a\u014c\5(\25\2\u014b\u014a\3\2\2\2"+ + "\u014c\u014f\3\2\2\2\u014d\u014b\3\2\2\2\u014d\u014e\3\2\2\2\u014e\u016c"+ + "\3\2\2\2\u014f\u014d\3\2\2\2\u0150\u0151\5\26\f\2\u0151\u0155\5*\26\2"+ + "\u0152\u0154\5(\25\2\u0153\u0152\3\2\2\2\u0154\u0157\3\2\2\2\u0155\u0153"+ + "\3\2\2\2\u0155\u0156\3\2\2\2\u0156\u016c\3\2\2\2\u0157\u0155\3\2\2\2\u0158"+ + "\u0159\7\26\2\2\u0159\u015e\7J\2\2\u015a\u015b\7\7\2\2\u015b\u015c\5 "+ + "\21\2\u015c\u015d\7\b\2\2\u015d\u015f\3\2\2\2\u015e\u015a\3\2\2\2\u015f"+ + "\u0160\3\2\2\2\u0160\u015e\3\2\2\2\u0160\u0161\3\2\2\2\u0161\u0169\3\2"+ + "\2\2\u0162\u0166\5*\26\2\u0163\u0165\5(\25\2\u0164\u0163\3\2\2\2\u0165"+ + "\u0168\3\2\2\2\u0166\u0164\3\2\2\2\u0166\u0167\3\2\2\2\u0167\u016a\3\2"+ + "\2\2\u0168\u0166\3\2\2\2\u0169\u0162\3\2\2\2\u0169\u016a\3\2\2\2\u016a"+ + "\u016c\3\2\2\2\u016b\u0149\3\2\2\2\u016b\u0150\3\2\2\2\u016b\u0158\3\2"+ + "\2\2\u016c%\3\2\2\2\u016d\u016e\6\24\26\3\u016e\u016f\7\t\2\2\u016f\u0170"+ + "\5 \21\2\u0170\u0171\7\n\2\2\u0171\u0172\b\24\1\2\u0172\u0180\3\2\2\2"+ + "\u0173\u0174\6\24\27\3\u0174\u0175\7\t\2\2\u0175\u0176\5\"\22\2\u0176"+ + "\u0177\7\n\2\2\u0177\u0180\3\2\2\2\u0178\u0180\7F\2\2\u0179\u0180\7K\2"+ + "\2\u017a\u017b\7K\2\2\u017b\u0180\5.\30\2\u017c\u017d\7\26\2\2\u017d\u017e"+ + "\7J\2\2\u017e\u0180\5.\30\2\u017f\u016d\3\2\2\2\u017f\u0173\3\2\2\2\u017f"+ + "\u0178\3\2\2\2\u017f\u0179\3\2\2\2\u017f\u017a\3\2\2\2\u017f\u017c\3\2"+ + "\2\2\u0180\'\3\2\2\2\u0181\u0182\6\25\30\3\u0182\u0186\5*\26\2\u0183\u0184"+ + "\6\25\31\3\u0184\u0186\5,\27\2\u0185\u0181\3\2\2\2\u0185\u0183\3\2\2\2"+ + "\u0186)\3\2\2\2\u0187\u0188\7\13\2\2\u0188\u0189\7M\2\2\u0189\u018d\5"+ + ".\30\2\u018a\u018b\7\13\2\2\u018b\u018d\t\r\2\2\u018c\u0187\3\2\2\2\u018c"+ + "\u018a\3\2\2\2\u018d+\3\2\2\2\u018e\u018f\7\7\2\2\u018f\u0190\5 \21\2"+ + "\u0190\u0191\7\b\2\2\u0191-\3\2\2\2\u0192\u019b\7\t\2\2\u0193\u0198\5"+ + "\60\31\2\u0194\u0195\7\f\2\2\u0195\u0197\5\60\31\2\u0196\u0194\3\2\2\2"+ + "\u0197\u019a\3\2\2\2\u0198\u0196\3\2\2\2\u0198\u0199\3\2\2\2\u0199\u019c"+ + "\3\2\2\2\u019a\u0198\3\2\2\2\u019b\u0193\3\2\2\2\u019b\u019c\3\2\2\2\u019c"+ + "\u019d\3\2\2\2\u019d\u019e\7\n\2\2\u019e/\3\2\2\2\u019f\u01a2\5 \21\2"+ + "\u01a0\u01a2\5\30\r\2\u01a1\u019f\3\2\2\2\u01a1\u01a0\3\2\2\2\u01a2\61"+ + "\3\2\2\2%\65;NQ]ervz\177\u009a\u00a3\u00a7\u00ad\u00b6\u00c0\u00c8\u00d1"+ + "\u00d6\u00ea\u0125\u0127\u0147\u014d\u0155\u0160\u0166\u0169\u016b\u017f"+ + "\u0185\u018c\u0198\u019b\u01a1"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParserBaseVisitor.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParserBaseVisitor.java index f116f087c5c..08358384fca 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParserBaseVisitor.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParserBaseVisitor.java @@ -18,6 +18,20 @@ class PainlessParserBaseVisitor extends AbstractParseTreeVisitor implement * {@link #visitChildren} on {@code ctx}.
</p> */ @Override public T visitSource(PainlessParser.SourceContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + * <p>The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.</p> + */ + @Override public T visitFunction(PainlessParser.FunctionContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + * <p>The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.</p> + */ + @Override public T visitParameters(PainlessParser.ParametersContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * @@ -333,6 +347,13 @@ class PainlessParserBaseVisitor extends AbstractParseTreeVisitor implement * {@link #visitChildren} on {@code ctx}.</p> */ @Override public T visitVariable(PainlessParser.VariableContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + * <p>The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.</p>
+ */ + @Override public T visitCalllocal(PainlessParser.CalllocalContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParserVisitor.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParserVisitor.java index f0943743ef8..5c0adb5076a 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParserVisitor.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParserVisitor.java @@ -16,6 +16,18 @@ interface PainlessParserVisitor extends ParseTreeVisitor { * @return the visitor result */ T visitSource(PainlessParser.SourceContext ctx); + /** + * Visit a parse tree produced by {@link PainlessParser#function}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitFunction(PainlessParser.FunctionContext ctx); + /** + * Visit a parse tree produced by {@link PainlessParser#parameters}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitParameters(PainlessParser.ParametersContext ctx); /** * Visit a parse tree produced by the {@code if} * labeled alternative in {@link PainlessParser#statement}. @@ -320,6 +332,13 @@ interface PainlessParserVisitor extends ParseTreeVisitor { * @return the visitor result */ T visitVariable(PainlessParser.VariableContext ctx); + /** + * Visit a parse tree produced by the {@code calllocal} + * labeled alternative in {@link PainlessParser#primary}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitCalllocal(PainlessParser.CalllocalContext ctx); /** * Visit a parse tree produced by the {@code newobject} * labeled alternative in {@link PainlessParser#primary}. diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java index 82ccf4083f1..aa474f14c81 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java @@ -27,10 +27,13 @@ import org.antlr.v4.runtime.ParserRuleContext; import org.antlr.v4.runtime.RecognitionException; import org.antlr.v4.runtime.Recognizer; import org.antlr.v4.runtime.atn.PredictionMode; +import org.antlr.v4.runtime.tree.TerminalNode; import org.elasticsearch.painless.CompilerSettings; +import org.elasticsearch.painless.Locals.ExecuteReserved; +import org.elasticsearch.painless.Locals.FunctionReserved; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.Operation; -import org.elasticsearch.painless.Variables.Reserved; +import org.elasticsearch.painless.Locals.Reserved; import org.elasticsearch.painless.antlr.PainlessParser.AfterthoughtContext; import org.elasticsearch.painless.antlr.PainlessParser.ArgumentContext; import org.elasticsearch.painless.antlr.PainlessParser.ArgumentsContext; @@ -41,6 +44,7 @@ import org.elasticsearch.painless.antlr.PainlessParser.BoolContext; import org.elasticsearch.painless.antlr.PainlessParser.BraceaccessContext; import org.elasticsearch.painless.antlr.PainlessParser.BreakContext; import org.elasticsearch.painless.antlr.PainlessParser.CallinvokeContext; +import org.elasticsearch.painless.antlr.PainlessParser.CalllocalContext; import org.elasticsearch.painless.antlr.PainlessParser.CastContext; import org.elasticsearch.painless.antlr.PainlessParser.ChainprecContext; import 
org.elasticsearch.painless.antlr.PainlessParser.CompContext; @@ -62,6 +66,7 @@ import org.elasticsearch.painless.antlr.PainlessParser.FalseContext; import org.elasticsearch.painless.antlr.PainlessParser.FieldaccessContext; import org.elasticsearch.painless.antlr.PainlessParser.ForContext; import org.elasticsearch.painless.antlr.PainlessParser.FuncrefContext; +import org.elasticsearch.painless.antlr.PainlessParser.FunctionContext; import org.elasticsearch.painless.antlr.PainlessParser.IfContext; import org.elasticsearch.painless.antlr.PainlessParser.InitializerContext; import org.elasticsearch.painless.antlr.PainlessParser.NewarrayContext; @@ -69,6 +74,7 @@ import org.elasticsearch.painless.antlr.PainlessParser.NewobjectContext; import org.elasticsearch.painless.antlr.PainlessParser.NullContext; import org.elasticsearch.painless.antlr.PainlessParser.NumericContext; import org.elasticsearch.painless.antlr.PainlessParser.OperatorContext; +import org.elasticsearch.painless.antlr.PainlessParser.ParametersContext; import org.elasticsearch.painless.antlr.PainlessParser.PostContext; import org.elasticsearch.painless.antlr.PainlessParser.PreContext; import org.elasticsearch.painless.antlr.PainlessParser.ReadContext; @@ -105,7 +111,8 @@ import org.elasticsearch.painless.node.ENull; import org.elasticsearch.painless.node.ENumeric; import org.elasticsearch.painless.node.EUnary; import org.elasticsearch.painless.node.LBrace; -import org.elasticsearch.painless.node.LCall; +import org.elasticsearch.painless.node.LCallInvoke; +import org.elasticsearch.painless.node.LCallLocal; import org.elasticsearch.painless.node.LCast; import org.elasticsearch.painless.node.LField; import org.elasticsearch.painless.node.LNewArray; @@ -123,6 +130,7 @@ import org.elasticsearch.painless.node.SDo; import org.elasticsearch.painless.node.SEach; import org.elasticsearch.painless.node.SExpression; import org.elasticsearch.painless.node.SFor; +import org.elasticsearch.painless.node.SFunction; import org.elasticsearch.painless.node.SIf; import org.elasticsearch.painless.node.SIfElse; import org.elasticsearch.painless.node.SReturn; @@ -131,7 +139,9 @@ import org.elasticsearch.painless.node.SThrow; import org.elasticsearch.painless.node.STry; import org.elasticsearch.painless.node.SWhile; +import java.util.ArrayDeque; import java.util.ArrayList; +import java.util.Deque; import java.util.List; /** @@ -139,19 +149,21 @@ import java.util.List; */ public final class Walker extends PainlessParserBaseVisitor { - public static SSource buildPainlessTree(String name, String sourceText, Reserved reserved, CompilerSettings settings) { - return new Walker(name, sourceText, reserved, settings).source; + public static SSource buildPainlessTree(String sourceName, String sourceText, CompilerSettings settings) { + return new Walker(sourceName, sourceText, settings).source; } - private final Reserved reserved; private final SSource source; private final CompilerSettings settings; private final String sourceName; + private final String sourceText; - private Walker(String name, String sourceText, Reserved reserved, CompilerSettings settings) { - this.reserved = reserved; + private final Deque reserved = new ArrayDeque<>(); + + private Walker(String sourceName, String sourceText, CompilerSettings settings) { this.settings = settings; - this.sourceName = Location.computeSourceName(name, sourceText); + this.sourceName = Location.computeSourceName(sourceName, sourceText); + this.sourceText = sourceText; this.source = 
(SSource)visit(buildAntlrTree(sourceText)); } @@ -197,13 +209,51 @@ public final class Walker extends PainlessParserBaseVisitor { @Override public Object visitSource(SourceContext ctx) { + reserved.push(new ExecuteReserved()); + + List functions = new ArrayList<>(); + + for (FunctionContext function : ctx.function()) { + functions.add((SFunction)visit(function)); + } + List statements = new ArrayList<>(); for (StatementContext statement : ctx.statement()) { statements.add((AStatement)visit(statement)); } - return new SSource(location(ctx), statements); + return new SSource(sourceName, sourceText, (ExecuteReserved)reserved.pop(), location(ctx), functions, statements); + } + + @Override + public Object visitFunction(FunctionContext ctx) { + reserved.push(new FunctionReserved()); + + String rtnType = ctx.decltype().getText(); + String name = ctx.ID().getText(); + List paramTypes = new ArrayList<>(); + List paramNames = new ArrayList<>(); + List statements = new ArrayList<>(); + + for (DecltypeContext decltype : ctx.parameters().decltype()) { + paramTypes.add(decltype.getText()); + } + + for (TerminalNode id : ctx.parameters().ID()) { + paramNames.add(id.getText()); + } + + for (StatementContext statement : ctx.block().statement()) { + statements.add((AStatement)visit(statement)); + } + + return new SFunction((FunctionReserved)reserved.pop(), location(ctx), rtnType, name, paramTypes, paramNames, statements); + } + + @Override + public Object visitParameters(ParametersContext ctx) { + throw location(ctx).createError(new IllegalStateException("Illegal tree structure.")); } @Override @@ -222,18 +272,16 @@ public final class Walker extends PainlessParserBaseVisitor { @Override public Object visitWhile(WhileContext ctx) { - if (settings.getMaxLoopCounter() > 0) { - reserved.usesLoop(); - } + reserved.peek().setMaxLoopCounter(settings.getMaxLoopCounter()); AExpression expression = (AExpression)visitExpression(ctx.expression()); if (ctx.trailer() != null) { SBlock block = (SBlock)visit(ctx.trailer()); - return new SWhile(location(ctx), settings.getMaxLoopCounter(), expression, block); + return new SWhile(location(ctx), expression, block); } else if (ctx.empty() != null) { - return new SWhile(location(ctx), settings.getMaxLoopCounter(), expression, null); + return new SWhile(location(ctx), expression, null); } else { throw location(ctx).createError(new IllegalStateException(" Illegal tree structure.")); } @@ -241,21 +289,17 @@ public final class Walker extends PainlessParserBaseVisitor { @Override public Object visitDo(DoContext ctx) { - if (settings.getMaxLoopCounter() > 0) { - reserved.usesLoop(); - } + reserved.peek().setMaxLoopCounter(settings.getMaxLoopCounter()); AExpression expression = (AExpression)visitExpression(ctx.expression()); SBlock block = (SBlock)visit(ctx.block()); - return new SDo(location(ctx), settings.getMaxLoopCounter(), block, expression); + return new SDo(location(ctx), block, expression); } @Override public Object visitFor(ForContext ctx) { - if (settings.getMaxLoopCounter() > 0) { - reserved.usesLoop(); - } + reserved.peek().setMaxLoopCounter(settings.getMaxLoopCounter()); ANode initializer = ctx.initializer() == null ? null : (ANode)visit(ctx.initializer()); AExpression expression = ctx.expression() == null ? 
null : (AExpression)visitExpression(ctx.expression()); @@ -264,9 +308,9 @@ public final class Walker extends PainlessParserBaseVisitor { if (ctx.trailer() != null) { SBlock block = (SBlock)visit(ctx.trailer()); - return new SFor(location(ctx), settings.getMaxLoopCounter(), initializer, expression, afterthought, block); + return new SFor(location(ctx), initializer, expression, afterthought, block); } else if (ctx.empty() != null) { - return new SFor(location(ctx), settings.getMaxLoopCounter(), initializer, expression, afterthought, null); + return new SFor(location(ctx), initializer, expression, afterthought, null); } else { throw location(ctx).createError(new IllegalStateException("Illegal tree structure.")); } @@ -274,16 +318,14 @@ public final class Walker extends PainlessParserBaseVisitor { @Override public Object visitEach(EachContext ctx) { - if (settings.getMaxLoopCounter() > 0) { - reserved.usesLoop(); - } + reserved.peek().setMaxLoopCounter(settings.getMaxLoopCounter()); String type = ctx.decltype().getText(); String name = ctx.ID().getText(); AExpression expression = (AExpression)visitExpression(ctx.expression()); SBlock block = (SBlock)visit(ctx.trailer()); - return new SEach(location(ctx), settings.getMaxLoopCounter(), type, name, expression, block); + return new SEach(location(ctx), type, name, expression, block); } @Override @@ -796,7 +838,18 @@ public final class Walker extends PainlessParserBaseVisitor { List links = new ArrayList<>(); links.add(new LVariable(location(ctx), name)); - reserved.markReserved(name); + reserved.peek().markReserved(name); + + return links; + } + + @Override + public Object visitCalllocal(CalllocalContext ctx) { + String name = ctx.ID().getText(); + @SuppressWarnings("unchecked") + List arguments = (List)visit(ctx.arguments()); + List links = new ArrayList<>(); + links.add(new LCallLocal(location(ctx), name, arguments)); return links; } @@ -830,7 +883,7 @@ public final class Walker extends PainlessParserBaseVisitor { @SuppressWarnings("unchecked") List arguments = (List)visit(ctx.arguments()); - return new LCall(location(ctx), name, arguments); + return new LCallInvoke(location(ctx), name, arguments); } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AExpression.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AExpression.java index c5494b978d8..fbaaa83a6ad 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AExpression.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AExpression.java @@ -23,7 +23,7 @@ import org.elasticsearch.painless.Definition.Cast; import org.elasticsearch.painless.Definition.Type; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.AnalyzerCaster; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.objectweb.asm.Label; import org.elasticsearch.painless.MethodWriter; @@ -106,7 +106,7 @@ public abstract class AExpression extends ANode { /** * Checks for errors and collects data for the writing phase. */ - abstract void analyze(Variables variables); + abstract void analyze(Locals locals); /** * Writes ASM based on the data collected during the analysis phase. @@ -118,7 +118,7 @@ public abstract class AExpression extends ANode { * nodes with the constant variable set to a non-null value with {@link EConstant}. * @return The new child node for the parent node calling this method. 
*/ - AExpression cast(Variables variables) { + AExpression cast(Locals locals) { final Cast cast = AnalyzerCaster.getLegalCast(location, actual, expected, explicit, internal); if (cast == null) { @@ -136,7 +136,7 @@ public abstract class AExpression extends ANode { // will already be the same. EConstant econstant = new EConstant(location, constant); - econstant.analyze(variables); + econstant.analyze(locals); if (!expected.equals(econstant.actual)) { throw createError(new IllegalStateException("Illegal tree structure.")); @@ -170,7 +170,7 @@ public abstract class AExpression extends ANode { constant = AnalyzerCaster.constCast(location, constant, cast); EConstant econstant = new EConstant(location, constant); - econstant.analyze(variables); + econstant.analyze(locals); if (!expected.equals(econstant.actual)) { throw createError(new IllegalStateException("Illegal tree structure.")); @@ -201,7 +201,7 @@ public abstract class AExpression extends ANode { // the EConstant will already be the same. EConstant econstant = new EConstant(location, constant); - econstant.analyze(variables); + econstant.analyze(locals); if (!actual.equals(econstant.actual)) { throw createError(new IllegalStateException("Illegal tree structure.")); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ALink.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ALink.java index 19875ebeec5..6d52bfe0de1 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ALink.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ALink.java @@ -21,7 +21,7 @@ package org.elasticsearch.painless.node; import org.elasticsearch.painless.Definition.Type; import org.elasticsearch.painless.Location; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.MethodWriter; /** @@ -86,7 +86,7 @@ public abstract class ALink extends ANode { * def or a shortcut is used. Otherwise, returns itself. This will be * updated into the {@link EChain} node's list of links. */ - abstract ALink analyze(Variables variables); + abstract ALink analyze(Locals locals); /** * Write values before a load/store occurs such as an array index. diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AStatement.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AStatement.java index b9e6679f630..e34d174ac43 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AStatement.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AStatement.java @@ -19,7 +19,7 @@ package org.elasticsearch.painless.node; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.objectweb.asm.Label; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; @@ -115,7 +115,7 @@ public abstract class AStatement extends ANode { /** * Checks for errors and collects data for the writing phase. */ - abstract void analyze(Variables variables); + abstract void analyze(Locals locals); /** * Writes ASM based on the data collected during the analysis phase. 
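The Locals rename above, together with the new function, parameters, and calllocal grammar rules, is what lets a script declare and call its own functions, each analyzed against its own reserved scope (FunctionReserved per function, ExecuteReserved for the top-level statements). As an illustrative sketch only — the syntax below is inferred from the grammar and Walker changes in this patch, not copied from its tests — a script exercising these new paths might look like:

    // A user-defined function: parsed by visitFunction/visitParameters
    // and built into an SFunction node with its own FunctionReserved scope.
    int fib(int n) {
        if (n <= 1) {
            return n;
        }
        // An unqualified call by ID is a calllocal primary and is walked
        // into an LCallLocal link rather than an LCallInvoke.
        return fib(n - 1) + fib(n - 2);
    }

    // Top-level statements follow the function declarations in a source
    // unit and are gathered under ExecuteReserved in visitSource.
    return fib(10);
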
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EBinary.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EBinary.java index 32a6e2382f2..55c2cc18210 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EBinary.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EBinary.java @@ -26,7 +26,7 @@ import org.elasticsearch.painless.Definition.Type; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; import org.elasticsearch.painless.Operation; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; /** * Represents a binary math expression. @@ -48,35 +48,35 @@ public final class EBinary extends AExpression { } @Override - void analyze(Variables variables) { + void analyze(Locals locals) { if (operation == Operation.MUL) { - analyzeMul(variables); + analyzeMul(locals); } else if (operation == Operation.DIV) { - analyzeDiv(variables); + analyzeDiv(locals); } else if (operation == Operation.REM) { - analyzeRem(variables); + analyzeRem(locals); } else if (operation == Operation.ADD) { - analyzeAdd(variables); + analyzeAdd(locals); } else if (operation == Operation.SUB) { - analyzeSub(variables); + analyzeSub(locals); } else if (operation == Operation.LSH) { - analyzeLSH(variables); + analyzeLSH(locals); } else if (operation == Operation.RSH) { - analyzeRSH(variables); + analyzeRSH(locals); } else if (operation == Operation.USH) { - analyzeUSH(variables); + analyzeUSH(locals); } else if (operation == Operation.BWAND) { - analyzeBWAnd(variables); + analyzeBWAnd(locals); } else if (operation == Operation.XOR) { - analyzeXor(variables); + analyzeXor(locals); } else if (operation == Operation.BWOR) { - analyzeBWOr(variables); + analyzeBWOr(locals); } else { throw createError(new IllegalStateException("Illegal tree structure.")); } } - private void analyzeMul(Variables variables) { + private void analyzeMul(Locals variables) { left.analyze(variables); right.analyze(variables); @@ -112,7 +112,7 @@ public final class EBinary extends AExpression { actual = promote; } - private void analyzeDiv(Variables variables) { + private void analyzeDiv(Locals variables) { left.analyze(variables); right.analyze(variables); @@ -152,7 +152,7 @@ public final class EBinary extends AExpression { actual = promote; } - private void analyzeRem(Variables variables) { + private void analyzeRem(Locals variables) { left.analyze(variables); right.analyze(variables); @@ -192,7 +192,7 @@ public final class EBinary extends AExpression { actual = promote; } - private void analyzeAdd(Variables variables) { + private void analyzeAdd(Locals variables) { left.analyze(variables); right.analyze(variables); @@ -244,7 +244,7 @@ public final class EBinary extends AExpression { actual = promote; } - private void analyzeSub(Variables variables) { + private void analyzeSub(Locals variables) { left.analyze(variables); right.analyze(variables); @@ -280,7 +280,7 @@ public final class EBinary extends AExpression { actual = promote; } - private void analyzeLSH(Variables variables) { + private void analyzeLSH(Locals variables) { left.analyze(variables); right.analyze(variables); @@ -313,7 +313,7 @@ public final class EBinary extends AExpression { actual = promote; } - private void analyzeRSH(Variables variables) { + private void analyzeRSH(Locals variables) { left.analyze(variables); right.analyze(variables); @@ -346,7 +346,7 @@ public final class EBinary extends 
AExpression { actual = promote; } - private void analyzeUSH(Variables variables) { + private void analyzeUSH(Locals variables) { left.analyze(variables); right.analyze(variables); @@ -379,7 +379,7 @@ public final class EBinary extends AExpression { actual = promote; } - private void analyzeBWAnd(Variables variables) { + private void analyzeBWAnd(Locals variables) { left.analyze(variables); right.analyze(variables); @@ -411,7 +411,7 @@ public final class EBinary extends AExpression { actual = promote; } - private void analyzeXor(Variables variables) { + private void analyzeXor(Locals variables) { left.analyze(variables); right.analyze(variables); @@ -445,7 +445,7 @@ public final class EBinary extends AExpression { actual = promote; } - private void analyzeBWOr(Variables variables) { + private void analyzeBWOr(Locals variables) { left.analyze(variables); right.analyze(variables); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EBool.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EBool.java index dd221f040c4..5aa2daeeb34 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EBool.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EBool.java @@ -22,7 +22,7 @@ package org.elasticsearch.painless.node; import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.Operation; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.objectweb.asm.Label; import org.elasticsearch.painless.MethodWriter; @@ -44,14 +44,14 @@ public final class EBool extends AExpression { } @Override - void analyze(Variables variables) { + void analyze(Locals locals) { left.expected = Definition.BOOLEAN_TYPE; - left.analyze(variables); - left = left.cast(variables); + left.analyze(locals); + left = left.cast(locals); right.expected = Definition.BOOLEAN_TYPE; - right.analyze(variables); - right = right.cast(variables); + right.analyze(locals); + right = right.cast(locals); if (left.constant != null && right.constant != null) { if (operation == Operation.AND) { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EBoolean.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EBoolean.java index 5065989600f..877e79549af 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EBoolean.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EBoolean.java @@ -21,7 +21,7 @@ package org.elasticsearch.painless.node; import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Location; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.MethodWriter; /** @@ -36,7 +36,7 @@ public final class EBoolean extends AExpression { } @Override - void analyze(Variables variables) { + void analyze(Locals locals) { actual = Definition.BOOLEAN_TYPE; } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECapturingFunctionRef.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECapturingFunctionRef.java index d6c16b6ffc2..4e125a55de6 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECapturingFunctionRef.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECapturingFunctionRef.java @@ -24,8 +24,8 @@ import 
org.elasticsearch.painless.Definition; import org.elasticsearch.painless.FunctionRef; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; -import org.elasticsearch.painless.Variables; -import org.elasticsearch.painless.Variables.Variable; +import org.elasticsearch.painless.Locals; +import org.elasticsearch.painless.Locals.Variable; import org.objectweb.asm.Type; import static org.elasticsearch.painless.WriterConstants.DEF_BOOTSTRAP_HANDLE; @@ -52,7 +52,7 @@ public class ECapturingFunctionRef extends AExpression { } @Override - void analyze(Variables variables) { + void analyze(Locals variables) { captured = variables.getVariable(location, type); if (expected == null) { defInterface = true; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECast.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECast.java index 27974240125..7892b918ed1 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECast.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECast.java @@ -21,7 +21,7 @@ package org.elasticsearch.painless.node; import org.elasticsearch.painless.Definition.Cast; import org.elasticsearch.painless.Location; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.MethodWriter; /** @@ -45,7 +45,7 @@ final class ECast extends AExpression { } @Override - void analyze(Variables variables) { + void analyze(Locals locals) { throw createError(new IllegalStateException("Illegal tree structure.")); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EChain.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EChain.java index 9b2968db565..e21a1dd3134 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EChain.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EChain.java @@ -26,7 +26,7 @@ import org.elasticsearch.painless.Definition.Type; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.AnalyzerCaster; import org.elasticsearch.painless.Operation; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.MethodWriter; import java.util.List; @@ -59,20 +59,20 @@ public final class EChain extends AExpression { } @Override - void analyze(Variables variables) { - analyzeLinks(variables); + void analyze(Locals locals) { + analyzeLinks(locals); analyzeIncrDecr(); if (operation != null) { - analyzeCompound(variables); + analyzeCompound(locals); } else if (expression != null) { - analyzeWrite(variables); + analyzeWrite(locals); } else { analyzeRead(); } } - private void analyzeLinks(Variables variables) { + private void analyzeLinks(Locals variables) { ALink previous = null; int index = 0; @@ -153,7 +153,7 @@ public final class EChain extends AExpression { } } - private void analyzeCompound(Variables variables) { + private void analyzeCompound(Locals variables) { ALink last = links.get(links.size() - 1); expression.analyze(variables); @@ -214,7 +214,7 @@ public final class EChain extends AExpression { this.actual = read ? 
last.after : Definition.VOID_TYPE; } - private void analyzeWrite(Variables variables) { + private void analyzeWrite(Locals variables) { ALink last = links.get(links.size() - 1); // If the store node is a def node, we remove the cast to def from the expression diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EComp.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EComp.java index 407b59f92cf..ec84600d323 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EComp.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EComp.java @@ -25,7 +25,7 @@ import org.elasticsearch.painless.Definition.Type; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.AnalyzerCaster; import org.elasticsearch.painless.Operation; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.objectweb.asm.Label; import org.elasticsearch.painless.MethodWriter; @@ -56,29 +56,29 @@ public final class EComp extends AExpression { } @Override - void analyze(Variables variables) { + void analyze(Locals locals) { if (operation == Operation.EQ) { - analyzeEq(variables); + analyzeEq(locals); } else if (operation == Operation.EQR) { - analyzeEqR(variables); + analyzeEqR(locals); } else if (operation == Operation.NE) { - analyzeNE(variables); + analyzeNE(locals); } else if (operation == Operation.NER) { - analyzeNER(variables); + analyzeNER(locals); } else if (operation == Operation.GTE) { - analyzeGTE(variables); + analyzeGTE(locals); } else if (operation == Operation.GT) { - analyzeGT(variables); + analyzeGT(locals); } else if (operation == Operation.LTE) { - analyzeLTE(variables); + analyzeLTE(locals); } else if (operation == Operation.LT) { - analyzeLT(variables); + analyzeLT(locals); } else { throw createError(new IllegalStateException("Illegal tree structure.")); } } - private void analyzeEq(Variables variables) { + private void analyzeEq(Locals variables) { left.analyze(variables); right.analyze(variables); @@ -124,7 +124,7 @@ public final class EComp extends AExpression { actual = Definition.BOOLEAN_TYPE; } - private void analyzeEqR(Variables variables) { + private void analyzeEqR(Locals variables) { left.analyze(variables); right.analyze(variables); @@ -166,7 +166,7 @@ public final class EComp extends AExpression { actual = Definition.BOOLEAN_TYPE; } - private void analyzeNE(Variables variables) { + private void analyzeNE(Locals variables) { left.analyze(variables); right.analyze(variables); @@ -212,7 +212,7 @@ public final class EComp extends AExpression { actual = Definition.BOOLEAN_TYPE; } - private void analyzeNER(Variables variables) { + private void analyzeNER(Locals variables) { left.analyze(variables); right.analyze(variables); @@ -254,7 +254,7 @@ public final class EComp extends AExpression { actual = Definition.BOOLEAN_TYPE; } - private void analyzeGTE(Variables variables) { + private void analyzeGTE(Locals variables) { left.analyze(variables); right.analyze(variables); @@ -290,7 +290,7 @@ public final class EComp extends AExpression { actual = Definition.BOOLEAN_TYPE; } - private void analyzeGT(Variables variables) { + private void analyzeGT(Locals variables) { left.analyze(variables); right.analyze(variables); @@ -326,7 +326,7 @@ public final class EComp extends AExpression { actual = Definition.BOOLEAN_TYPE; } - private void analyzeLTE(Variables variables) { + private void analyzeLTE(Locals variables) { left.analyze(variables); 
right.analyze(variables); @@ -362,7 +362,7 @@ public final class EComp extends AExpression { actual = Definition.BOOLEAN_TYPE; } - private void analyzeLT(Variables variables) { + private void analyzeLT(Locals variables) { left.analyze(variables); right.analyze(variables); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EConditional.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EConditional.java index 65231c45ed3..024e0c800fc 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EConditional.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EConditional.java @@ -23,7 +23,7 @@ import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Definition.Type; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.AnalyzerCaster; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.objectweb.asm.Label; import org.elasticsearch.painless.MethodWriter; @@ -45,10 +45,10 @@ public final class EConditional extends AExpression { } @Override - void analyze(Variables variables) { + void analyze(Locals locals) { condition.expected = Definition.BOOLEAN_TYPE; - condition.analyze(variables); - condition = condition.cast(variables); + condition.analyze(locals); + condition = condition.cast(locals); if (condition.constant != null) { throw createError(new IllegalArgumentException("Extraneous conditional statement.")); @@ -62,8 +62,8 @@ public final class EConditional extends AExpression { right.internal = internal; actual = expected; - left.analyze(variables); - right.analyze(variables); + left.analyze(locals); + right.analyze(locals); if (expected == null) { final Type promote = AnalyzerCaster.promoteConditional(left.actual, right.actual, left.constant, right.constant); @@ -73,8 +73,8 @@ public final class EConditional extends AExpression { actual = promote; } - left = left.cast(variables); - right = right.cast(variables); + left = left.cast(locals); + right = right.cast(locals); } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EConstant.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EConstant.java index 8cb032c5c3f..267d32983b9 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EConstant.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EConstant.java @@ -22,7 +22,7 @@ package org.elasticsearch.painless.node; import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Definition.Sort; import org.elasticsearch.painless.Location; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.MethodWriter; /** @@ -38,7 +38,7 @@ final class EConstant extends AExpression { } @Override - void analyze(Variables variables) { + void analyze(Locals locals) { if (constant instanceof String) { actual = Definition.STRING_TYPE; } else if (constant instanceof Double) { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EDecimal.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EDecimal.java index e59002c2a3b..0fd7fe46b51 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EDecimal.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EDecimal.java @@ -21,7 +21,7 @@ package org.elasticsearch.painless.node; 
import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Location; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.MethodWriter; /** @@ -38,7 +38,7 @@ public final class EDecimal extends AExpression { } @Override - void analyze(Variables variables) { + void analyze(Locals locals) { if (value.endsWith("f") || value.endsWith("F")) { try { constant = Float.parseFloat(value.substring(0, value.length() - 1)); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EExplicit.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EExplicit.java index 0a55727b5e9..007a5d59e59 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EExplicit.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EExplicit.java @@ -21,7 +21,7 @@ package org.elasticsearch.painless.node; import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Location; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.MethodWriter; /** @@ -40,7 +40,7 @@ public final class EExplicit extends AExpression { } @Override - void analyze(Variables variables) { + void analyze(Locals locals) { try { actual = Definition.getType(this.type); } catch (IllegalArgumentException exception) { @@ -49,8 +49,8 @@ public final class EExplicit extends AExpression { child.expected = actual; child.explicit = true; - child.analyze(variables); - child = child.cast(variables); + child.analyze(locals); + child = child.cast(locals); } @Override @@ -58,11 +58,11 @@ public final class EExplicit extends AExpression { throw createError(new IllegalStateException("Illegal tree structure.")); } - AExpression cast(Variables variables) { + AExpression cast(Locals locals) { child.expected = expected; child.explicit = explicit; child.internal = internal; - return child.cast(variables); + return child.cast(locals); } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EFunctionRef.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EFunctionRef.java index 26be001b211..9d96809bf25 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EFunctionRef.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EFunctionRef.java @@ -23,7 +23,7 @@ import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.FunctionRef; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.objectweb.asm.Type; import static org.elasticsearch.painless.WriterConstants.LAMBDA_BOOTSTRAP_HANDLE; @@ -36,7 +36,7 @@ import java.lang.invoke.LambdaMetafactory; public class EFunctionRef extends AExpression { public final String type; public final String call; - + private FunctionRef ref; public EFunctionRef(Location location, String type, String call) { @@ -47,7 +47,7 @@ public class EFunctionRef extends AExpression { } @Override - void analyze(Variables variables) { + void analyze(Locals locals) { if (expected == null) { ref = null; actual = Definition.getType("String"); @@ -72,22 +72,22 @@ public class EFunctionRef extends AExpression { Type samMethodType = Type.getMethodType(ref.samMethodType.toMethodDescriptorString()); Type interfaceType = 
Type.getMethodType(ref.interfaceMethodType.toMethodDescriptorString()); if (ref.needsBridges()) { - writer.invokeDynamic(ref.invokedName, - invokedType, - LAMBDA_BOOTSTRAP_HANDLE, - samMethodType, - ref.implMethodASM, - samMethodType, - LambdaMetafactory.FLAG_BRIDGES, - 1, + writer.invokeDynamic(ref.invokedName, + invokedType, + LAMBDA_BOOTSTRAP_HANDLE, + samMethodType, + ref.implMethodASM, + samMethodType, + LambdaMetafactory.FLAG_BRIDGES, + 1, interfaceType); } else { - writer.invokeDynamic(ref.invokedName, - invokedType, - LAMBDA_BOOTSTRAP_HANDLE, - samMethodType, - ref.implMethodASM, - samMethodType, + writer.invokeDynamic(ref.invokedName, + invokedType, + LAMBDA_BOOTSTRAP_HANDLE, + samMethodType, + ref.implMethodASM, + samMethodType, 0); } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ENull.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ENull.java index 3f934da4968..eefc09c5946 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ENull.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ENull.java @@ -21,7 +21,7 @@ package org.elasticsearch.painless.node; import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Location; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.objectweb.asm.Opcodes; import org.elasticsearch.painless.MethodWriter; @@ -35,7 +35,7 @@ public final class ENull extends AExpression { } @Override - void analyze(Variables variables) { + void analyze(Locals locals) { isNull = true; if (expected != null) { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ENumeric.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ENumeric.java index 53f3559c294..9abdc8f6a12 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ENumeric.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ENumeric.java @@ -22,7 +22,7 @@ package org.elasticsearch.painless.node; import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.Definition.Sort; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.MethodWriter; /** @@ -41,7 +41,7 @@ public final class ENumeric extends AExpression { } @Override - void analyze(Variables variables) { + void analyze(Locals locals) { if (value.endsWith("d") || value.endsWith("D")) { if (radix != 10) { throw createError(new IllegalStateException("Illegal tree structure.")); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EUnary.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EUnary.java index 89d654cdf1f..2d7d8d4fd49 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EUnary.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EUnary.java @@ -25,7 +25,7 @@ import org.elasticsearch.painless.Definition.Sort; import org.elasticsearch.painless.Definition.Type; import org.elasticsearch.painless.AnalyzerCaster; import org.elasticsearch.painless.Operation; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.objectweb.asm.Label; import org.elasticsearch.painless.MethodWriter; @@ -49,21 +49,21 @@ public final class EUnary extends AExpression { } @Override - void analyze(Variables variables) 
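The EFunctionRef hunk is an indentation-only cleanup of the invokedynamic call that binds a Painless function reference through the lambda bootstrap. The same metafactory can be driven directly from plain Java, which may help when reading the bytecode above; everything below is standard JDK API, with String.valueOf chosen arbitrarily as the implementation method:

    import java.lang.invoke.CallSite;
    import java.lang.invoke.LambdaMetafactory;
    import java.lang.invoke.MethodHandle;
    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.MethodType;
    import java.util.function.Function;

    public final class MetafactoryDemo {
        public static void main(String[] args) throws Throwable {
            MethodHandles.Lookup lookup = MethodHandles.lookup();
            // Implementation method: String.valueOf(int).
            MethodHandle impl = lookup.findStatic(String.class, "valueOf",
                MethodType.methodType(String.class, int.class));
            // Build a Function<Integer, String> whose apply() bridges to String.valueOf(int).
            CallSite site = LambdaMetafactory.metafactory(
                lookup,
                "apply",                                              // SAM method name
                MethodType.methodType(Function.class),                // invoked type (no captures)
                MethodType.methodType(Object.class, Object.class),    // erased SAM signature
                impl,                                                 // implementation handle
                MethodType.methodType(String.class, Integer.class));  // instantiated signature
            @SuppressWarnings("unchecked")
            Function<Integer, String> f = (Function<Integer, String>) site.getTarget().invoke();
            System.out.println(f.apply(42)); // "42"
        }
    }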
{ + void analyze(Locals locals) { if (operation == Operation.NOT) { - analyzeNot(variables); + analyzeNot(locals); } else if (operation == Operation.BWNOT) { - analyzeBWNot(variables); + analyzeBWNot(locals); } else if (operation == Operation.ADD) { - analyzerAdd(variables); + analyzerAdd(locals); } else if (operation == Operation.SUB) { - analyzerSub(variables); + analyzerSub(locals); } else { throw createError(new IllegalStateException("Illegal tree structure.")); } } - void analyzeNot(Variables variables) { + void analyzeNot(Locals variables) { child.expected = Definition.BOOLEAN_TYPE; child.analyze(variables); child = child.cast(variables); @@ -75,7 +75,7 @@ public final class EUnary extends AExpression { actual = Definition.BOOLEAN_TYPE; } - void analyzeBWNot(Variables variables) { + void analyzeBWNot(Locals variables) { child.analyze(variables); Type promote = AnalyzerCaster.promoteNumeric(child.actual, false); @@ -102,7 +102,7 @@ public final class EUnary extends AExpression { actual = promote; } - void analyzerAdd(Variables variables) { + void analyzerAdd(Locals variables) { child.analyze(variables); Type promote = AnalyzerCaster.promoteNumeric(child.actual, true); @@ -133,7 +133,7 @@ public final class EUnary extends AExpression { actual = promote; } - void analyzerSub(Variables variables) { + void analyzerSub(Locals variables) { child.analyze(variables); Type promote = AnalyzerCaster.promoteNumeric(child.actual, true); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LArrayLength.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LArrayLength.java index c80cc8b8ada..38c1ae43907 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LArrayLength.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LArrayLength.java @@ -21,7 +21,7 @@ package org.elasticsearch.painless.node; import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Location; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.MethodWriter; /** @@ -38,7 +38,7 @@ public final class LArrayLength extends ALink { } @Override - ALink analyze(Variables variables) { + ALink analyze(Locals locals) { if ("length".equals(value)) { if (!load) { throw createError(new IllegalArgumentException("Must read array field [length].")); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LBrace.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LBrace.java index b0816540c5e..91346b7a208 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LBrace.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LBrace.java @@ -22,7 +22,7 @@ package org.elasticsearch.painless.node; import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.Definition.Sort; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.MethodWriter; import java.util.List; @@ -42,7 +42,7 @@ public final class LBrace extends ALink { } @Override - ALink analyze(Variables variables) { + ALink analyze(Locals locals) { if (before == null) { throw createError(new IllegalArgumentException("Illegal array access made without target.")); } @@ -51,18 +51,18 @@ public final class LBrace extends ALink { if (sort == Sort.ARRAY) { index.expected = 
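EUnary dispatches each operator to a dedicated analyze method, and each relies on numeric promotion: bitwise NOT rejects floating point (promoteNumeric with false), while unary plus and minus accept it (true). A rough standalone sketch of that rule, with invented names rather than the AnalyzerCaster implementation:

    public final class UnaryPromotion {
        // Unary numeric promotion: byte, short, and char widen to int;
        // long stays long; float/double are allowed only when the operator permits decimals.
        static Class<?> promote(Class<?> operand, boolean allowDecimal) {
            if (operand == byte.class || operand == short.class
                    || operand == char.class || operand == int.class) {
                return int.class;
            }
            if (operand == long.class) {
                return long.class;
            }
            if (allowDecimal && (operand == float.class || operand == double.class)) {
                return operand;
            }
            throw new IllegalArgumentException("cannot apply numeric promotion to " + operand);
        }

        public static void main(String[] args) {
            System.out.println(promote(short.class, false)); // int (e.g. for ~x)
            System.out.println(promote(double.class, true)); // double (e.g. for -x)
        }
    }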
Definition.INT_TYPE; - index.analyze(variables); - index = index.cast(variables); + index.analyze(locals); + index = index.cast(locals); after = Definition.getType(before.struct, before.dimensions - 1); return this; } else if (sort == Sort.DEF) { - return new LDefArray(location, index).copy(this).analyze(variables); + return new LDefArray(location, index).copy(this).analyze(locals); } else if (Map.class.isAssignableFrom(before.clazz)) { - return new LMapShortcut(location, index).copy(this).analyze(variables); + return new LMapShortcut(location, index).copy(this).analyze(locals); } else if (List.class.isAssignableFrom(before.clazz)) { - return new LListShortcut(location, index).copy(this).analyze(variables); + return new LListShortcut(location, index).copy(this).analyze(locals); } throw createError(new IllegalArgumentException("Illegal array access on type [" + before.name + "].")); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LCall.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LCallInvoke.java similarity index 79% rename from modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LCall.java rename to modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LCallInvoke.java index 53e66ee68b1..1f6e899b1dd 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LCall.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LCallInvoke.java @@ -19,12 +19,12 @@ package org.elasticsearch.painless.node; -import org.elasticsearch.painless.Definition; +import org.elasticsearch.painless.Definition.MethodKey; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.Definition.Method; import org.elasticsearch.painless.Definition.Sort; import org.elasticsearch.painless.Definition.Struct; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.MethodWriter; import java.util.List; @@ -32,14 +32,14 @@ import java.util.List; /** * Represents a method call or defers to a def call. */ -public final class LCall extends ALink { +public final class LCallInvoke extends ALink { final String name; final List arguments; Method method = null; - public LCall(Location location, String name, List arguments) { + public LCallInvoke(Location location, String name, List arguments) { super(location, -1); this.name = name; @@ -47,7 +47,7 @@ public final class LCall extends ALink { } @Override - ALink analyze(Variables variables) { + ALink analyze(Locals locals) { if (before == null) { throw createError(new IllegalArgumentException("Illegal call [" + name + "] made without target.")); } else if (before.sort == Sort.ARRAY) { @@ -56,7 +56,7 @@ public final class LCall extends ALink { throw createError(new IllegalArgumentException("Cannot assign a value to a call [" + name + "].")); } - Definition.MethodKey methodKey = new Definition.MethodKey(name, arguments.size()); + MethodKey methodKey = new MethodKey(name, arguments.size()); Struct struct = before.struct; method = statik ?
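The LBrace hunk above shows how x[i] resolves by receiver type: true arrays are indexed directly, def defers to runtime dispatch, and Map and List receivers are rewritten into get/put and get/set shortcuts. A toy dispatcher capturing the static part of that decision (def omitted, since it is resolved at runtime):

    import java.util.List;
    import java.util.Map;

    public final class BraceDispatch {
        // Hypothetical labels, not the node classes themselves.
        static String strategy(Class<?> receiver) {
            if (receiver.isArray()) {
                return "array load/store";
            } else if (Map.class.isAssignableFrom(receiver)) {
                return "Map.get / Map.put shortcut";
            } else if (List.class.isAssignableFrom(receiver)) {
                return "List.get / List.set shortcut";
            }
            throw new IllegalArgumentException("Illegal array access on type [" + receiver.getName() + "].");
        }

        public static void main(String[] args) {
            System.out.println(strategy(int[].class));                // array load/store
            System.out.println(strategy(java.util.HashMap.class));    // Map shortcut
            System.out.println(strategy(java.util.ArrayList.class));  // List shortcut
        }
    }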
struct.staticMethods.get(methodKey) : struct.methods.get(methodKey); @@ -66,8 +66,8 @@ public final class LCall extends ALink { expression.expected = method.arguments.get(argument); expression.internal = true; - expression.analyze(variables); - arguments.set(argument, expression.cast(variables)); + expression.analyze(locals); + arguments.set(argument, expression.cast(locals)); } statement = true; @@ -78,11 +78,11 @@ public final class LCall extends ALink { ALink link = new LDefCall(location, name, arguments); link.copy(this); - return link.analyze(variables); + return link.analyze(locals); } - throw createError(new IllegalArgumentException("Unknown call [" + name + "] with [" + arguments.size() + - "] arguments on type [" + struct.name + "].")); + throw createError(new IllegalArgumentException( + "Unknown call [" + name + "] with [" + arguments.size() + "] arguments on type [" + struct.name + "].")); } @Override @@ -105,10 +105,6 @@ public final class LCall extends ALink { } else { writer.invokeVirtual(method.owner.type, method.method); } - - if (!method.rtn.clazz.equals(method.handle.type().returnType())) { - writer.checkCast(method.rtn.type); - } } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LCallLocal.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LCallLocal.java new file mode 100644 index 00000000000..31085171bf6 --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LCallLocal.java @@ -0,0 +1,99 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.painless.node; + +import org.elasticsearch.painless.Definition.Method; +import org.elasticsearch.painless.Definition.MethodKey; +import org.elasticsearch.painless.Locals; +import org.elasticsearch.painless.Location; +import org.elasticsearch.painless.MethodWriter; + +import java.util.List; + +import static org.elasticsearch.painless.WriterConstants.CLASS_TYPE; + +/** + * Represents a user-defined call. 
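LCallInvoke resolves methods through a MethodKey built from the name plus argument count, falling back to a dynamic LDefCall when the receiver is def. A simplified stand-in showing why name/arity is enough of a key here:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Objects;

    public final class MethodKeyDemo {
        // Simplified stand-in for Definition.MethodKey: methods resolve by name plus arity.
        static final class Key {
            final String name;
            final int arity;
            Key(String name, int arity) { this.name = name; this.arity = arity; }
            @Override public boolean equals(Object o) {
                return o instanceof Key && ((Key) o).name.equals(name) && ((Key) o).arity == arity;
            }
            @Override public int hashCode() { return Objects.hash(name, arity); }
        }

        public static void main(String[] args) {
            Map<Key, String> methods = new HashMap<>();
            methods.put(new Key("substring", 1), "String substring(int)");
            methods.put(new Key("substring", 2), "String substring(int, int)");
            // Overloads sharing an arity cannot coexist under this scheme, which is
            // why the whitelist disambiguates by argument count alone.
            System.out.println(methods.get(new Key("substring", 2))); // String substring(int, int)
        }
    }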
+ */ +public class LCallLocal extends ALink { + + final String name; + final List arguments; + + Method method = null; + + public LCallLocal(Location location, String name, List arguments) { + super(location, -1); + + this.name = name; + this.arguments = arguments; + } + + @Override + ALink analyze(Locals locals) { + if (before != null) { + throw createError(new IllegalArgumentException("Illegal call [" + name + "] against an existing target.")); + } else if (store) { + throw createError(new IllegalArgumentException("Cannot assign a value to a call [" + name + "].")); + } + + MethodKey methodKey = new MethodKey(name, arguments.size()); + method = locals.getMethod(methodKey); + + if (method != null) { + for (int argument = 0; argument < arguments.size(); ++argument) { + AExpression expression = arguments.get(argument); + + expression.expected = method.arguments.get(argument); + expression.internal = true; + expression.analyze(locals); + arguments.set(argument, expression.cast(locals)); + } + + statement = true; + after = method.rtn; + + return this; + } + + throw createError(new IllegalArgumentException("Unknown call [" + name + "] with [" + arguments.size() + "] arguments.")); + } + + @Override + void write(MethodWriter writer) { + // Do nothing. + } + + @Override + void load(MethodWriter writer) { + writer.writeDebugInfo(location); + + for (AExpression argument : arguments) { + argument.write(writer); + } + + writer.invokeStatic(CLASS_TYPE, method.method); + } + + @Override + void store(MethodWriter writer) { + throw createError(new IllegalStateException("Illegal tree structure.")); + } +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LCast.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LCast.java index de4a22a5ad8..d2b4f83a823 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LCast.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LCast.java @@ -23,7 +23,7 @@ import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.Definition.Cast; import org.elasticsearch.painless.AnalyzerCaster; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.MethodWriter; /** @@ -42,7 +42,7 @@ public final class LCast extends ALink { } @Override - ALink analyze(Variables variables) { + ALink analyze(Locals locals) { if (before == null) { throw createError(new IllegalStateException("Illegal cast without a target.")); } else if (store) { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LDefArray.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LDefArray.java index 40e95879634..dc8890e5122 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LDefArray.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LDefArray.java @@ -22,7 +22,7 @@ package org.elasticsearch.painless.node; import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.DefBootstrap; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.objectweb.asm.Type; import org.elasticsearch.painless.MethodWriter; @@ -42,10 +42,10 @@ final class LDefArray extends ALink implements IDefLink { } @Override - ALink analyze(Variables variables) { - index.analyze(variables); + ALink 
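The new LCallLocal node covers receiver-less calls: they are looked up in the script's own function table (locals.getMethod) and compiled as static invocations on the script class. In miniature, with java.util.function standing in for the generated bytecode:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.function.IntBinaryOperator;

    public final class LocalCallDemo {
        public static void main(String[] args) {
            // Per-script function table keyed by name/arity, as LCallLocal consults it.
            Map<String, IntBinaryOperator> functions = new HashMap<>();
            functions.put("add/2", (a, b) -> a + b);

            IntBinaryOperator target = functions.get("add/2");
            if (target == null) {
                throw new IllegalArgumentException("Unknown call [add] with [2] arguments.");
            }
            System.out.println(target.applyAsInt(2, 3)); // 5
        }
    }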
analyze(Locals locals) { + index.analyze(locals); index.expected = index.actual; - index = index.cast(variables); + index = index.cast(locals); after = Definition.DEF_TYPE; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LDefCall.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LDefCall.java index 3a06b9c899e..554144c2999 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LDefCall.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LDefCall.java @@ -22,7 +22,7 @@ package org.elasticsearch.painless.node; import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.DefBootstrap; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.MethodWriter; import java.util.List; @@ -46,14 +46,14 @@ final class LDefCall extends ALink implements IDefLink { } @Override - ALink analyze(Variables variables) { + ALink analyze(Locals locals) { if (arguments.size() > 63) { // technically, the limitation is just methods with > 63 params, containing method references. // this is because we are lazy and use a long as a bitset. we can always change to a "string" if need be. // but NEED NOT BE. nothing with this many parameters is in the whitelist and we do not support varargs. throw new UnsupportedOperationException("methods with > 63 arguments are currently not supported"); } - + recipe = 0; int totalCaptures = 0; for (int argument = 0; argument < arguments.size(); ++argument) { @@ -66,9 +66,9 @@ final class LDefCall extends ALink implements IDefLink { totalCaptures++; } expression.internal = true; - expression.analyze(variables); + expression.analyze(locals); expression.expected = expression.actual; - arguments.set(argument, expression.cast(variables)); + arguments.set(argument, expression.cast(locals)); } statement = true; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LDefField.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LDefField.java index aad11d2e91a..91ee8e0f03d 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LDefField.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LDefField.java @@ -22,7 +22,7 @@ package org.elasticsearch.painless.node; import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.DefBootstrap; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.objectweb.asm.Type; import org.elasticsearch.painless.MethodWriter; @@ -43,7 +43,7 @@ final class LDefField extends ALink implements IDefLink { @Override - ALink analyze(Variables variables) { + ALink analyze(Locals locals) { after = Definition.DEF_TYPE; return this; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LField.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LField.java index 8f34692d666..049dd2d8524 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LField.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LField.java @@ -24,7 +24,7 @@ import org.elasticsearch.painless.Location; import org.elasticsearch.painless.Definition.Field; import org.elasticsearch.painless.Definition.Sort; import org.elasticsearch.painless.Definition.Struct; -import 
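LDefCall's 63-argument cap comes from packing a per-argument flag into a single long, as the in-line comment above explains. The bit manipulation itself is just this:

    public final class RecipeBits {
        public static void main(String[] args) {
            // One bit per argument: "argument i is a method reference".
            long recipe = 0;
            recipe |= 1L << 0;  // argument 0 is a method reference
            recipe |= 1L << 5;  // argument 5 is a method reference

            for (int argument = 0; argument < 8; ++argument) {
                boolean isRef = (recipe & (1L << argument)) != 0;
                System.out.println("argument " + argument + " method ref: " + isRef);
            }
        }
    }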
org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.MethodWriter; import java.util.List; @@ -46,7 +46,7 @@ public final class LField extends ALink { } @Override - ALink analyze(Variables variables) { + ALink analyze(Locals locals) { if (before == null) { throw createError(new IllegalArgumentException("Illegal field [" + value + "] access made without target.")); } @@ -54,9 +54,9 @@ public final class LField extends ALink { Sort sort = before.sort; if (sort == Sort.ARRAY) { - return new LArrayLength(location, value).copy(this).analyze(variables); + return new LArrayLength(location, value).copy(this).analyze(locals); } else if (sort == Sort.DEF) { - return new LDefField(location, value).copy(this).analyze(variables); + return new LDefField(location, value).copy(this).analyze(locals); } Struct struct = before.struct; @@ -81,17 +81,17 @@ public final class LField extends ALink { Character.toUpperCase(value.charAt(0)) + value.substring(1), 1)); if (shortcut) { - return new LShortcut(location, value).copy(this).analyze(variables); + return new LShortcut(location, value).copy(this).analyze(locals); } else { EConstant index = new EConstant(location, value); - index.analyze(variables); + index.analyze(locals); if (Map.class.isAssignableFrom(before.clazz)) { - return new LMapShortcut(location, index).copy(this).analyze(variables); + return new LMapShortcut(location, index).copy(this).analyze(locals); } if (List.class.isAssignableFrom(before.clazz)) { - return new LListShortcut(location, index).copy(this).analyze(variables); + return new LListShortcut(location, index).copy(this).analyze(locals); } } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LListShortcut.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LListShortcut.java index a8252b40770..7b0feb094e1 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LListShortcut.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LListShortcut.java @@ -23,7 +23,7 @@ import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.Definition.Method; import org.elasticsearch.painless.Definition.Sort; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.MethodWriter; /** @@ -42,7 +42,7 @@ final class LListShortcut extends ALink { } @Override - ALink analyze(Variables variables) { + ALink analyze(Locals locals) { getter = before.struct.methods.get(new Definition.MethodKey("get", 1)); setter = before.struct.methods.get(new Definition.MethodKey("set", 2)); @@ -62,8 +62,8 @@ final class LListShortcut extends ALink { if ((load || store) && (!load || getter != null) && (!store || setter != null)) { index.expected = Definition.INT_TYPE; - index.analyze(variables); - index = index.cast(variables); + index.analyze(locals); + index = index.cast(locals); after = setter != null ? 
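LField probes for shortcut accessors by capitalizing the field name, exactly as the Character.toUpperCase calls above show, before falling back to Map/List shortcuts. The name derivation in isolation:

    public final class ShortcutNames {
        // Field access x.foo can resolve to getFoo()/setFoo(...) when no real field exists.
        static String getterName(String field) {
            return "get" + Character.toUpperCase(field.charAt(0)) + field.substring(1);
        }

        static String setterName(String field) {
            return "set" + Character.toUpperCase(field.charAt(0)) + field.substring(1);
        }

        public static void main(String[] args) {
            System.out.println(getterName("empty"));  // getEmpty
            System.out.println(setterName("length")); // setLength
        }
    }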
setter.arguments.get(1) : getter.rtn; } else { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LMapShortcut.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LMapShortcut.java index f31179d135a..5d4c5a9e50a 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LMapShortcut.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LMapShortcut.java @@ -23,7 +23,7 @@ import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.Definition.Method; import org.elasticsearch.painless.Definition.Sort; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.MethodWriter; /** @@ -42,7 +42,7 @@ final class LMapShortcut extends ALink { } @Override - ALink analyze(Variables variables) { + ALink analyze(Locals locals) { getter = before.struct.methods.get(new Definition.MethodKey("get", 1)); setter = before.struct.methods.get(new Definition.MethodKey("put", 2)); @@ -61,8 +61,8 @@ final class LMapShortcut extends ALink { if ((load || store) && (!load || getter != null) && (!store || setter != null)) { index.expected = setter != null ? setter.arguments.get(0) : getter.arguments.get(0); - index.analyze(variables); - index = index.cast(variables); + index.analyze(locals); + index = index.cast(locals); after = setter != null ? setter.arguments.get(1) : getter.rtn; } else { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LNewArray.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LNewArray.java index 75a4aecbe19..15aa01c1d26 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LNewArray.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LNewArray.java @@ -22,7 +22,7 @@ package org.elasticsearch.painless.node; import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.Definition.Type; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.MethodWriter; import java.util.List; @@ -43,7 +43,7 @@ public final class LNewArray extends ALink { } @Override - ALink analyze(Variables variables) { + ALink analyze(Locals locals) { if (before != null) { throw createError(new IllegalArgumentException("Cannot create a new array with a target already defined.")); } else if (store) { @@ -64,8 +64,8 @@ public final class LNewArray extends ALink { AExpression expression = arguments.get(argument); expression.expected = Definition.INT_TYPE; - expression.analyze(variables); - arguments.set(argument, expression.cast(variables)); + expression.analyze(locals); + arguments.set(argument, expression.cast(locals)); } after = Definition.getType(type.struct, arguments.size()); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LNewObj.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LNewObj.java index 2f80b254350..aeb6f64f9db 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LNewObj.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LNewObj.java @@ -24,7 +24,7 @@ import org.elasticsearch.painless.Location; import org.elasticsearch.painless.Definition.Method; import org.elasticsearch.painless.Definition.Struct; import 
org.elasticsearch.painless.Definition.Type; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.MethodWriter; import java.util.List; @@ -47,7 +47,7 @@ public final class LNewObj extends ALink { } @Override - ALink analyze(Variables variables) { + ALink analyze(Locals locals) { if (before != null) { throw createError(new IllegalArgumentException("Illegal new call with a target already defined.")); } else if (store) { @@ -79,8 +79,8 @@ public final class LNewObj extends ALink { expression.expected = types[argument]; expression.internal = true; - expression.analyze(variables); - arguments.set(argument, expression.cast(variables)); + expression.analyze(locals); + arguments.set(argument, expression.cast(locals)); } statement = true; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LShortcut.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LShortcut.java index a91970fa577..73cbc201db4 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LShortcut.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LShortcut.java @@ -24,7 +24,7 @@ import org.elasticsearch.painless.Location; import org.elasticsearch.painless.Definition.Method; import org.elasticsearch.painless.Definition.Sort; import org.elasticsearch.painless.Definition.Struct; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.MethodWriter; /** @@ -44,7 +44,7 @@ final class LShortcut extends ALink { } @Override - ALink analyze(Variables variables) { + ALink analyze(Locals locals) { Struct struct = before.struct; getter = struct.methods.get(new Definition.MethodKey("get" + Character.toUpperCase(value.charAt(0)) + value.substring(1), 0)); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LStatic.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LStatic.java index 98774513188..6f04e9a22c4 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LStatic.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LStatic.java @@ -22,7 +22,7 @@ package org.elasticsearch.painless.node; import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; /** * Represents a static type target. 
@@ -38,7 +38,7 @@ public final class LStatic extends ALink { } @Override - ALink analyze(Variables variables) { + ALink analyze(Locals locals) { if (before != null) { throw createError(new IllegalArgumentException("Illegal static type [" + type + "] after target already defined.")); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LString.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LString.java index 6afe62d02ba..41eb027e3ab 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LString.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LString.java @@ -21,7 +21,7 @@ package org.elasticsearch.painless.node; import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Location; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.MethodWriter; /** @@ -36,7 +36,7 @@ public final class LString extends ALink { } @Override - ALink analyze(Variables variables) { + ALink analyze(Locals locals) { if (before != null) { throw createError(new IllegalArgumentException("Illegal String constant [" + string + "].")); } else if (store) { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LVariable.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LVariable.java index 8fe6f17b0b5..64dc852117d 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LVariable.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LVariable.java @@ -21,8 +21,8 @@ package org.elasticsearch.painless.node; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; -import org.elasticsearch.painless.Variables; -import org.elasticsearch.painless.Variables.Variable; +import org.elasticsearch.painless.Locals; +import org.elasticsearch.painless.Locals.Variable; import org.objectweb.asm.Opcodes; /** @@ -41,12 +41,12 @@ public final class LVariable extends ALink { } @Override - ALink analyze(Variables variables) { + ALink analyze(Locals locals) { if (before != null) { throw createError(new IllegalArgumentException("Illegal variable [" + name + "] access with target already defined.")); } - Variable variable = variables.getVariable(location, name); + Variable variable = locals.getVariable(location, name); if (store && variable.readonly) { throw createError(new IllegalArgumentException("Variable [" + variable.name + "] is read-only.")); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SBlock.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SBlock.java index 4dbbd80de54..5fec362ec17 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SBlock.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SBlock.java @@ -19,7 +19,7 @@ package org.elasticsearch.painless.node; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; @@ -40,7 +40,7 @@ public final class SBlock extends AStatement { } @Override - void analyze(Variables variables) { + void analyze(Locals locals) { if (statements == null || statements.isEmpty()) { throw createError(new IllegalArgumentException("A block must contain at least one statement.")); } @@ -58,7 +58,7 @@ public final class SBlock extends 
AStatement { statement.lastSource = lastSource && statement == last; statement.lastLoop = (beginLoop || lastLoop) && statement == last; - statement.analyze(variables); + statement.analyze(locals); methodEscape = statement.methodEscape; loopEscape = statement.loopEscape; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SBreak.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SBreak.java index ca72dd0b55b..d4f8bfff4e4 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SBreak.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SBreak.java @@ -19,7 +19,7 @@ package org.elasticsearch.painless.node; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; @@ -33,7 +33,7 @@ public final class SBreak extends AStatement { } @Override - void analyze(Variables variables) { + void analyze(Locals locals) { if (!inLoop) { throw createError(new IllegalArgumentException("Break statement outside of a loop.")); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SCatch.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SCatch.java index 8bcaf9d22cf..42de28e48f5 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SCatch.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SCatch.java @@ -22,8 +22,8 @@ package org.elasticsearch.painless.node; import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.Definition.Type; -import org.elasticsearch.painless.Variables; -import org.elasticsearch.painless.Variables.Variable; +import org.elasticsearch.painless.Locals; +import org.elasticsearch.painless.Locals.Variable; import org.objectweb.asm.Label; import org.objectweb.asm.Opcodes; import org.elasticsearch.painless.MethodWriter; @@ -52,7 +52,7 @@ public final class SCatch extends AStatement { } @Override - void analyze(Variables variables) { + void analyze(Locals locals) { final Type type; try { @@ -65,14 +65,14 @@ public final class SCatch extends AStatement { throw createError(new ClassCastException("Not an exception type [" + this.type + "].")); } - variable = variables.addVariable(location, type, name, true, false); + variable = locals.addVariable(location, type, name, true, false); if (block != null) { block.lastSource = lastSource; block.inLoop = inLoop; block.lastLoop = lastLoop; - block.analyze(variables); + block.analyze(locals); methodEscape = block.methodEscape; loopEscape = block.loopEscape; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SContinue.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SContinue.java index ef80766bdd1..802cf90087b 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SContinue.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SContinue.java @@ -19,7 +19,7 @@ package org.elasticsearch.painless.node; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; @@ -33,7 +33,7 @@ public final class SContinue extends AStatement { } @Override - void analyze(Variables variables) { + void analyze(Locals locals) { if (!inLoop) { throw createError(new 
IllegalArgumentException("Continue statement outside of a loop.")); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDeclBlock.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDeclBlock.java index 1ff187afe29..5365e72888a 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDeclBlock.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDeclBlock.java @@ -19,7 +19,7 @@ package org.elasticsearch.painless.node; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; @@ -40,9 +40,9 @@ public final class SDeclBlock extends AStatement { } @Override - void analyze(Variables variables) { + void analyze(Locals locals) { for (SDeclaration declaration : declarations) { - declaration.analyze(variables); + declaration.analyze(locals); } statementCount = declarations.size(); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDeclaration.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDeclaration.java index 5f039184cb6..a62a287df9c 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDeclaration.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDeclaration.java @@ -22,8 +22,8 @@ package org.elasticsearch.painless.node; import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.Definition.Type; -import org.elasticsearch.painless.Variables; -import org.elasticsearch.painless.Variables.Variable; +import org.elasticsearch.painless.Locals; +import org.elasticsearch.painless.Locals.Variable; import org.objectweb.asm.Opcodes; import org.elasticsearch.painless.MethodWriter; @@ -47,7 +47,7 @@ public final class SDeclaration extends AStatement { } @Override - void analyze(Variables variables) { + void analyze(Locals locals) { final Type type; try { @@ -58,11 +58,11 @@ public final class SDeclaration extends AStatement { if (expression != null) { expression.expected = type; - expression.analyze(variables); - expression = expression.cast(variables); + expression.analyze(locals); + expression = expression.cast(locals); } - variable = variables.addVariable(location, type, name, false, false); + variable = locals.addVariable(location, type, name, false, false); } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDo.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDo.java index 4989fb77a79..b25d903dc41 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDo.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDo.java @@ -21,7 +21,7 @@ package org.elasticsearch.painless.node; import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Location; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.objectweb.asm.Label; import org.elasticsearch.painless.MethodWriter; @@ -30,21 +30,19 @@ import org.elasticsearch.painless.MethodWriter; */ public final class SDo extends AStatement { - final int maxLoopCounter; final SBlock block; AExpression condition; - public SDo(Location location, int maxLoopCounter, SBlock block, AExpression condition) { + public SDo(Location location, SBlock block, AExpression condition) { 
super(location); this.condition = condition; this.block = block; - this.maxLoopCounter = maxLoopCounter; } @Override - void analyze(Variables variables) { - variables.incrementScope(); + void analyze(Locals locals) { + locals.incrementScope(); if (block == null) { throw createError(new IllegalArgumentException("Extraneous do while loop.")); @@ -53,15 +51,15 @@ public final class SDo extends AStatement { block.beginLoop = true; block.inLoop = true; - block.analyze(variables); + block.analyze(locals); if (block.loopEscape && !block.anyContinue) { throw createError(new IllegalArgumentException("Extraneous do while loop.")); } condition.expected = Definition.BOOLEAN_TYPE; - condition.analyze(variables); - condition = condition.cast(variables); + condition.analyze(locals); + condition = condition.cast(locals); if (condition.constant != null) { final boolean continuous = (boolean)condition.constant; @@ -78,11 +76,11 @@ public final class SDo extends AStatement { statementCount = 1; - if (maxLoopCounter > 0) { - loopCounterSlot = variables.getVariable(location, "#loop").slot; + if (locals.getMaxLoopCounter() > 0) { + loopCounterSlot = locals.getVariable(location, "#loop").slot; } - variables.decrementScope(); + locals.decrementScope(); } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java index 2a8fb0e4063..a6156324873 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java @@ -29,8 +29,8 @@ import org.elasticsearch.painless.Definition.Sort; import org.elasticsearch.painless.Definition.Type; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; -import org.elasticsearch.painless.Variables; -import org.elasticsearch.painless.Variables.Variable; +import org.elasticsearch.painless.Locals; +import org.elasticsearch.painless.Locals.Variable; import org.objectweb.asm.Label; import org.objectweb.asm.Opcodes; @@ -44,7 +44,6 @@ import static org.elasticsearch.painless.WriterConstants.ITERATOR_TYPE; */ public class SEach extends AStatement { - final int maxLoopCounter; final String type; final String name; AExpression expression; @@ -63,10 +62,9 @@ public class SEach extends AStatement { Variable iterator = null; Method method = null; - public SEach(Location location, int maxLoopCounter, String type, String name, AExpression expression, SBlock block) { + public SEach(Location location, String type, String name, AExpression expression, SBlock block) { super(location); - this.maxLoopCounter = maxLoopCounter; this.type = type; this.name = name; this.expression = expression; @@ -74,10 +72,10 @@ public class SEach extends AStatement { } @Override - void analyze(Variables variables) { - expression.analyze(variables); + void analyze(Locals locals) { + expression.analyze(locals); expression.expected = expression.actual; - expression = expression.cast(variables); + expression = expression.cast(locals); final Type type; @@ -87,25 +85,25 @@ public class SEach extends AStatement { throw createError(new IllegalArgumentException("Not a type [" + this.type + "].")); } - variables.incrementScope(); + locals.incrementScope(); - variable = variables.addVariable(location, type, name, true, false); + variable = locals.addVariable(location, type, name, true, false); if (expression.actual.sort == Sort.ARRAY) { - analyzeArray(variables, type); 
+ analyzeArray(locals, type); } else if (expression.actual.sort == Sort.DEF || Iterable.class.isAssignableFrom(expression.actual.clazz)) { - analyzeIterable(variables, type); + analyzeIterable(locals, type); } else { - throw location.createError(new IllegalArgumentException("Illegal for each type [" + expression.actual.name + "].")); + throw createError(new IllegalArgumentException("Illegal for each type [" + expression.actual.name + "].")); } if (block == null) { - throw location.createError(new IllegalArgumentException("Extraneous for each loop.")); + throw createError(new IllegalArgumentException("Extraneous for each loop.")); } block.beginLoop = true; block.inLoop = true; - block.analyze(variables); + block.analyze(locals); block.statementCount = Math.max(1, block.statementCount); if (block.loopEscape && !block.anyContinue) { @@ -114,14 +112,14 @@ public class SEach extends AStatement { statementCount = 1; - if (maxLoopCounter > 0) { - loopCounterSlot = variables.getVariable(location, "#loop").slot; + if (locals.getMaxLoopCounter() > 0) { + loopCounterSlot = locals.getVariable(location, "#loop").slot; } - variables.decrementScope(); + locals.decrementScope(); } - void analyzeArray(Variables variables, Type type) { + void analyzeArray(Locals variables, Type type) { // We must store the array and index as variables for securing slots on the stack, and // also add the location offset to make the names unique in case of nested for each loops. array = variables.addVariable(location, expression.actual, "#array" + location.getOffset(), true, false); @@ -130,7 +128,7 @@ public class SEach extends AStatement { cast = AnalyzerCaster.getLegalCast(location, indexed, type, true, true); } - void analyzeIterable(Variables variables, Type type) { + void analyzeIterable(Locals variables, Type type) { // We must store the iterator as a variable for securing a slot on the stack, and // also add the location offset to make the name unique in case of nested for each loops. 
iterator = variables.addVariable(location, Definition.getType("Iterator"), "#itr" + location.getOffset(), true, false); @@ -141,7 +139,7 @@ public class SEach extends AStatement { method = expression.actual.struct.methods.get(new MethodKey("iterator", 0)); if (method == null) { - throw location.createError(new IllegalArgumentException( + throw createError(new IllegalArgumentException( "Unable to create iterator for the type [" + expression.actual.name + "].")); } } @@ -158,7 +156,7 @@ public class SEach extends AStatement { } else if (iterator != null) { writeIterable(writer); } else { - throw location.createError(new IllegalStateException("Illegal tree structure.")); + throw createError(new IllegalStateException("Illegal tree structure.")); } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SExpression.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SExpression.java index 1bea07d5599..0a65664fa74 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SExpression.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SExpression.java @@ -20,9 +20,10 @@ package org.elasticsearch.painless.node; import org.elasticsearch.painless.Definition; +import org.elasticsearch.painless.Definition.Type; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.Definition.Sort; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.MethodWriter; /** @@ -39,19 +40,22 @@ public final class SExpression extends AStatement { } @Override - void analyze(Variables variables) { - expression.read = lastSource; - expression.analyze(variables); + void analyze(Locals locals) { + Type rtnType = locals.getReturnType(); + boolean isVoid = rtnType.sort == Sort.VOID; + + expression.read = lastSource && !isVoid; + expression.analyze(locals); if (!lastSource && !expression.statement) { throw createError(new IllegalArgumentException("Not a statement.")); } - final boolean rtn = lastSource && expression.actual.sort != Sort.VOID; + boolean rtn = lastSource && !isVoid && expression.actual.sort != Sort.VOID; - expression.expected = rtn ? Definition.OBJECT_TYPE : expression.actual; + expression.expected = rtn ? 
rtnType : expression.actual; expression.internal = rtn; - expression = expression.cast(variables); + expression = expression.cast(locals); methodEscape = rtn; loopEscape = rtn; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFor.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFor.java index 04475a91a1a..cc9dee20138 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFor.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFor.java @@ -21,7 +21,7 @@ package org.elasticsearch.painless.node; import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Location; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.objectweb.asm.Label; import org.elasticsearch.painless.MethodWriter; @@ -30,17 +30,14 @@ import org.elasticsearch.painless.MethodWriter; */ public final class SFor extends AStatement { - final int maxLoopCounter; ANode initializer; AExpression condition; AExpression afterthought; final SBlock block; - public SFor(Location location, int maxLoopCounter, - ANode initializer, AExpression condition, AExpression afterthought, SBlock block) { + public SFor(Location location, ANode initializer, AExpression condition, AExpression afterthought, SBlock block) { super(location); - this.maxLoopCounter = maxLoopCounter; this.initializer = initializer; this.condition = condition; this.afterthought = afterthought; @@ -48,19 +45,19 @@ public final class SFor extends AStatement { } @Override - void analyze(Variables variables) { - variables.incrementScope(); + void analyze(Locals locals) { + locals.incrementScope(); boolean continuous = false; if (initializer != null) { if (initializer instanceof AStatement) { - ((AStatement)initializer).analyze(variables); + ((AStatement)initializer).analyze(locals); } else if (initializer instanceof AExpression) { AExpression initializer = (AExpression)this.initializer; initializer.read = false; - initializer.analyze(variables); + initializer.analyze(locals); if (!initializer.statement) { throw createError(new IllegalArgumentException("Not a statement.")); @@ -72,8 +69,8 @@ public final class SFor extends AStatement { if (condition != null) { condition.expected = Definition.BOOLEAN_TYPE; - condition.analyze(variables); - condition = condition.cast(variables); + condition.analyze(locals); + condition = condition.cast(locals); if (condition.constant != null) { continuous = (boolean)condition.constant; @@ -92,7 +89,7 @@ public final class SFor extends AStatement { if (afterthought != null) { afterthought.read = false; - afterthought.analyze(variables); + afterthought.analyze(locals); if (!afterthought.statement) { throw createError(new IllegalArgumentException("Not a statement.")); @@ -103,7 +100,7 @@ public final class SFor extends AStatement { block.beginLoop = true; block.inLoop = true; - block.analyze(variables); + block.analyze(locals); if (block.loopEscape && !block.anyContinue) { throw createError(new IllegalArgumentException("Extraneous for loop.")); @@ -119,11 +116,11 @@ public final class SFor extends AStatement { statementCount = 1; - if (maxLoopCounter > 0) { - loopCounterSlot = variables.getVariable(location, "#loop").slot; + if (locals.getMaxLoopCounter() > 0) { + loopCounterSlot = locals.getVariable(location, "#loop").slot; } - variables.decrementScope(); + locals.decrementScope(); } @Override diff --git 
a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFunction.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFunction.java new file mode 100644 index 00000000000..16d53f23bc6 --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFunction.java @@ -0,0 +1,163 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.painless.node; + +import org.elasticsearch.painless.Definition; +import org.elasticsearch.painless.Definition.Method; +import org.elasticsearch.painless.Definition.Sort; +import org.elasticsearch.painless.Definition.Type; +import org.elasticsearch.painless.Locals; +import org.elasticsearch.painless.Locals.Parameter; +import org.elasticsearch.painless.Locals.FunctionReserved; +import org.elasticsearch.painless.Locals.Variable; +import org.elasticsearch.painless.Location; +import org.elasticsearch.painless.MethodWriter; +import org.objectweb.asm.Opcodes; + +import java.lang.invoke.MethodType; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +/** + * Represents a user-defined function. 
+ */ +public class SFunction extends AStatement { + final FunctionReserved reserved; + final String rtnTypeStr; + final String name; + final List paramTypeStrs; + final List paramNameStrs; + final List statements; + + Type rtnType = null; + List parameters = new ArrayList<>(); + Method method = null; + + Locals locals = null; + + public SFunction(FunctionReserved reserved, Location location, + String rtnType, String name, List paramTypes, List paramNames, List statements) { + super(location); + + this.reserved = reserved; + this.rtnTypeStr = rtnType; + this.name = name; + this.paramTypeStrs = Collections.unmodifiableList(paramTypes); + this.paramNameStrs = Collections.unmodifiableList(paramNames); + this.statements = Collections.unmodifiableList(statements); + } + + void generate() { + try { + rtnType = Definition.getType(rtnTypeStr); + } catch (IllegalArgumentException exception) { + throw createError(new IllegalArgumentException("Illegal return type [" + rtnTypeStr + "] for function [" + name + "].")); + } + + if (paramTypeStrs.size() != paramNameStrs.size()) { + throw createError(new IllegalStateException("Illegal tree structure.")); + } + + Class[] paramClasses = new Class[this.paramTypeStrs.size()]; + List paramTypes = new ArrayList<>(); + + for (int param = 0; param < this.paramTypeStrs.size(); ++param) { + try { + Type paramType = Definition.getType(this.paramTypeStrs.get(param)); + + paramClasses[param] = paramType.clazz; + paramTypes.add(paramType); + parameters.add(new Parameter(location, paramNameStrs.get(param), paramType)); + } catch (IllegalArgumentException exception) { + throw createError(new IllegalArgumentException( + "Illegal parameter type [" + this.paramTypeStrs.get(param) + "] for function [" + name + "].")); + } + } + + org.objectweb.asm.commons.Method method = + new org.objectweb.asm.commons.Method(name, MethodType.methodType(rtnType.clazz, paramClasses).toMethodDescriptorString()); + this.method = new Method(name, null, rtnType, paramTypes, method, 0, null); + } + + @Override + void analyze(Locals locals) { + if (statements == null || statements.isEmpty()) { + throw createError(new IllegalArgumentException("Cannot generate an empty function [" + name + "].")); + } + + this.locals = new Locals(reserved, locals, rtnType, parameters); + locals = this.locals; + + locals.incrementScope(); + + AStatement last = statements.get(statements.size() - 1); + + for (AStatement statement : statements) { + // Note that we do not need to check after the last statement because + // there is no statement that can be unreachable after the last. 
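SFunction.generate assembles the ASM method from a JDK MethodType rendered as a descriptor string, as the MethodType.methodType(...).toMethodDescriptorString() call above shows. In isolation:

    import java.lang.invoke.MethodType;

    public final class DescriptorDemo {
        public static void main(String[] args) {
            // Return type first, then parameter types, rendered as a JVM descriptor.
            String descriptor = MethodType
                .methodType(double.class, int.class, String.class)
                .toMethodDescriptorString();
            System.out.println(descriptor); // (ILjava/lang/String;)D
        }
    }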
+ if (allEscape) { + throw createError(new IllegalArgumentException("Unreachable statement.")); + } + + statement.lastSource = statement == last; + + statement.analyze(locals); + + methodEscape = statement.methodEscape; + allEscape = statement.allEscape; + } + + if (!methodEscape && rtnType.sort != Sort.VOID) { + throw createError(new IllegalArgumentException("Not all paths provide a return value for method [" + name + "].")); + } + + locals.decrementScope(); + } + + @Override + void write(MethodWriter writer) { + MethodWriter function = writer.newMethodWriter(Opcodes.ACC_PRIVATE | Opcodes.ACC_STATIC, method.method); + + if (reserved.getMaxLoopCounter() > 0) { + // if there is infinite loop protection, we do this once: + // int #loop = settings.getMaxLoopCounter() + + Variable loop = locals.getVariable(null, FunctionReserved.LOOP); + + function.push(reserved.getMaxLoopCounter()); + function.visitVarInsn(Opcodes.ISTORE, loop.slot); + } + + for (AStatement statement : statements) { + statement.write(function); + } + + if (!methodEscape) { + if (rtnType.sort == Sort.VOID) { + function.returnValue(); + } else { + throw createError(new IllegalStateException("Illegal tree structure.")); + } + } + + function.endMethod(); + } +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SIf.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SIf.java index 954ddac9c6a..a46075af9a1 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SIf.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SIf.java @@ -21,7 +21,7 @@ package org.elasticsearch.painless.node; import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Location; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.objectweb.asm.Label; import org.elasticsearch.painless.MethodWriter; @@ -41,10 +41,10 @@ public final class SIf extends AStatement { } @Override - void analyze(Variables variables) { + void analyze(Locals locals) { condition.expected = Definition.BOOLEAN_TYPE; - condition.analyze(variables); - condition = condition.cast(variables); + condition.analyze(locals); + condition = condition.cast(locals); if (condition.constant != null) { throw createError(new IllegalArgumentException("Extraneous if statement.")); @@ -58,9 +58,9 @@ public final class SIf extends AStatement { ifblock.inLoop = inLoop; ifblock.lastLoop = lastLoop; - variables.incrementScope(); - ifblock.analyze(variables); - variables.decrementScope(); + locals.incrementScope(); + ifblock.analyze(locals); + locals.decrementScope(); anyContinue = ifblock.anyContinue; anyBreak = ifblock.anyBreak; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SIfElse.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SIfElse.java index 1d801267054..22cbfe25614 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SIfElse.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SIfElse.java @@ -21,7 +21,7 @@ package org.elasticsearch.painless.node; import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Location; -import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Locals; import org.objectweb.asm.Label; import org.elasticsearch.painless.MethodWriter; @@ -43,10 +43,10 @@ public final class SIfElse extends AStatement { } @Override - void analyze(Variables variables) { + 
+        MethodWriter function = writer.newMethodWriter(Opcodes.ACC_PRIVATE | Opcodes.ACC_STATIC, method.method);
+
+        if (reserved.getMaxLoopCounter() > 0) {
+            // if there is infinite loop protection, we do this once:
+            // int #loop = settings.getMaxLoopCounter()
+
+            Variable loop = locals.getVariable(null, FunctionReserved.LOOP);
+
+            function.push(reserved.getMaxLoopCounter());
+            function.visitVarInsn(Opcodes.ISTORE, loop.slot);
+        }
+
+        for (AStatement statement : statements) {
+            statement.write(function);
+        }
+
+        if (!methodEscape) {
+            if (rtnType.sort == Sort.VOID) {
+                function.returnValue();
+            } else {
+                throw createError(new IllegalStateException("Illegal tree structure."));
+            }
+        }
+
+        function.endMethod();
+    }
+}
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SIf.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SIf.java
index 954ddac9c6a..a46075af9a1 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SIf.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SIf.java
@@ -21,7 +21,7 @@
 package org.elasticsearch.painless.node;
 
 import org.elasticsearch.painless.Definition;
 import org.elasticsearch.painless.Location;
-import org.elasticsearch.painless.Variables;
+import org.elasticsearch.painless.Locals;
 import org.objectweb.asm.Label;
 import org.elasticsearch.painless.MethodWriter;
@@ -41,10 +41,10 @@ public final class SIf extends AStatement {
     }
 
     @Override
-    void analyze(Variables variables) {
+    void analyze(Locals locals) {
         condition.expected = Definition.BOOLEAN_TYPE;
-        condition.analyze(variables);
-        condition = condition.cast(variables);
+        condition.analyze(locals);
+        condition = condition.cast(locals);
 
         if (condition.constant != null) {
             throw createError(new IllegalArgumentException("Extraneous if statement."));
@@ -58,9 +58,9 @@ public final class SIf extends AStatement {
         ifblock.inLoop = inLoop;
         ifblock.lastLoop = lastLoop;
 
-        variables.incrementScope();
-        ifblock.analyze(variables);
-        variables.decrementScope();
+        locals.incrementScope();
+        ifblock.analyze(locals);
+        locals.decrementScope();
 
         anyContinue = ifblock.anyContinue;
         anyBreak = ifblock.anyBreak;
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SIfElse.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SIfElse.java
index 1d801267054..22cbfe25614 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SIfElse.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SIfElse.java
@@ -21,7 +21,7 @@
 package org.elasticsearch.painless.node;
 
 import org.elasticsearch.painless.Definition;
 import org.elasticsearch.painless.Location;
-import org.elasticsearch.painless.Variables;
+import org.elasticsearch.painless.Locals;
 import org.objectweb.asm.Label;
 import org.elasticsearch.painless.MethodWriter;
@@ -43,10 +43,10 @@ public final class SIfElse extends AStatement {
     }
 
     @Override
-    void analyze(Variables variables) {
+    void analyze(Locals locals) {
         condition.expected = Definition.BOOLEAN_TYPE;
-        condition.analyze(variables);
-        condition = condition.cast(variables);
+        condition.analyze(locals);
+        condition = condition.cast(locals);
 
         if (condition.constant != null) {
             throw createError(new IllegalArgumentException("Extraneous if statement."));
@@ -60,9 +60,9 @@ public final class SIfElse extends AStatement {
         ifblock.inLoop = inLoop;
         ifblock.lastLoop = lastLoop;
 
-        variables.incrementScope();
-        ifblock.analyze(variables);
-        variables.decrementScope();
+        locals.incrementScope();
+        ifblock.analyze(locals);
+        locals.decrementScope();
 
         anyContinue = ifblock.anyContinue;
         anyBreak = ifblock.anyBreak;
@@ -76,9 +76,9 @@ public final class SIfElse extends AStatement {
         elseblock.inLoop = inLoop;
         elseblock.lastLoop = lastLoop;
 
-        variables.incrementScope();
-        elseblock.analyze(variables);
-        variables.decrementScope();
+        locals.incrementScope();
+        elseblock.analyze(locals);
+        locals.decrementScope();
 
         methodEscape = ifblock.methodEscape && elseblock.methodEscape;
         loopEscape = ifblock.loopEscape && elseblock.loopEscape;
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SReturn.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SReturn.java
index bd9707eb6a4..e789f3372e3 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SReturn.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SReturn.java
@@ -19,9 +19,8 @@
 
 package org.elasticsearch.painless.node;
 
-import org.elasticsearch.painless.Definition;
 import org.elasticsearch.painless.Location;
-import org.elasticsearch.painless.Variables;
+import org.elasticsearch.painless.Locals;
 import org.elasticsearch.painless.MethodWriter;
 
 /**
@@ -38,11 +37,11 @@ public final class SReturn extends AStatement {
     }
 
     @Override
-    void analyze(Variables variables) {
-        expression.expected = Definition.OBJECT_TYPE;
+    void analyze(Locals locals) {
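+        // type the return expression against the enclosing scope's declared return type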
+        expression.expected = locals.getReturnType();
         expression.internal = true;
-        expression.analyze(variables);
-        expression = expression.cast(variables);
+        expression.analyze(locals);
+        expression = expression.cast(locals);
 
         methodEscape = true;
         loopEscape = true;
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java
index 22fd2e3e28c..c7764b3d60a 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java
@@ -19,34 +19,85 @@
 
 package org.elasticsearch.painless.node;
 
-import org.elasticsearch.painless.Variables;
+import org.elasticsearch.painless.Definition.Method;
+import org.elasticsearch.painless.Definition.MethodKey;
+import org.elasticsearch.painless.Executable;
+import org.elasticsearch.painless.Locals;
+import org.elasticsearch.painless.Locals.ExecuteReserved;
+import org.elasticsearch.painless.Locals.Variable;
+import org.elasticsearch.painless.WriterConstants;
+import org.objectweb.asm.ClassWriter;
 import org.objectweb.asm.Opcodes;
 import org.elasticsearch.painless.Location;
 import org.elasticsearch.painless.MethodWriter;
 
+import java.util.BitSet;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.painless.WriterConstants.BASE_CLASS_TYPE;
+import static org.elasticsearch.painless.WriterConstants.CLASS_TYPE;
+import static org.elasticsearch.painless.WriterConstants.CONSTRUCTOR;
+import static org.elasticsearch.painless.WriterConstants.EXECUTE;
+import static org.elasticsearch.painless.WriterConstants.MAP_GET;
+import static org.elasticsearch.painless.WriterConstants.MAP_TYPE;
 
 /**
  * The root of all Painless trees. Contains a series of statements.
  */
 public final class SSource extends AStatement {
+    final String name;
+    final String source;
+    final ExecuteReserved reserved;
+    final List<SFunction> functions;
     final List<AStatement> statements;
 
-    public SSource(Location location, List<AStatement> statements) {
+    private Locals locals;
+    private BitSet expressions;
+    private byte[] bytes;
+
+    public SSource(String name, String source, ExecuteReserved reserved, Location location,
+                   List<SFunction> functions, List<AStatement> statements) {
         super(location);
 
+        this.name = name;
+        this.source = source;
+        this.reserved = reserved;
+        this.functions = Collections.unmodifiableList(functions);
         this.statements = Collections.unmodifiableList(statements);
     }
 
+    public void analyze() {
+        Map<MethodKey, Method> methods = new HashMap<>();
+
+        for (SFunction function : functions) {
+            function.generate();
+
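+            // a MethodKey is (name, arity), so same-named functions with different
+            // parameter counts may coexist; only exact duplicates are rejected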
+            MethodKey key = new MethodKey(function.name, function.parameters.size());
+
+            if (methods.put(key, function.method) != null) {
+                throw createError(new IllegalArgumentException("Duplicate functions with name [" + function.name + "]."));
+            }
+        }
+
+        locals = new Locals(reserved, methods);
+        analyze(locals);
+    }
+
     @Override
-    public void analyze(Variables variables) {
+    void analyze(Locals locals) {
+        for (SFunction function : functions) {
+            function.analyze(locals);
+        }
+
         if (statements == null || statements.isEmpty()) {
             throw createError(new IllegalArgumentException("Cannot generate an empty script."));
         }
 
-        variables.incrementScope();
+        locals.incrementScope();
 
         AStatement last = statements.get(statements.size() - 1);
 
@@ -59,17 +110,95 @@ public final class SSource extends AStatement {
 
             statement.lastSource = statement == last;
 
-            statement.analyze(variables);
+            statement.analyze(locals);
 
             methodEscape = statement.methodEscape;
             allEscape = statement.allEscape;
         }
 
-        variables.decrementScope();
+        locals.decrementScope();
+    }
+
+    public void write() {
+        // Create the ClassWriter.
+
+        int classFrames = ClassWriter.COMPUTE_FRAMES | ClassWriter.COMPUTE_MAXS;
+        int classVersion = Opcodes.V1_8;
+        int classAccess = Opcodes.ACC_PUBLIC | Opcodes.ACC_SUPER | Opcodes.ACC_FINAL;
+        String classBase = BASE_CLASS_TYPE.getInternalName();
+        String className = CLASS_TYPE.getInternalName();
+        String classInterfaces[] = reserved.usesScore() ? new String[] { WriterConstants.NEEDS_SCORE_TYPE.getInternalName() } : null;
+
+        ClassWriter writer = new ClassWriter(classFrames);
+        writer.visit(classVersion, classAccess, className, null, classBase, classInterfaces);
+        writer.visitSource(Location.computeSourceName(name, source), null);
+
+        // Create the execute MethodWriter.
+
+        expressions = new BitSet(source.length());
+        MethodWriter execute = new MethodWriter(Opcodes.ACC_PUBLIC, EXECUTE, writer, expressions);
+
+        // Write the constructor.
+
+        MethodWriter constructor = execute.newMethodWriter(Opcodes.ACC_PUBLIC, CONSTRUCTOR);
+        constructor.loadThis();
+        constructor.loadArgs();
+        constructor.invokeConstructor(org.objectweb.asm.Type.getType(Executable.class), CONSTRUCTOR);
+        constructor.returnValue();
+        constructor.endMethod();
+
+        // Write the execute method.
+
+        write(execute);
+        execute.endMethod();
+
+        // End writing the class and store the generated bytes.
+
+        writer.visitEnd();
+        bytes = writer.toByteArray();
     }
 
     @Override
-    public void write(MethodWriter writer) {
+    void write(MethodWriter writer) {
+        for (SFunction function : functions) {
+            function.write(writer);
+        }
+
+        if (reserved.usesScore()) {
+            // if the _score value is used, we do this once:
+            // final double _score = scorer.score();
+
+            Variable scorer = locals.getVariable(null, ExecuteReserved.SCORER);
+            Variable score = locals.getVariable(null, ExecuteReserved.SCORE);
+
+            writer.visitVarInsn(Opcodes.ALOAD, scorer.slot);
+            writer.invokeVirtual(WriterConstants.SCORER_TYPE, WriterConstants.SCORER_SCORE);
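+            // Scorer.score() returns a float; widen it to a double for the _score local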
+            writer.visitInsn(Opcodes.F2D);
+            writer.visitVarInsn(Opcodes.DSTORE, score.slot);
+        }
+
+        if (reserved.usesCtx()) {
+            // if the ctx value is used, we do this once:
+            // final Map<String,Object> ctx = input.get("ctx");
+
+            Variable input = locals.getVariable(null, ExecuteReserved.PARAMS);
+            Variable ctx = locals.getVariable(null, ExecuteReserved.CTX);
+
+            writer.visitVarInsn(Opcodes.ALOAD, input.slot);
+            writer.push(ExecuteReserved.CTX);
+            writer.invokeInterface(MAP_TYPE, MAP_GET);
+            writer.visitVarInsn(Opcodes.ASTORE, ctx.slot);
+        }
+
+        if (reserved.getMaxLoopCounter() > 0) {
+            // if there is infinite loop protection, we do this once:
+            // int #loop = settings.getMaxLoopCounter()
+
+            Variable loop = locals.getVariable(null, ExecuteReserved.LOOP);
+
+            writer.push(reserved.getMaxLoopCounter());
+            writer.visitVarInsn(Opcodes.ISTORE, loop.slot);
+        }
+
         for (AStatement statement : statements) {
             statement.write(writer);
         }
@@ -79,4 +208,12 @@ public final class SSource extends AStatement {
             writer.returnValue();
         }
     }
+
+    public BitSet getExpressions() {
+        return expressions;
+    }
+
+    public byte[] getBytes() {
+        return bytes;
+    }
 }
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SThrow.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SThrow.java
index af9d7a65990..db04d622839 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SThrow.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SThrow.java
@@ -21,7 +21,7 @@
 package org.elasticsearch.painless.node;
 
 import org.elasticsearch.painless.Definition;
 import org.elasticsearch.painless.Location;
-import org.elasticsearch.painless.Variables;
+import org.elasticsearch.painless.Locals;
 import org.elasticsearch.painless.MethodWriter;
 
 /**
@@ -38,10 +38,10 @@ public final class SThrow extends AStatement {
     }
 
     @Override
-    void analyze(Variables variables) {
+    void analyze(Locals locals) {
         expression.expected = Definition.EXCEPTION_TYPE;
-        expression.analyze(variables);
-        expression = expression.cast(variables);
+        expression.analyze(locals);
+        expression = expression.cast(locals);
 
         methodEscape = true;
         loopEscape = true;
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/STry.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/STry.java
index c24c8273dba..42fffc759ce 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/STry.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/STry.java
@@ -19,7 +19,7 @@
 
 package org.elasticsearch.painless.node;
 
-import org.elasticsearch.painless.Variables;
+import org.elasticsearch.painless.Locals;
 import org.objectweb.asm.Label;
 import org.elasticsearch.painless.Location;
 import org.elasticsearch.painless.MethodWriter;
@@ -43,7 +43,7 @@ public final class STry extends AStatement {
     }
 
     @Override
-    void analyze(Variables variables) {
+    void analyze(Locals locals) {
         if (block == null) {
             throw createError(new IllegalArgumentException("Extraneous try statement."));
         }
@@ -52,9 +52,9 @@ public final class STry extends AStatement {
         block.inLoop = inLoop;
         block.lastLoop = lastLoop;
 
-        variables.incrementScope();
-        block.analyze(variables);
-        variables.decrementScope();
+        locals.incrementScope();
+        block.analyze(locals);
+        locals.decrementScope();
 
         methodEscape = block.methodEscape;
         loopEscape = block.loopEscape;
@@ -69,9 +69,9 @@ public final class STry extends AStatement {
             catc.inLoop = inLoop;
             catc.lastLoop = lastLoop;
 
-            variables.incrementScope();
-            catc.analyze(variables);
-            variables.decrementScope();
+            locals.incrementScope();
+            catc.analyze(locals);
+            locals.decrementScope();
 
             methodEscape &= catc.methodEscape;
             loopEscape &= catc.loopEscape;
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SWhile.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SWhile.java
index 59c1bb75ee8..20478a55aa0 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SWhile.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SWhile.java
@@ -21,7 +21,7 @@
 package org.elasticsearch.painless.node;
 
 import org.elasticsearch.painless.Definition;
 import org.elasticsearch.painless.Location;
-import org.elasticsearch.painless.Variables;
+import org.elasticsearch.painless.Locals;
 import org.objectweb.asm.Label;
 import org.elasticsearch.painless.MethodWriter;
@@ -30,25 +30,23 @@ import org.elasticsearch.painless.MethodWriter;
  */
 public final class SWhile extends AStatement {
 
-    final int maxLoopCounter;
     AExpression condition;
     final SBlock block;
 
-    public SWhile(Location location, int maxLoopCounter, AExpression condition, SBlock block) {
+    public SWhile(Location location, AExpression condition, SBlock block) {
         super(location);
 
-        this.maxLoopCounter = maxLoopCounter;
         this.condition = condition;
         this.block = block;
     }
 
     @Override
-    void analyze(Variables variables) {
-        variables.incrementScope();
+    void analyze(Locals locals) {
+        locals.incrementScope();
 
         condition.expected = Definition.BOOLEAN_TYPE;
-        condition.analyze(variables);
-        condition = condition.cast(variables);
+        condition.analyze(locals);
+        condition = condition.cast(locals);
 
         boolean continuous = false;
 
@@ -68,7 +66,7 @@ public final class SWhile extends AStatement {
             block.beginLoop = true;
             block.inLoop = true;
 
-            block.analyze(variables);
+            block.analyze(locals);
 
             if (block.loopEscape && !block.anyContinue) {
                 throw createError(new IllegalArgumentException("Extraneous while loop."));
@@ -84,11 +82,11 @@
 
         statementCount = 1;
 
-        if (maxLoopCounter > 0) {
-            loopCounterSlot = variables.getVariable(location, "#loop").slot;
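+        // the shared "#loop" counter variable is registered in Locals; look up its slot there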
+        if (locals.getMaxLoopCounter() > 0) {
+            loopCounterSlot = locals.getVariable(location, "#loop").slot;
         }
 
-        variables.decrementScope();
+        locals.decrementScope();
     }
 
     @Override
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/package-info.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/package-info.java
index 71cde33e979..8ab69366f24 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/package-info.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/package-info.java
@@ -49,7 +49,8 @@
  * {@link org.elasticsearch.painless.node.IDefLink} - A marker interface for all LDef* (link) nodes.
  * {@link org.elasticsearch.painless.node.LArrayLength} - Represents an array length field load.
  * {@link org.elasticsearch.painless.node.LBrace} - Represents an array load/store or defers to possible shortcuts.
- * {@link org.elasticsearch.painless.node.LCall} - Represents a method call or defers to a def call.
+ * {@link org.elasticsearch.painless.node.LCallInvoke} - Represents a method call or defers to a def call.
+ * {@link org.elasticsearch.painless.node.LCallLocal} - Represents a call to a user-defined function.
  * {@link org.elasticsearch.painless.node.LCast} - Represents a cast made in a variable/method chain.
  * {@link org.elasticsearch.painless.node.LDefArray} - Represents an array load/store or shortcut on a def type. (Internal only.)
  * {@link org.elasticsearch.painless.node.LDefCall} - Represents a method call made on a def type. (Internal only.)
@@ -73,6 +74,7 @@
  * {@link org.elasticsearch.painless.node.SEach} - Represents a for each loop shortcut for iterables.
  * {@link org.elasticsearch.painless.node.SExpression} - Represents the top-level node for an expression as a statement.
  * {@link org.elasticsearch.painless.node.SFor} - Represents a for loop.
+ * {@link org.elasticsearch.painless.node.SFunction} - Represents a user-defined function.
  * {@link org.elasticsearch.painless.node.SIf} - Represents an if block.
  * {@link org.elasticsearch.painless.node.SIfElse} - Represents an if/else block.
  * {@link org.elasticsearch.painless.node.SReturn} - Represents a return statement.
diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/FunctionTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/FunctionTests.java
new file mode 100644
index 00000000000..48c09cd4025
--- /dev/null
+++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/FunctionTests.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.painless;
+
+public class FunctionTests extends ScriptTestCase {
+    public void testBasic() {
+        assertEquals(5, exec("int get() {5;} get()"));
+    }
+
+    public void testReference() {
+        assertEquals(5, exec("void get(int[] x) {x[0] = 5;} int[] y = new int[1]; y[0] = 1; get(y); y[0]"));
+    }
+
+    public void testConcat() {
+        assertEquals("xyxy", exec("String catcat(String single) {single + single;} catcat('xy')"));
+    }
+
+    public void testMultiArgs() {
+        assertEquals(5, exec("int add(int x, int y) {return x + y;} int x = 1, y = 2; add(add(x, x), add(x, y))"));
+    }
+
+    public void testMultiFuncs() {
+        assertEquals(1, exec("int add(int x, int y) {return x + y;} int sub(int x, int y) {return x - y;} add(2, sub(3, 4))"));
+        assertEquals(3, exec("int sub2(int x, int y) {sub(x, y) - y;} int sub(int x, int y) {return x - y;} sub2(5, 1)"));
+    }
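+
+    // self-recursion works: every function's signature is registered before any body is analyzed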
+    public void testRecursion() {
+        assertEquals(55, exec("int fib(int n) {if (n <= 1) return n; else return fib(n-1) + fib(n-2);} fib(10)"));
+    }
+
+    public void testEmpty() {
+        Exception expected = expectScriptThrows(IllegalArgumentException.class, () -> {
+            exec("void test(int x) {} test()");
+        });
+        assertTrue(expected.getMessage().contains("Cannot generate an empty function"));
+    }
+
+    public void testDuplicates() {
+        Exception expected = expectScriptThrows(IllegalArgumentException.class, () -> {
+            exec("void test(int x) {x = 2;} void test(def y) {y = 3;} test()");
+        });
+        assertTrue(expected.getMessage().contains("Duplicate functions"));
+    }
+
+    public void testInfiniteLoop() {
+        Error expected = expectScriptThrows(PainlessError.class, () -> {
+            exec("void test() {boolean x = true; while (x) {}} test()");
+        });
+        assertTrue(expected.getMessage().contains(
+            "The maximum number of statements that can be executed in a loop has been reached."));
+    }
+}
diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ReservedWordTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ReservedWordTests.java
index 9136261c164..e1dbe9db0f7 100644
--- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ReservedWordTests.java
+++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ReservedWordTests.java
@@ -30,7 +30,7 @@ public class ReservedWordTests extends ScriptTestCase {
         IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, () -> {
             exec("int _score = 5; return _score;");
         });
-        assertTrue(expected.getMessage().contains("Variable name [_score] is reserved"));
+        assertTrue(expected.getMessage().contains("Variable [_score] is reserved"));
     }
 
     /** check that we can't write to _score, it's read-only! */
@@ -46,7 +46,7 @@ public class ReservedWordTests extends ScriptTestCase {
         IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, () -> {
             exec("int doc = 5; return doc;");
         });
-        assertTrue(expected.getMessage().contains("Variable name [doc] is reserved"));
+        assertTrue(expected.getMessage().contains("Variable [doc] is reserved"));
    }
 
     /** check that we can't write to doc, it's read-only! */
@@ -62,7 +62,7 @@ public class ReservedWordTests extends ScriptTestCase {
         IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, () -> {
             exec("int ctx = 5; return ctx;");
         });
-        assertTrue(expected.getMessage().contains("Variable name [ctx] is reserved"));
+        assertTrue(expected.getMessage().contains("Variable [ctx] is reserved"));
     }
 
     /** check that we can't write to ctx, it's read-only! */
@@ -83,7 +83,7 @@ public class ReservedWordTests extends ScriptTestCase {
         IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, () -> {
             exec("int _value = 5; return _value;");
         });
-        assertTrue(expected.getMessage().contains("Variable name [_value] is reserved"));
+        assertTrue(expected.getMessage().contains("Variable [_value] is reserved"));
     }
 
     /** check that we can't write to _value, it's read-only! */
diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQuery.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQuery.java
index 2f3108d6298..fbf06468e75 100644
--- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQuery.java
+++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQuery.java
@@ -243,7 +243,7 @@ public final class PercolateQuery extends Query implements Accountable {
     public boolean equals(Object o) {
         if (this == o) return true;
         if (o == null || getClass() != o.getClass()) return false;
-        if (!super.equals(o)) return false;
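+        // sameClassAs stands in for the former super.equals check, which the
+        // Query base class no longer provides as of the Lucene 6.1 snapshot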
+        if (sameClassAs(o) == false) return false;
 
         PercolateQuery that = (PercolateQuery) o;
 
@@ -254,7 +254,7 @@ public final class PercolateQuery extends Query implements Accountable {
 
     @Override
     public int hashCode() {
-        int result = super.hashCode();
+        int result = classHash();
         result = 31 * result + documentType.hashCode();
         result = 31 * result + documentSource.hashCode();
         return result;
diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryTests.java
index 07959db1ff1..4879badc7d3 100644
--- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryTests.java
+++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryTests.java
@@ -379,6 +379,16 @@ public class PercolateQueryTests extends ESTestCase {
         public String toString(String field) {
             return "custom{" + field + "}";
         }
+
+        @Override
+        public boolean equals(Object obj) {
+            return sameClassAs(obj);
+        }
+
+        @Override
+        public int hashCode() {
+            return classHash();
+        }
     }
 }
diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexSameIndexTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexSameIndexTests.java
index f1218414af7..2af05a8455b 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexSameIndexTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexSameIndexTests.java
@@ -32,6 +32,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.test.ESTestCase;
+import org.junit.BeforeClass;
 
 import static org.hamcrest.Matchers.containsString;
 
@@ -39,17 +40,23 @@ import static org.hamcrest.Matchers.containsString;
  * Tests that indexing from an index back into itself fails the request.
  */
 public class ReindexSameIndexTests extends ESTestCase {
-    private static final ClusterState STATE = ClusterState.builder(new ClusterName("test")).metaData(MetaData.builder()
-            .put(index("target", "target_alias", "target_multi"), true)
-            .put(index("target2", "target_multi"), true)
-            .put(index("foo"), true)
-            .put(index("bar"), true)
-            .put(index("baz"), true)
-            .put(index("source", "source_multi"), true)
-            .put(index("source2", "source_multi"), true)).build();
+
+    private static ClusterState STATE;
     private static final IndexNameExpressionResolver INDEX_NAME_EXPRESSION_RESOLVER = new IndexNameExpressionResolver(Settings.EMPTY);
     private static final AutoCreateIndex AUTO_CREATE_INDEX = new AutoCreateIndex(Settings.EMPTY, INDEX_NAME_EXPRESSION_RESOLVER);
 
+    @BeforeClass
+    public static void beforeClass() {
+        STATE = ClusterState.builder(new ClusterName("test")).metaData(MetaData.builder()
+                .put(index("target", "target_alias", "target_multi"), true)
+                .put(index("target2", "target_multi"), true)
+                .put(index("foo"), true)
+                .put(index("bar"), true)
+                .put(index("baz"), true)
+                .put(index("source", "source_multi"), true)
+                .put(index("source2", "source_multi"), true)).build();
+    }
+
     public void testObviousCases() throws Exception {
         fails("target", "target");
         fails("target", "foo", "bar", "target", "baz");
diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.0.1.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.0.1.jar.sha1
deleted file mode 100644
index 95dab25e74a..00000000000
--- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.0.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-da08d9919f54efd2e09968d49fe05f6ce3f0c7ce
\ No newline at end of file
diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.1.0-snapshot-3a57bea.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.1.0-snapshot-3a57bea.jar.sha1
new file mode 100644
index 00000000000..013def114d4
--- /dev/null
+++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.1.0-snapshot-3a57bea.jar.sha1
@@ -0,0 +1 @@
+9cd8cbea5baef18a36bee86846a9ba026d2a02e0
\ No newline at end of file
diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.0.1.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.0.1.jar.sha1
deleted file mode 100644
index 70f83bf52cc..00000000000
--- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.0.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-77905f563c47994a764a6ab3d5ec198c174567a7
\ No newline at end of file
diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.1.0-snapshot-3a57bea.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.1.0-snapshot-3a57bea.jar.sha1
new file mode 100644
index 00000000000..12c861c24ab
--- /dev/null
+++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.1.0-snapshot-3a57bea.jar.sha1
@@ -0,0 +1 @@
+86c6d6a367ed658351bd8c8828d6ed647ac79b7e
\ No newline at end of file
diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.0.1.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.0.1.jar.sha1
deleted file mode 100644
index 8e2f7ab8b98..00000000000
--- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.0.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3ee5d909c269e5da7a92715f41ead88943b38123
\ No newline at end of file
diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.1.0-snapshot-3a57bea.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.1.0-snapshot-3a57bea.jar.sha1
new file mode 100644
index 00000000000..6f571d75537
--- /dev/null
+++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.1.0-snapshot-3a57bea.jar.sha1
@@ -0,0 +1 @@
+6553bf764a69cd15e4fe1e55661382872795b853
\ No newline at end of file
diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.0.1.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.0.1.jar.sha1
deleted file mode 100644
index 981855d5a97..00000000000
--- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.0.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3b7bdbf9efa84f8d8875bd7f1d8734276930b9c3
\ No newline at end of file
diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.1.0-snapshot-3a57bea.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.1.0-snapshot-3a57bea.jar.sha1
new file mode 100644
index 00000000000..2ea2d6b9622
--- /dev/null
+++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.1.0-snapshot-3a57bea.jar.sha1
@@ -0,0 +1 @@
+979817950bc806400d8fa12a609ef215b5bdebd6
\ No newline at end of file
diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.0.1.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.0.1.jar.sha1
deleted file mode 100644
index 4ff0afee687..00000000000
--- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.0.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-e80e912621276e1009b72c06d5def188976c5426
\ No newline at end of file
diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.1.0-snapshot-3a57bea.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.1.0-snapshot-3a57bea.jar.sha1
new file mode 100644
index 00000000000..6677cfd3fc4
--- /dev/null
+++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.1.0-snapshot-3a57bea.jar.sha1
@@ -0,0 +1 @@
+2a720b647b6a202ec1d8d91db02006ae9539670b
\ No newline at end of file
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/12_slices.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/12_slices.yaml
new file mode 100644
index 00000000000..5443059135a
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/12_slices.yaml
@@ -0,0 +1,75 @@
+---
+"Sliced scroll":
+  - do:
+      indices.create:
+        index: test_sliced_scroll
+
+  - do:
+      index:
+        index: test_sliced_scroll
+        type: test
+        id: 42
+        body: { foo: 1 }
+
+  - do:
+      indices.refresh: {}
+
+  - do:
+      search:
+        index: test_sliced_scroll
+        size: 1
+        scroll: 1m
+        sort: foo
+        body:
+          slice: {
+            id: 0,
+            max: 3
+          }
+          query:
+            match_all: {}
+
+  - set: {_scroll_id: scroll_id}
+
+  - do:
+      clear_scroll:
+        scroll_id: $scroll_id
+
+  - do:
+      catch: /query_phase_execution_exception.*The number of slices.*index.max_slices_per_scroll/
+      search:
+        index: test_sliced_scroll
+        size: 1
+        scroll: 1m
+        body:
+          slice: {
+            id: 0,
+            max: 1025
+          }
+          query:
+            match_all: {}
+
+  - do:
+      indices.put_settings:
+        index: test_sliced_scroll
+        body:
+          index.max_slices_per_scroll: 1025
+
+  - do:
+      search:
+        index: test_sliced_scroll
+        size: 1
+        scroll: 1m
+        body:
+          slice: {
+            id: 0,
+            max: 1025
+          }
+          query:
+            match_all: {}
+
+  - set: {_scroll_id: scroll_id}
+
+  - do:
+      clear_scroll:
+        scroll_id: $scroll_id
+
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
index 2a3eecf4cc8..4e066bc7635 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
@@ -108,6 +108,7 @@ import org.elasticsearch.index.codec.CodecService;
 import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
 import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.indices.IndicesQueryCache;
 import org.elasticsearch.indices.IndicesRequestCache;
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.indices.store.IndicesStore;
@@ -1633,7 +1634,11 @@ public abstract class ESIntegTestCase extends ESTestCase {
                 .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "1b")
                 .put("script.stored", "true")
                 .put("script.inline", "true")
-                // wait short time for other active shards before actually deleting, default 30s not needed in tests
+                // by default we never cache below 10k docs in a segment,
+                // bypass this limit so that caching gets some testing in
+                // integration tests that usually create few documents
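+                // (nodeOrdinal % 2 alternates the setting across nodes so both the
+                // cache-all-segments and default code paths get exercised)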
+                .put(IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.getKey(), nodeOrdinal % 2 == 0)
+                // wait short time for other active shards before actually deleting, default 30s not needed in tests
                 .put(IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT.getKey(), new TimeValue(1, TimeUnit.SECONDS));
         return builder.build();
     }