Merge branch 'master' into capturingReferences
commit 9e0a70c4b2
@@ -482,7 +482,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]analysis[/\\]PreBuiltCacheFactory.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]analysis[/\\]PreBuiltTokenFilters.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]breaker[/\\]HierarchyCircuitBreakerService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]cluster[/\\]IndicesClusterStateService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]fielddata[/\\]cache[/\\]IndicesFieldDataCache.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]fielddata[/\\]cache[/\\]IndicesFieldDataCacheListener.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]flush[/\\]ShardsSyncedFlushResult.java" checks="LineLength" />
@@ -31,5 +31,3 @@ org.apache.lucene.index.IndexReader#getCombinedCoreAndDeletesKey()

@defaultMessage Soon to be removed
org.apache.lucene.document.FieldType#numericType()

org.apache.lucene.document.InetAddressPoint#newPrefixQuery(java.lang.String, java.net.InetAddress, int) @LUCENE-7232
@@ -1,5 +1,5 @@
elasticsearch = 5.0.0
lucene = 6.0.1
lucene = 6.1.0-snapshot-3a57bea

# optional dependencies
spatial4j = 0.6
@@ -1,117 +0,0 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.document;

import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Arrays;

import org.apache.lucene.search.Query;
import org.apache.lucene.util.NumericUtils;
import org.elasticsearch.common.SuppressForbidden;

/**
 * Forked utility methods from Lucene's InetAddressPoint until LUCENE-7232 and
 * LUCENE-7234 are released.
 */
// TODO: remove me when we upgrade to Lucene 6.1
@SuppressForbidden(reason="uses InetAddress.getHostAddress")
public final class XInetAddressPoint {

    private XInetAddressPoint() {}

    /** The minimum value that an ip address can hold. */
    public static final InetAddress MIN_VALUE;
    /** The maximum value that an ip address can hold. */
    public static final InetAddress MAX_VALUE;
    static {
        MIN_VALUE = InetAddressPoint.decode(new byte[InetAddressPoint.BYTES]);
        byte[] maxValueBytes = new byte[InetAddressPoint.BYTES];
        Arrays.fill(maxValueBytes, (byte) 0xFF);
        MAX_VALUE = InetAddressPoint.decode(maxValueBytes);
    }

    /**
     * Return the {@link InetAddress} that compares immediately greater than
     * {@code address}.
     * @throws ArithmeticException if the provided address is the
     *         {@link #MAX_VALUE maximum ip address}
     */
    public static InetAddress nextUp(InetAddress address) {
        if (address.equals(MAX_VALUE)) {
            throw new ArithmeticException("Overflow: there is no greater InetAddress than "
                    + address.getHostAddress());
        }
        byte[] delta = new byte[InetAddressPoint.BYTES];
        delta[InetAddressPoint.BYTES-1] = 1;
        byte[] nextUpBytes = new byte[InetAddressPoint.BYTES];
        NumericUtils.add(InetAddressPoint.BYTES, 0, InetAddressPoint.encode(address), delta, nextUpBytes);
        return InetAddressPoint.decode(nextUpBytes);
    }

    /**
     * Return the {@link InetAddress} that compares immediately less than
     * {@code address}.
     * @throws ArithmeticException if the provided address is the
     *         {@link #MIN_VALUE minimum ip address}
     */
    public static InetAddress nextDown(InetAddress address) {
        if (address.equals(MIN_VALUE)) {
            throw new ArithmeticException("Underflow: there is no smaller InetAddress than "
                    + address.getHostAddress());
        }
        byte[] delta = new byte[InetAddressPoint.BYTES];
        delta[InetAddressPoint.BYTES-1] = 1;
        byte[] nextDownBytes = new byte[InetAddressPoint.BYTES];
        NumericUtils.subtract(InetAddressPoint.BYTES, 0, InetAddressPoint.encode(address), delta, nextDownBytes);
        return InetAddressPoint.decode(nextDownBytes);
    }

    /**
     * Create a prefix query for matching a CIDR network range.
     *
     * @param field field name. must not be {@code null}.
     * @param value any host address
     * @param prefixLength the network prefix length for this address. This is also known as the subnet mask in the context of IPv4
     *        addresses.
     * @throws IllegalArgumentException if {@code field} is null, or prefixLength is invalid.
     * @return a query matching documents with addresses contained within this network
     */
    // TODO: remove me when we upgrade to Lucene 6.0.1
    public static Query newPrefixQuery(String field, InetAddress value, int prefixLength) {
        if (value == null) {
            throw new IllegalArgumentException("InetAddress must not be null");
        }
        if (prefixLength < 0 || prefixLength > 8 * value.getAddress().length) {
            throw new IllegalArgumentException("illegal prefixLength '" + prefixLength
                    + "'. Must be 0-32 for IPv4 ranges, 0-128 for IPv6 ranges");
        }
        // create the lower value by zeroing out the host portion, upper value by filling it with all ones.
        byte lower[] = value.getAddress();
        byte upper[] = value.getAddress();
        for (int i = prefixLength; i < 8 * lower.length; i++) {
            int m = 1 << (7 - (i & 7));
            lower[i >> 3] &= ~m;
            upper[i >> 3] |= m;
        }
        try {
            return InetAddressPoint.newRangeQuery(field, InetAddress.getByAddress(lower), InetAddress.getByAddress(upper));
        } catch (UnknownHostException e) {
            throw new AssertionError(e); // values are coming from InetAddress
        }
    }
}
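The forked XInetAddressPoint helper above can be deleted because the Lucene snapshot this merge upgrades to ships the same functionality on InetAddressPoint itself (LUCENE-7232 / LUCENE-7234). A minimal sketch of the replacement call, assuming Lucene 6.1+ is on the classpath; the field name and network are illustrative only:

import java.net.InetAddress;
import org.apache.lucene.document.InetAddressPoint;
import org.apache.lucene.search.Query;

public class PrefixQueryExample {
    public static void main(String[] args) throws Exception {
        // Match every document whose "ip" field falls inside 192.168.0.0/24,
        // the same CIDR semantics the removed XInetAddressPoint.newPrefixQuery provided.
        InetAddress network = InetAddress.getByName("192.168.0.0");
        Query q = InetAddressPoint.newPrefixQuery("ip", network, 24);
        System.out.println(q);
    }
}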
@@ -283,7 +283,7 @@ public abstract class BlendedTermQuery extends Query {
    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (!super.equals(o)) return false;
        if (sameClassAs(o) == false) return false;

        BlendedTermQuery that = (BlendedTermQuery) o;
        return Arrays.equals(equalsTerms(), that.equalsTerms());
@@ -291,7 +291,7 @@ public abstract class BlendedTermQuery extends Query {

    @Override
    public int hashCode() {
        return Objects.hash(super.hashCode(), Arrays.hashCode(equalsTerms()));
        return Objects.hash(classHash(), Arrays.hashCode(equalsTerms()));
    }

    public static BlendedTermQuery booleanBlendedQuery(Term[] terms, final boolean disableCoord) {
@@ -44,12 +44,12 @@ public final class MinDocQuery extends Query {

    @Override
    public int hashCode() {
        return Objects.hash(super.hashCode(), minDoc);
        return Objects.hash(classHash(), minDoc);
    }

    @Override
    public boolean equals(Object obj) {
        if (super.equals(obj) == false) {
        if (sameClassAs(obj) == false) {
            return false;
        }
        MinDocQuery that = (MinDocQuery) obj;
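Many of the query hunks in this merge apply the same Lucene 6.x migration: custom Query subclasses stop calling super.equals()/super.hashCode() and use the new sameClassAs()/classHash() helpers instead. A hedged sketch of the resulting pattern for a hypothetical query class (not an Elasticsearch class), assuming Lucene 6.1's Query API:

import java.util.Objects;
import org.apache.lucene.search.Query;

// Hypothetical example of the equals/hashCode pattern used throughout this merge.
final class ExampleQuery extends Query {
    private final int value;

    ExampleQuery(int value) {
        this.value = value;
    }

    @Override
    public boolean equals(Object obj) {
        // sameClassAs() replaces the old super.equals() check: it verifies obj is
        // non-null and of the exact same concrete class before field comparison.
        return sameClassAs(obj) && value == ((ExampleQuery) obj).value;
    }

    @Override
    public int hashCode() {
        // classHash() replaces super.hashCode(); it hashes the concrete class.
        return Objects.hash(classHash(), value);
    }

    @Override
    public String toString(String field) {
        return "ExampleQuery(" + value + ")";
    }
}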
@@ -63,9 +63,6 @@ import org.elasticsearch.common.io.PathUtils;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
@@ -622,8 +619,12 @@ public long ramBytesUsed() {
      Set<BytesRef> seenSurfaceForms = new HashSet<>();

      int dedup = 0;
      while (reader.read(scratch)) {
        input.reset(scratch.bytes(), 0, scratch.length());
      while (true) {
        BytesRef bytes = reader.next();
        if (bytes == null) {
          break;
        }
        input.reset(bytes.bytes, bytes.offset, bytes.length);
        short analyzedLength = input.readShort();
        analyzed.grow(analyzedLength+2);
        input.readBytes(analyzed.bytes(), 0, analyzedLength);
@@ -631,13 +632,13 @@ public long ramBytesUsed() {

        long cost = input.readInt();

        surface.bytes = scratch.bytes();
        surface.bytes = bytes.bytes;
        if (hasPayloads) {
          surface.length = input.readShort();
          surface.offset = input.getPosition();
        } else {
          surface.offset = input.getPosition();
          surface.length = scratch.length() - surface.offset;
          surface.length = bytes.length - surface.offset;
        }

        if (previousAnalyzed == null) {
@@ -679,11 +680,11 @@ public long ramBytesUsed() {
          builder.add(scratchInts.get(), outputs.newPair(cost, BytesRef.deepCopyOf(surface)));
        } else {
          int payloadOffset = input.getPosition() + surface.length;
          int payloadLength = scratch.length() - payloadOffset;
          int payloadLength = bytes.length - payloadOffset;
          BytesRef br = new BytesRef(surface.length + 1 + payloadLength);
          System.arraycopy(surface.bytes, surface.offset, br.bytes, 0, surface.length);
          br.bytes[surface.length] = (byte) payloadSep;
          System.arraycopy(scratch.bytes(), payloadOffset, br.bytes, surface.length+1, payloadLength);
          System.arraycopy(bytes.bytes, payloadOffset, br.bytes, surface.length+1, payloadLength);
          br.length = br.bytes.length;
          builder.add(scratchInts.get(), outputs.newPair(cost, br));
        }
@@ -77,7 +77,7 @@ public class Version {
    public static final int V_5_0_0_alpha3_ID = 5000003;
    public static final Version V_5_0_0_alpha3 = new Version(V_5_0_0_alpha3_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
    public static final int V_5_0_0_ID = 5000099;
    public static final Version V_5_0_0 = new Version(V_5_0_0_ID, org.apache.lucene.util.Version.LUCENE_6_0_1);
    public static final Version V_5_0_0 = new Version(V_5_0_0_ID, org.apache.lucene.util.Version.LUCENE_6_1_0);
    public static final Version CURRENT = V_5_0_0;

    static {
@@ -36,7 +36,6 @@ import org.elasticsearch.search.profile.ProfileShardResult;
import org.elasticsearch.search.suggest.Suggest;

import java.io.IOException;
import java.util.List;
import java.util.Map;

import static org.elasticsearch.action.search.ShardSearchFailure.readShardSearchFailure;
@@ -169,7 +168,7 @@ public class SearchResponse extends ActionResponse implements StatusToXContent {
     *
     * @return The profile results or an empty map
     */
    public @Nullable Map<String, List<ProfileShardResult>> getProfileResults() {
    public @Nullable Map<String, ProfileShardResult> getProfileResults() {
        return internalResponse.profile();
    }

@@ -33,6 +33,13 @@ public class JavaVersion implements Comparable<JavaVersion> {
    }

    private JavaVersion(List<Integer> version) {
        if (version.size() >= 2
                && version.get(0).intValue() == 1
                && version.get(1).intValue() == 8) {
            // for Java 8 there is ambiguity since both 1.8 and 8 are supported,
            // so we rewrite the former to the latter
            version = new ArrayList<>(version.subList(1, version.size()));
        }
        this.version = Collections.unmodifiableList(version);
    }

@@ -75,6 +82,19 @@ public class JavaVersion implements Comparable<JavaVersion> {
        return 0;
    }

    @Override
    public boolean equals(Object o) {
        if (o == null || o.getClass() != getClass()) {
            return false;
        }
        return compareTo((JavaVersion) o) == 0;
    }

    @Override
    public int hashCode() {
        return version.hashCode();
    }

    @Override
    public String toString() {
        return version.stream().map(v -> Integer.toString(v)).collect(Collectors.joining("."));
    }
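The constructor change above normalizes the legacy "1.8" form to "8" before versions are compared, so "1.8.0_92" and "8" agree. A small standalone sketch of the same normalization idea (names are illustrative, not Elasticsearch API):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class VersionNormalizeExample {
    // Rewrites a leading "1.8" to "8" so both spellings of Java 8 compare consistently.
    static List<Integer> normalize(List<Integer> version) {
        if (version.size() >= 2 && version.get(0) == 1 && version.get(1) == 8) {
            return new ArrayList<>(version.subList(1, version.size()));
        }
        return version;
    }

    public static void main(String[] args) {
        System.out.println(normalize(Arrays.asList(1, 8, 0)));  // [8, 0]
        System.out.println(normalize(Arrays.asList(9, 0)));     // [9, 0]
    }
}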
@@ -148,18 +148,11 @@ public class ClusterChangedEvent {
     * has changed between the previous cluster state and the new cluster state.
     * Note that this is an object reference equality test, not an equals test.
     */
    public boolean indexMetaDataChanged(IndexMetaData current) {
        MetaData previousMetaData = previousState.metaData();
        if (previousMetaData == null) {
            return true;
        }
        IndexMetaData previousIndexMetaData = previousMetaData.index(current.getIndex());
    public static boolean indexMetaDataChanged(IndexMetaData metaData1, IndexMetaData metaData2) {
        assert metaData1 != null && metaData2 != null;
        // no need to check on version, since disco modules will make sure to use the
        // same instance if its a version match
        if (previousIndexMetaData == current) {
            return false;
        }
        return true;
        return metaData1 != metaData2;
    }

    /**
@@ -24,6 +24,7 @@ import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaDataMappingService;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
@@ -58,13 +59,12 @@ public class NodeMappingRefreshAction extends AbstractComponent {
        transportService.registerRequestHandler(ACTION_NAME, NodeMappingRefreshRequest::new, ThreadPool.Names.SAME, new NodeMappingRefreshTransportHandler());
    }

    public void nodeMappingRefresh(final ClusterState state, final NodeMappingRefreshRequest request) {
        final DiscoveryNodes nodes = state.nodes();
        if (nodes.getMasterNode() == null) {
    public void nodeMappingRefresh(final DiscoveryNode masterNode, final NodeMappingRefreshRequest request) {
        if (masterNode == null) {
            logger.warn("can't send mapping refresh for [{}], no master known.", request.index());
            return;
        }
        transportService.sendRequest(nodes.getMasterNode(), ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME);
        transportService.sendRequest(masterNode, ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME);
    }

    private class NodeMappingRefreshTransportHandler implements TransportRequestHandler<NodeMappingRefreshRequest> {
@@ -19,8 +19,6 @@

package org.elasticsearch.common;

import java.io.IOException;
import java.util.Base64;
import java.util.Random;

@@ -32,7 +30,7 @@ class RandomBasedUUIDGenerator implements UUIDGenerator {
     */
    @Override
    public String getBase64UUID() {
        return getBase64UUID(SecureRandomHolder.INSTANCE);
        return getBase64UUID(Randomness.getSecure());
    }

    /**
@@ -49,12 +47,13 @@ class RandomBasedUUIDGenerator implements UUIDGenerator {
         * stamp (bits 4 through 7 of the time_hi_and_version field).*/
        randomBytes[6] &= 0x0f; /* clear the 4 most significant bits for the version */
        randomBytes[6] |= 0x40; /* set the version to 0100 / 0x40 */

        /* Set the variant:

        /* Set the variant:
         * The high field of th clock sequence multiplexed with the variant.
         * We set only the MSB of the variant*/
        randomBytes[8] &= 0x3f; /* clear the 2 most significant bits */
        randomBytes[8] |= 0x80; /* set the variant (MSB is set)*/
        return Base64.getUrlEncoder().withoutPadding().encodeToString(randomBytes);
    }

}
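The bit twiddling in the hunk above is the standard RFC 4122 encoding of a random (version 4) UUID: byte 6 carries the version nibble, byte 8 carries the variant bits. A self-contained sketch of the same idea, independent of the Elasticsearch classes involved:

import java.security.SecureRandom;
import java.util.Base64;

public class RandomUuidExample {
    // Generates 16 random bytes, stamps the version/variant bits, and encodes
    // them as URL-safe base64 without padding, mirroring the generator above.
    public static String randomBase64Uuid() {
        byte[] randomBytes = new byte[16];
        new SecureRandom().nextBytes(randomBytes);
        randomBytes[6] &= 0x0f;  // clear the 4 most significant bits for the version
        randomBytes[6] |= 0x40;  // set the version to 0100 / 0x40
        randomBytes[8] &= 0x3f;  // clear the 2 most significant bits
        randomBytes[8] |= 0x80;  // set the variant (MSB is set)
        return Base64.getUrlEncoder().withoutPadding().encodeToString(randomBytes);
    }

    public static void main(String[] args) {
        System.out.println(randomBase64Uuid());
    }
}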
@@ -23,6 +23,9 @@ import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;

import java.lang.reflect.Method;
import java.security.NoSuchAlgorithmException;
import java.security.NoSuchProviderException;
import java.security.SecureRandom;
import java.util.Collections;
import java.util.List;
import java.util.Random;
@@ -44,6 +47,7 @@ import java.util.concurrent.ThreadLocalRandom;
 * DiscoveryService#NODE_ID_SEED_SETTING)).
 */
public final class Randomness {

    private static final Method currentMethod;
    private static final Method getRandomMethod;

@@ -72,7 +76,7 @@ public final class Randomness {
     * @param setting the setting to access the seed
     * @return a reproducible source of randomness
     */
    public static Random get(Settings settings, Setting<Long> setting) {
    public static Random get(final Settings settings, final Setting<Long> setting) {
        if (setting.exists(settings)) {
            return new Random(setting.get(settings));
        } else {
@@ -98,7 +102,7 @@ public final class Randomness {
    public static Random get() {
        if (currentMethod != null && getRandomMethod != null) {
            try {
                Object randomizedContext = currentMethod.invoke(null);
                final Object randomizedContext = currentMethod.invoke(null);
                return (Random) getRandomMethod.invoke(randomizedContext);
            } catch (ReflectiveOperationException e) {
                // unexpected, bail
@@ -109,13 +113,42 @@ public final class Randomness {
        }
    }

    /**
     * Provides a source of randomness that is reproducible when
     * running under the Elasticsearch test suite, and otherwise
     * produces a non-reproducible source of secure randomness.
     * Reproducible sources of randomness are created when the system
     * property "tests.seed" is set and the security policy allows
     * reading this system property. Otherwise, non-reproducible
     * sources of secure randomness are created.
     *
     * @return a source of randomness
     * @throws IllegalStateException if running tests but was not able
     *                               to acquire an instance of Random from
     *                               RandomizedContext or tests are
     *                               running but tests.seed is not set
     */
    public static Random getSecure() {
        if (currentMethod != null && getRandomMethod != null) {
            return get();
        } else {
            return getSecureRandomWithoutSeed();
        }
    }

    @SuppressForbidden(reason = "ThreadLocalRandom is okay when not running tests")
    private static Random getWithoutSeed() {
        assert currentMethod == null && getRandomMethod == null : "running under tests but tried to create non-reproducible random";
        return ThreadLocalRandom.current();
    }

    public static void shuffle(List<?> list) {
    private static SecureRandom getSecureRandomWithoutSeed() {
        assert currentMethod == null && getRandomMethod == null : "running under tests but tried to create non-reproducible random";
        return SecureRandomHolder.INSTANCE;
    }

    public static void shuffle(final List<?> list) {
        Collections.shuffle(list, get());
    }

}
@@ -19,7 +19,7 @@ package org.elasticsearch.common.geo;
import java.util.ArrayList;
import java.util.Collection;

import org.apache.lucene.spatial.util.GeoEncodingUtils;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.apache.lucene.util.BitUtil;

/**
@@ -39,7 +39,7 @@ public class GeoHashUtils {

    /** maximum precision for geohash strings */
    public static final int PRECISION = 12;
    private static final short MORTON_OFFSET = (GeoEncodingUtils.BITS<<1) - (PRECISION*5);
    private static final short MORTON_OFFSET = (GeoPointField.BITS<<1) - (PRECISION*5);

    // No instance:
    private GeoHashUtils() {
@@ -51,7 +51,7 @@ public class GeoHashUtils {
    public static final long longEncode(final double lon, final double lat, final int level) {
        // shift to appropriate level
        final short msf = (short)(((12 - level) * 5) + MORTON_OFFSET);
        return ((BitUtil.flipFlop(GeoEncodingUtils.mortonHash(lat, lon)) >>> msf) << 4) | level;
        return ((BitUtil.flipFlop(GeoPointField.encodeLatLon(lat, lon)) >>> msf) << 4) | level;
    }

    /**
@@ -117,7 +117,7 @@ public class GeoHashUtils {
     */
    public static final String stringEncode(final double lon, final double lat, final int level) {
        // convert to geohashlong
        final long ghLong = fromMorton(GeoEncodingUtils.mortonHash(lat, lon), level);
        final long ghLong = fromMorton(GeoPointField.encodeLatLon(lat, lon), level);
        return stringEncode(ghLong);

    }

@@ -138,7 +138,7 @@ public class GeoHashUtils {

        StringBuilder geoHash = new StringBuilder();
        short precision = 0;
        final short msf = (GeoEncodingUtils.BITS<<1)-5;
        final short msf = (GeoPointField.BITS<<1)-5;
        long mask = 31L<<msf;
        do {
            geoHash.append(BASE_32[(int)((mask & hashedVal)>>>(msf-(precision*5)))]);
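The GeoHashUtils hunks only swap the Lucene helper used for the morton hash; the geohash itself is unchanged. For readers unfamiliar with what stringEncode ultimately produces, here is the textbook bit-interleaving geohash encoding, written without any Lucene dependency (this is an illustrative sketch, not the Elasticsearch implementation):

public class GeohashExample {
    private static final char[] BASE_32 = "0123456789bcdefghjkmnpqrstuvwxyz".toCharArray();

    // Classic geohash encoding: alternate longitude/latitude bisection bits,
    // packed five bits at a time into base-32 characters.
    static String encode(double lat, double lon, int precision) {
        double latMin = -90, latMax = 90, lonMin = -180, lonMax = 180;
        StringBuilder hash = new StringBuilder();
        boolean evenBit = true; // even bits encode longitude, odd bits encode latitude
        int bit = 0, ch = 0;
        while (hash.length() < precision) {
            if (evenBit) {
                double mid = (lonMin + lonMax) / 2;
                if (lon >= mid) { ch = (ch << 1) | 1; lonMin = mid; } else { ch = ch << 1; lonMax = mid; }
            } else {
                double mid = (latMin + latMax) / 2;
                if (lat >= mid) { ch = (ch << 1) | 1; latMin = mid; } else { ch = ch << 1; latMax = mid; }
            }
            evenBit = !evenBit;
            if (++bit == 5) {
                hash.append(BASE_32[ch]);
                bit = 0;
                ch = 0;
            }
        }
        return hash.toString();
    }

    public static void main(String[] args) {
        System.out.println(encode(42.605, -5.603, 5)); // prints "ezs42"
    }
}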
@@ -19,12 +19,11 @@

package org.elasticsearch.common.geo;

import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.apache.lucene.util.BitUtil;

import static org.elasticsearch.common.geo.GeoHashUtils.mortonEncode;
import static org.elasticsearch.common.geo.GeoHashUtils.stringEncode;
import static org.apache.lucene.spatial.util.GeoEncodingUtils.mortonUnhashLat;
import static org.apache.lucene.spatial.util.GeoEncodingUtils.mortonUnhashLon;

/**
 *
@@ -84,14 +83,14 @@ public final class GeoPoint {
    }

    public GeoPoint resetFromIndexHash(long hash) {
        lon = mortonUnhashLon(hash);
        lat = mortonUnhashLat(hash);
        lon = GeoPointField.decodeLongitude(hash);
        lat = GeoPointField.decodeLatitude(hash);
        return this;
    }

    public GeoPoint resetFromGeoHash(String geohash) {
        final long hash = mortonEncode(geohash);
        return this.reset(mortonUnhashLat(hash), mortonUnhashLon(hash));
        return this.reset(GeoPointField.decodeLatitude(hash), GeoPointField.decodeLongitude(hash));
    }

    public GeoPoint resetFromGeoHash(long geohashLong) {
@@ -164,8 +163,4 @@ public final class GeoPoint {
    public static GeoPoint fromGeohash(long geohashLong) {
        return new GeoPoint().resetFromGeoHash(geohashLong);
    }

    public static GeoPoint fromIndexLong(long indexLong) {
        return new GeoPoint().resetFromIndexHash(indexLong);
    }
}
@@ -28,7 +28,6 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper;

import static org.apache.lucene.spatial.util.GeoDistanceUtils.maxRadialDistanceMeters;

import java.io.IOException;

@@ -67,6 +66,9 @@ public class GeoUtils {
    /** Earth ellipsoid polar distance in meters */
    public static final double EARTH_POLAR_DISTANCE = Math.PI * EARTH_SEMI_MINOR_AXIS;

    /** rounding error for quantized latitude and longitude values */
    public static final double TOLERANCE = 1E-6;

    /** Returns the minimum between the provided distance 'initialRadius' and the
     * maximum distance/radius from the point 'center' before overlapping
     **/
@@ -468,6 +470,14 @@ public class GeoUtils {
        }
    }

    /** Returns the maximum distance/radius (in meters) from the point 'center' before overlapping */
    public static double maxRadialDistanceMeters(final double centerLat, final double centerLon) {
        if (Math.abs(centerLat) == MAX_LAT) {
            return SloppyMath.haversinMeters(centerLat, centerLon, 0, centerLon);
        }
        return SloppyMath.haversinMeters(centerLat, centerLon, centerLat, (MAX_LON + centerLon) % 360);
    }

    private GeoUtils() {
    }
}
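The maxRadialDistanceMeters helper added above measures how far a circle around 'center' can extend: either to the point at the same latitude on the opposite meridian, or, at a pole, down to the equator. A hedged standalone sketch of the same calculation, using a plain haversine in place of Lucene's SloppyMath.haversinMeters (the radius constant and names are assumptions for illustration):

public class MaxRadialDistanceExample {
    private static final double EARTH_MEAN_RADIUS_METERS = 6_371_008.7714;

    // Plain haversine great-circle distance, standing in for SloppyMath.haversinMeters.
    static double haversinMeters(double lat1, double lon1, double lat2, double lon2) {
        double dLat = Math.toRadians(lat2 - lat1);
        double dLon = Math.toRadians(lon2 - lon1);
        double a = Math.pow(Math.sin(dLat / 2), 2)
                + Math.cos(Math.toRadians(lat1)) * Math.cos(Math.toRadians(lat2)) * Math.pow(Math.sin(dLon / 2), 2);
        return 2 * EARTH_MEAN_RADIUS_METERS * Math.asin(Math.sqrt(a));
    }

    // Same shape as the new GeoUtils#maxRadialDistanceMeters above.
    static double maxRadialDistanceMeters(double centerLat, double centerLon) {
        if (Math.abs(centerLat) == 90.0) {
            return haversinMeters(centerLat, centerLon, 0, centerLon);
        }
        return haversinMeters(centerLat, centerLon, centerLat, (180.0 + centerLon) % 360);
    }

    public static void main(String[] args) {
        System.out.println(maxRadialDistanceMeters(48.0, 2.0));
    }
}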
@@ -45,6 +45,7 @@ import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.SmallFloat;

import java.io.IOException;
import java.util.Objects;
import java.util.Set;

/**
@@ -63,6 +64,19 @@ public final class AllTermQuery extends Query {
        this.term = term;
    }

    @Override
    public boolean equals(Object obj) {
        if (sameClassAs(obj) == false) {
            return false;
        }
        return Objects.equals(term, ((AllTermQuery) obj).term);
    }

    @Override
    public int hashCode() {
        return 31 * classHash() + term.hashCode();
    }

    @Override
    public Query rewrite(IndexReader reader) throws IOException {
        Query rewritten = super.rewrite(reader);
@@ -66,4 +66,14 @@ public class MatchNoDocsQuery extends Query {
    public String toString(String field) {
        return "MatchNoDocsQuery[\"" + reason + "\"]";
    }

    @Override
    public boolean equals(Object obj) {
        return sameClassAs(obj);
    }

    @Override
    public int hashCode() {
        return classHash();
    }
}
@@ -84,14 +84,14 @@ public class MoreLikeThisQuery extends Query {

    @Override
    public int hashCode() {
        return Objects.hash(super.hashCode(), boostTerms, boostTermsFactor, Arrays.hashCode(likeText),
        return Objects.hash(classHash(), boostTerms, boostTermsFactor, Arrays.hashCode(likeText),
                maxDocFreq, maxQueryTerms, maxWordLen, minDocFreq, minTermFrequency, minWordLen,
                Arrays.hashCode(moreLikeFields), minimumShouldMatch, stopWords);
    }

    @Override
    public boolean equals(Object obj) {
        if (super.equals(obj) == false) {
        if (sameClassAs(obj) == false) {
            return false;
        }
        MoreLikeThisQuery other = (MoreLikeThisQuery) obj;
@@ -238,7 +238,7 @@ public class MultiPhrasePrefixQuery extends Query {
     */
    @Override
    public boolean equals(Object o) {
        if (super.equals(o) == false) {
        if (sameClassAs(o) == false) {
            return false;
        }
        MultiPhrasePrefixQuery other = (MultiPhrasePrefixQuery) o;
@@ -252,7 +252,7 @@ public class MultiPhrasePrefixQuery extends Query {
     */
    @Override
    public int hashCode() {
        return super.hashCode()
        return classHash()
            ^ slop
            ^ termArraysHashCode()
            ^ positions.hashCode();
@@ -355,7 +355,7 @@ public class FiltersFunctionScoreQuery extends Query {
        if (this == o) {
            return true;
        }
        if (super.equals(o) == false) {
        if (sameClassAs(o) == false) {
            return false;
        }
        FiltersFunctionScoreQuery other = (FiltersFunctionScoreQuery) o;
@@ -367,6 +367,6 @@ public class FiltersFunctionScoreQuery extends Query {

    @Override
    public int hashCode() {
        return Objects.hash(super.hashCode(), subQuery, maxBoost, combineFunction, minScore, scoreMode, Arrays.hashCode(filterFunctions));
        return Objects.hash(classHash(), subQuery, maxBoost, combineFunction, minScore, scoreMode, Arrays.hashCode(filterFunctions));
    }
}
@@ -210,7 +210,7 @@ public class FunctionScoreQuery extends Query {
        if (this == o) {
            return true;
        }
        if (super.equals(o) == false) {
        if (sameClassAs(o) == false) {
            return false;
        }
        FunctionScoreQuery other = (FunctionScoreQuery) o;
@@ -221,6 +221,6 @@ public class FunctionScoreQuery extends Query {

    @Override
    public int hashCode() {
        return Objects.hash(super.hashCode(), subQuery.hashCode(), function, combineFunction, minScore, maxBoost);
        return Objects.hash(classHash(), subQuery.hashCode(), function, combineFunction, minScore, maxBoost);
    }
}
@@ -181,6 +181,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
        IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING,
        IndicesQueryCache.INDICES_CACHE_QUERY_SIZE_SETTING,
        IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING,
        IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING,
        IndicesTTLService.INDICES_TTL_INTERVAL_SETTING,
        MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING,
        MetaData.SETTING_READ_ONLY_SETTING,
@@ -116,6 +116,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
        IndexSettings.ALLOW_UNMAPPED,
        IndexSettings.INDEX_CHECK_ON_STARTUP,
        IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD,
        IndexSettings.MAX_SLICES_PER_SCROLL,
        ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING,
        IndexSettings.INDEX_GC_DELETES_SETTING,
        IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING,
@@ -67,6 +67,7 @@ import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.indices.AliasFilterParsingException;
import org.elasticsearch.indices.InvalidAliasNameException;
import org.elasticsearch.indices.cluster.IndicesClusterStateService;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.elasticsearch.indices.mapper.MapperRegistry;
import org.elasticsearch.threadpool.ThreadPool;
@@ -93,7 +94,7 @@ import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder;
/**
 *
 */
public final class IndexService extends AbstractIndexComponent implements IndexComponent, Iterable<IndexShard> {
public class IndexService extends AbstractIndexComponent implements IndicesClusterStateService.AllocatedIndex<IndexShard> {

    private final IndexEventListener eventListener;
    private final AnalysisService analysisService;
@@ -184,8 +185,8 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
    /**
     * Return the shard with the provided id, or null if there is no such shard.
     */
    @Nullable
    public IndexShard getShardOrNull(int shardId) {
    @Override
    public @Nullable IndexShard getShardOrNull(int shardId) {
        return shards.get(shardId);
    }

@@ -359,6 +360,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
        return primary == false && IndexMetaData.isIndexUsingShadowReplicas(indexSettings);
    }

    @Override
    public synchronized void removeShard(int shardId, String reason) {
        final ShardId sId = new ShardId(index(), shardId);
        final IndexShard indexShard;
@@ -470,6 +472,11 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
        return searchOperationListeners;
    }

    @Override
    public boolean updateMapping(IndexMetaData indexMetaData) throws IOException {
        return mapperService().updateMapping(indexMetaData);
    }

    private class StoreCloseListener implements Store.OnClose {
        private final ShardId shardId;
        private final boolean ownsShard;
@@ -617,6 +624,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
        return indexSettings.getIndexMetaData();
    }

    @Override
    public synchronized void updateMetaData(final IndexMetaData metadata) {
        final Translog.Durability oldTranslogDurability = indexSettings.getTranslogDurability();
        if (indexSettings.updateIndexMetaData(metadata)) {
@@ -121,6 +121,12 @@ public final class IndexSettings {
    public static final Setting<Integer> MAX_REFRESH_LISTENERS_PER_SHARD = Setting.intSetting("index.max_refresh_listeners", 1000, 0,
        Property.Dynamic, Property.IndexScope);

    /**
     * The maximum number of slices allowed in a scroll request
     */
    public static final Setting<Integer> MAX_SLICES_PER_SCROLL = Setting.intSetting("index.max_slices_per_scroll",
        1024, 1, Property.Dynamic, Property.IndexScope);

    private final Index index;
    private final Version version;
    private final ESLogger logger;
@@ -154,6 +160,11 @@ public final class IndexSettings {
     * The maximum number of refresh listeners allows on this shard.
     */
    private volatile int maxRefreshListeners;
    /**
     * The maximum number of slices allowed in a scroll request.
     */
    private volatile int maxSlicesPerScroll;

    /**
     * Returns the default search field for this index.
@@ -239,6 +250,7 @@ public final class IndexSettings {
        maxRescoreWindow = scopedSettings.get(MAX_RESCORE_WINDOW_SETTING);
        TTLPurgeDisabled = scopedSettings.get(INDEX_TTL_DISABLE_PURGE_SETTING);
        maxRefreshListeners = scopedSettings.get(MAX_REFRESH_LISTENERS_PER_SHARD);
        maxSlicesPerScroll = scopedSettings.get(MAX_SLICES_PER_SCROLL);
        this.mergePolicyConfig = new MergePolicyConfig(logger, this);
        assert indexNameMatcher.test(indexMetaData.getIndex().getName());

@@ -262,6 +274,7 @@ public final class IndexSettings {
        scopedSettings.addSettingsUpdateConsumer(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING, this::setTranslogFlushThresholdSize);
        scopedSettings.addSettingsUpdateConsumer(INDEX_REFRESH_INTERVAL_SETTING, this::setRefreshInterval);
        scopedSettings.addSettingsUpdateConsumer(MAX_REFRESH_LISTENERS_PER_SHARD, this::setMaxRefreshListeners);
        scopedSettings.addSettingsUpdateConsumer(MAX_SLICES_PER_SCROLL, this::setMaxSlicesPerScroll);
    }

    private void setTranslogFlushThresholdSize(ByteSizeValue byteSizeValue) {
@@ -391,7 +404,7 @@ public final class IndexSettings {
     *
     * @return <code>true</code> iff any setting has been updated otherwise <code>false</code>.
     */
    synchronized boolean updateIndexMetaData(IndexMetaData indexMetaData) {
    public synchronized boolean updateIndexMetaData(IndexMetaData indexMetaData) {
        final Settings newSettings = indexMetaData.getSettings();
        if (version.equals(Version.indexCreated(newSettings)) == false) {
            throw new IllegalArgumentException("version mismatch on settings update expected: " + version + " but was: " + Version.indexCreated(newSettings));
@@ -521,5 +534,16 @@ public final class IndexSettings {
        this.maxRefreshListeners = maxRefreshListeners;
    }

    /**
     * The maximum number of slices allowed in a scroll request.
     */
    public int getMaxSlicesPerScroll() {
        return maxSlicesPerScroll;
    }

    private void setMaxSlicesPerScroll(int value) {
        this.maxSlicesPerScroll = value;
    }

    IndexScopedSettings getScopedSettings() { return scopedSettings;}
}
@@ -20,6 +20,7 @@ package org.elasticsearch.index.analysis;

import org.apache.lucene.analysis.pattern.PatternReplaceCharFilter;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;

@@ -35,10 +36,11 @@ public class PatternReplaceCharFilterFactory extends AbstractCharFilterFactory {
    public PatternReplaceCharFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
        super(indexSettings, name);

        if (!Strings.hasLength(settings.get("pattern"))) {
        String sPattern = settings.get("pattern");
        if (!Strings.hasLength(sPattern)) {
            throw new IllegalArgumentException("pattern is missing for [" + name + "] char filter of type 'pattern_replace'");
        }
        pattern = Pattern.compile(settings.get("pattern"));
        pattern = Regex.compile(sPattern, settings.get("flags"));
        replacement = settings.get("replacement", ""); // when not set or set to "", use "".
    }
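The change above routes the char filter's pattern through the Regex helper so that an optional "flags" setting can be honored alongside the pattern. In plain Java terms, a textual flags value such as "CASE_INSENSITIVE|COMMENTS" corresponds to OR-ing the java.util.regex.Pattern constants when compiling; a hedged sketch (pattern and input are illustrative):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class PatternReplaceExample {
    public static void main(String[] args) {
        // Compile the pattern with flags, then replace matches, roughly what the
        // pattern_replace char filter does per character stream.
        Pattern pattern = Pattern.compile("foo\\s+bar", Pattern.CASE_INSENSITIVE | Pattern.COMMENTS);
        Matcher matcher = pattern.matcher("Foo   BAR baz");
        System.out.println(matcher.replaceAll("qux")); // prints "qux baz"
    }
}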
@@ -24,7 +24,6 @@ import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.apache.lucene.spatial.util.GeoEncodingUtils;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.BytesRef;
@@ -426,7 +425,7 @@ public final class OrdinalsBuilder implements Closeable {
            protected AcceptStatus accept(BytesRef term) throws IOException {
                // accept only the max resolution terms
                // todo is this necessary?
                return GeoEncodingUtils.getPrefixCodedShift(term) == GeoPointField.PRECISION_STEP * 4 ?
                return GeoPointField.getPrefixCodedShift(term) == GeoPointField.PRECISION_STEP * 4 ?
                    AcceptStatus.YES : AcceptStatus.END;
            }
        };
@@ -20,7 +20,6 @@

package org.elasticsearch.index.fielddata.plain;

import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.apache.lucene.spatial.util.GeoEncodingUtils;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefIterator;
import org.apache.lucene.util.CharsRefBuilder;
@@ -58,7 +57,7 @@ abstract class AbstractIndexGeoPointFieldData extends AbstractIndexFieldData<Ato
                return null;
            }
            if (termEncoding == GeoPointField.TermEncoding.PREFIX) {
                return GeoEncodingUtils.prefixCodedToGeoCoded(term);
                return GeoPointField.prefixCodedToGeoCoded(term);
            } else if (termEncoding == GeoPointField.TermEncoding.NUMERIC) {
                return LegacyNumericUtils.prefixCodedToLong(term);
            }
@@ -21,10 +21,13 @@ package org.elasticsearch.index.mapper;

import com.carrotsearch.hppc.ObjectHashSet;

import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.DelegatingAnalyzerWrapper;
import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Setting;
@@ -32,6 +35,7 @@ import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.mapper.Mapper.BuilderContext;
@@ -183,6 +187,45 @@ public class MapperService extends AbstractIndexComponent {
        }
    }

    public boolean updateMapping(IndexMetaData indexMetaData) throws IOException {
        assert indexMetaData.getIndex().equals(index()) : "index mismatch: expected " + index() + " but was " + indexMetaData.getIndex();
        // go over and add the relevant mappings (or update them)
        boolean requireRefresh = false;
        for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
            MappingMetaData mappingMd = cursor.value;
            String mappingType = mappingMd.type();
            CompressedXContent mappingSource = mappingMd.source();
            // refresh mapping can happen when the parsing/merging of the mapping from the metadata doesn't result in the same
            // mapping, in this case, we send to the master to refresh its own version of the mappings (to conform with the
            // merge version of it, which it does when refreshing the mappings), and warn log it.
            try {
                DocumentMapper existingMapper = documentMapper(mappingType);

                if (existingMapper == null || mappingSource.equals(existingMapper.mappingSource()) == false) {
                    String op = existingMapper == null ? "adding" : "updating";
                    if (logger.isDebugEnabled() && mappingSource.compressed().length < 512) {
                        logger.debug("[{}] {} mapping [{}], source [{}]", index(), op, mappingType, mappingSource.string());
                    } else if (logger.isTraceEnabled()) {
                        logger.trace("[{}] {} mapping [{}], source [{}]", index(), op, mappingType, mappingSource.string());
                    } else {
                        logger.debug("[{}] {} mapping [{}] (source suppressed due to length, use TRACE level if needed)", index(), op,
                            mappingType);
                    }
                    merge(mappingType, mappingSource, MergeReason.MAPPING_RECOVERY, true);
                    if (!documentMapper(mappingType).mappingSource().equals(mappingSource)) {
                        logger.debug("[{}] parsed mapping [{}], and got different sources\noriginal:\n{}\nparsed:\n{}", index(),
                            mappingType, mappingSource, documentMapper(mappingType).mappingSource());
                        requireRefresh = true;
                    }
                }
            } catch (Throwable e) {
                logger.warn("[{}] failed to add mapping [{}], source [{}]", e, index(), mappingType, mappingSource);
                throw e;
            }
        }
        return requireRefresh;
    }

    //TODO: make this atomic
    public void merge(Map<String, Map<String, Object>> mappings, boolean updateAllTypes) throws MapperParsingException {
        // first, add the default mapping
@@ -204,7 +204,7 @@ public class DateFieldMapper extends FieldMapper implements AllFieldMapper.Inclu
        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (!super.equals(o)) return false;
            if (sameClassAs(o) == false) return false;

            LateParsingQuery that = (LateParsingQuery) o;
            if (includeLower != that.includeLower) return false;
@@ -218,7 +218,7 @@ public class DateFieldMapper extends FieldMapper implements AllFieldMapper.Inclu

        @Override
        public int hashCode() {
            return Objects.hash(super.hashCode(), lowerTerm, upperTerm, includeLower, includeUpper, timeZone);
            return Objects.hash(classHash(), lowerTerm, upperTerm, includeLower, includeUpper, timeZone);
        }

        @Override
@@ -213,7 +213,7 @@ public class LegacyDateFieldMapper extends LegacyNumberFieldMapper {
        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (!super.equals(o)) return false;
            if (sameClassAs(o) == false) return false;

            LateParsingQuery that = (LateParsingQuery) o;
            if (includeLower != that.includeLower) return false;
@@ -227,7 +227,7 @@ public class LegacyDateFieldMapper extends LegacyNumberFieldMapper {

        @Override
        public int hashCode() {
            return Objects.hash(super.hashCode(), lowerTerm, upperTerm, includeLower, includeUpper, timeZone);
            return Objects.hash(classHash(), lowerTerm, upperTerm, includeLower, includeUpper, timeZone);
        }

        @Override
@@ -239,6 +239,13 @@ public class TypeParsers {
            Map.Entry<String, Object> entry = iterator.next();
            final String propName = entry.getKey();
            final Object propNode = entry.getValue();
            if (false == propName.equals("null_value") && propNode == null) {
                /*
                 * No properties *except* null_value are allowed to have null. So we catch it here and tell the user something useful rather
                 * than send them a null pointer exception later.
                 */
                throw new MapperParsingException("[" + propName + "] must not have a [null] value");
            }
            if (propName.equals("store")) {
                builder.store(parseStore(name, propNode.toString(), parserContext));
                iterator.remove();
@@ -195,7 +195,7 @@ public class TypeFieldMapper extends MetadataFieldMapper {

        @Override
        public boolean equals(Object obj) {
            if (super.equals(obj) == false) {
            if (sameClassAs(obj) == false) {
                return false;
            }
            TypeQuery that = (TypeQuery) obj;
@@ -204,7 +204,7 @@ public class TypeFieldMapper extends MetadataFieldMapper {

        @Override
        public int hashCode() {
            return 31 * super.hashCode() + type.hashCode();
            return 31 * classHash() + type.hashCode();
        }

        @Override
@@ -23,7 +23,6 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.InetAddressPoint;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.document.XInetAddressPoint;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.XPointValues;
@@ -176,7 +175,7 @@ public class IpFieldMapper extends FieldMapper implements AllFieldMapper.Include
            if (fields.length == 2) {
                InetAddress address = InetAddresses.forString(fields[0]);
                int prefixLength = Integer.parseInt(fields[1]);
                return XInetAddressPoint.newPrefixQuery(name(), address, prefixLength);
                return InetAddressPoint.newPrefixQuery(name(), address, prefixLength);
            } else {
                throw new IllegalArgumentException("Expected [ip/prefix] but was [" + term + "]");
            }
@@ -191,27 +190,27 @@ public class IpFieldMapper extends FieldMapper implements AllFieldMapper.Include
            failIfNotIndexed();
            InetAddress lower;
            if (lowerTerm == null) {
                lower = XInetAddressPoint.MIN_VALUE;
                lower = InetAddressPoint.MIN_VALUE;
            } else {
                lower = parse(lowerTerm);
                if (includeLower == false) {
                    if (lower.equals(XInetAddressPoint.MAX_VALUE)) {
                    if (lower.equals(InetAddressPoint.MAX_VALUE)) {
                        return new MatchNoDocsQuery();
                    }
                    lower = XInetAddressPoint.nextUp(lower);
                    lower = InetAddressPoint.nextUp(lower);
                }
            }

            InetAddress upper;
            if (upperTerm == null) {
                upper = XInetAddressPoint.MAX_VALUE;
                upper = InetAddressPoint.MAX_VALUE;
            } else {
                upper = parse(upperTerm);
                if (includeUpper == false) {
                    if (upper.equals(XInetAddressPoint.MIN_VALUE)) {
                    if (upper.equals(InetAddressPoint.MIN_VALUE)) {
                        return new MatchNoDocsQuery();
                    }
                    upper = XInetAddressPoint.nextDown(upper);
                    upper = InetAddressPoint.nextDown(upper);
                }
            }

@@ -23,7 +23,6 @@ import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.apache.lucene.spatial.geopoint.search.XGeoPointDistanceRangeQuery;
import org.apache.lucene.spatial.util.GeoDistanceUtils;
import org.elasticsearch.Version;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;
@@ -48,8 +47,6 @@ import java.util.Locale;
import java.util.Objects;
import java.util.Optional;

import static org.apache.lucene.spatial.util.GeoEncodingUtils.TOLERANCE;

public class GeoDistanceRangeQueryBuilder extends AbstractQueryBuilder<GeoDistanceRangeQueryBuilder> {

    public static final String NAME = "geo_distance_range";
@@ -354,7 +351,7 @@ public class GeoDistanceRangeQueryBuilder extends AbstractQueryBuilder<GeoDistan
                toValue = geoDistance.normalize(toValue, DistanceUnit.DEFAULT);
            }
        } else {
            toValue = GeoDistanceUtils.maxRadialDistanceMeters(point.lat(), point.lon());
            toValue = GeoUtils.maxRadialDistanceMeters(point.lat(), point.lon());
        }

        final Version indexVersionCreated = context.indexVersionCreated();
@@ -371,8 +368,8 @@ public class GeoDistanceRangeQueryBuilder extends AbstractQueryBuilder<GeoDistan
            GeoPointField.TermEncoding.NUMERIC : GeoPointField.TermEncoding.PREFIX;

        return new XGeoPointDistanceRangeQuery(fieldType.name(), encoding, point.lat(), point.lon(),
            (includeLower) ? fromValue : fromValue + TOLERANCE,
            (includeUpper) ? toValue : toValue - TOLERANCE);
            (includeLower) ? fromValue : fromValue + GeoUtils.TOLERANCE,
            (includeUpper) ? toValue : toValue - GeoUtils.TOLERANCE);
    }

    @Override
@@ -411,7 +411,7 @@ public class HasChildQueryBuilder extends AbstractQueryBuilder<HasChildQueryBuil

        @Override
        public boolean equals(Object o) {
            if (!super.equals(o)) return false;
            if (sameClassAs(o) == false) return false;

            LateParsingQuery that = (LateParsingQuery) o;

@@ -425,7 +425,7 @@ public class HasChildQueryBuilder extends AbstractQueryBuilder<HasChildQueryBuil

        @Override
        public int hashCode() {
            return Objects.hash(super.hashCode(), toQuery, innerQuery, minChildren, maxChildren, parentType, scoreMode);
            return Objects.hash(classHash(), toQuery, innerQuery, minChildren, maxChildren, parentType, scoreMode);
        }

        @Override
@@ -184,7 +184,7 @@ public class ScriptQueryBuilder extends AbstractQueryBuilder<ScriptQueryBuilder>
        public boolean equals(Object obj) {
            if (this == obj)
                return true;
            if (!super.equals(obj))
            if (sameClassAs(obj) == false)
                return false;
            ScriptQuery other = (ScriptQuery) obj;
            return Objects.equals(script, other.script);
@@ -192,7 +192,7 @@ public class ScriptQueryBuilder extends AbstractQueryBuilder<ScriptQueryBuilder>

        @Override
        public int hashCode() {
            return Objects.hash(super.hashCode(), script);
            return Objects.hash(classHash(), script);
        }

        @Override
@@ -190,7 +190,7 @@ public class GeoDistanceRangeQuery extends Query {
    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (super.equals(o) == false) return false;
        if (sameClassAs(o) == false) return false;

        GeoDistanceRangeQuery filter = (GeoDistanceRangeQuery) o;

@@ -212,7 +212,7 @@ public class GeoDistanceRangeQuery extends Query {

    @Override
    public int hashCode() {
        int result = super.hashCode();
        int result = classHash();
        long temp;
        temp = lat != +0.0d ? Double.doubleToLongBits(lat) : 0L;
        result = 31 * result + Long.hashCode(temp);
@@ -111,7 +111,7 @@ public class GeoPolygonQuery extends Query {

    @Override
    public boolean equals(Object obj) {
        if (super.equals(obj) == false) {
        if (sameClassAs(obj) == false) {
            return false;
        }
        GeoPolygonQuery that = (GeoPolygonQuery) obj;
@@ -121,7 +121,7 @@ public class GeoPolygonQuery extends Query {

    @Override
    public int hashCode() {
        int h = super.hashCode();
        int h = classHash();
        h = 31 * h + indexFieldData.getFieldName().hashCode();
        h = 31 * h + Arrays.hashCode(points);
        return h;
@@ -84,7 +84,7 @@ public class InMemoryGeoBoundingBoxQuery extends Query {

    @Override
    public boolean equals(Object obj) {
        if (super.equals(obj) == false) {
        if (sameClassAs(obj) == false) {
            return false;
        }
        InMemoryGeoBoundingBoxQuery other = (InMemoryGeoBoundingBoxQuery) obj;
@@ -95,7 +95,7 @@ public class InMemoryGeoBoundingBoxQuery extends Query {

    @Override
    public int hashCode() {
        return Objects.hash(super.hashCode(), fieldName(), topLeft, bottomRight);
        return Objects.hash(classHash(), fieldName(), topLeft, bottomRight);
    }

    private static class Meridian180GeoBoundingBoxBits implements Bits {
@@ -62,13 +62,6 @@ public class CommitPoint {
        public String checksum() {
            return checksum;
        }

        public boolean isSame(StoreFileMetaData md) {
            if (checksum == null || md.checksum() == null) {
                return false;
            }
            return length == md.length() && checksum.equals(md.checksum());
        }
    }

    public static enum Type {
@ -37,8 +37,6 @@ import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
|
|||
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MappingMetaData;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.routing.RestoreSource;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.cluster.routing.ShardRoutingState;
|
||||
import org.elasticsearch.common.Booleans;
|
||||
|
@ -108,6 +106,7 @@ import org.elasticsearch.index.warmer.ShardIndexWarmerService;
|
|||
import org.elasticsearch.index.warmer.WarmerStats;
|
||||
import org.elasticsearch.indices.IndexingMemoryController;
|
||||
import org.elasticsearch.indices.IndicesService;
|
||||
import org.elasticsearch.indices.cluster.IndicesClusterStateService;
|
||||
import org.elasticsearch.indices.recovery.RecoveryFailedException;
|
||||
import org.elasticsearch.indices.recovery.RecoveryState;
|
||||
import org.elasticsearch.indices.recovery.RecoveryTargetService;
|
||||
|
@ -136,7 +135,7 @@ import java.util.function.Consumer;
|
|||
import java.util.function.BiConsumer;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
public class IndexShard extends AbstractIndexShardComponent {
|
||||
public class IndexShard extends AbstractIndexShardComponent implements IndicesClusterStateService.Shard {
|
||||
|
||||
private final ThreadPool threadPool;
|
||||
private final MapperService mapperService;
|
||||
|
@@ -338,6 +337,7 @@ public class IndexShard extends AbstractIndexShardComponent {
    /**
     * Returns the latest cluster routing entry received with this shard.
     */
    @Override
    public ShardRouting routingEntry() {
        return this.shardRouting;
    }

@@ -348,13 +348,12 @@ public class IndexShard extends AbstractIndexShardComponent {
    /**
     * Updates the shards routing entry. This mutate the shards internal state depending
     * on the changes that get introduced by the new routing value. This method will persist shard level metadata
     * unless explicitly disabled.
     * on the changes that get introduced by the new routing value. This method will persist shard level metadata.
     *
     * @throws IndexShardRelocatedException if shard is marked as relocated and relocation aborted
     * @throws IOException if shard state could not be persisted
     */
    public void updateRoutingEntry(final ShardRouting newRouting, final boolean persistState) throws IOException {
    public void updateRoutingEntry(final ShardRouting newRouting) throws IOException {
        final ShardRouting currentRouting = this.shardRouting;
        if (!newRouting.shardId().equals(shardId())) {
            throw new IllegalArgumentException("Trying to set a routing entry with shardId " + newRouting.shardId() + " on a shard with shardId " + shardId() + "");

@@ -408,9 +407,7 @@ public class IndexShard extends AbstractIndexShardComponent {
        }
        this.shardRouting = newRouting;
        indexEventListener.shardRoutingChanged(this, currentRouting, newRouting);
        if (persistState) {
            persistMetadata(newRouting, currentRouting);
        }
        persistMetadata(newRouting, currentRouting);
    }

    /**

@@ -589,7 +586,7 @@ public class IndexShard extends AbstractIndexShardComponent {
     */
    public void refresh(String source) {
        verifyNotClosed();

        if (canIndex()) {
            long bytes = getEngine().getIndexBufferRAMBytesUsed();
            writingBytes.addAndGet(bytes);

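Taken together with the new `implements IndicesClusterStateService.Shard` clause above, the `@Override` on `routingEntry()` and the simplified `updateRoutingEntry(ShardRouting)` suggest a shard-facing callback contract roughly like the following. This is only a reconstruction inferred from the overridden methods visible in this diff; the real interface may declare more, or differently named, members:

import java.io.IOException;

import org.elasticsearch.cluster.routing.ShardRouting;

// Hypothetical reconstruction of the shard contract that IndexShard now implements.
// Only the two members visible in this diff are listed; everything else is assumed.
interface ShardLike {

    /** Latest cluster routing entry received by this shard. */
    ShardRouting routingEntry();

    /** Applies a new routing entry; shard-level metadata is always persisted now. */
    void updateRoutingEntry(ShardRouting newRouting) throws IOException;
}
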
@ -1370,35 +1367,36 @@ public class IndexShard extends AbstractIndexShardComponent {
|
|||
return this.currentEngineReference.get();
|
||||
}
|
||||
|
||||
public void startRecovery(DiscoveryNode localNode, DiscoveryNode sourceNode, RecoveryTargetService recoveryTargetService,
|
||||
public void startRecovery(RecoveryState recoveryState, RecoveryTargetService recoveryTargetService,
|
||||
RecoveryTargetService.RecoveryListener recoveryListener, RepositoriesService repositoriesService,
|
||||
BiConsumer<String, MappingMetaData> mappingUpdateConsumer, IndicesService indicesService) {
|
||||
final RestoreSource restoreSource = shardRouting.restoreSource();
|
||||
|
||||
if (shardRouting.isPeerRecovery()) {
|
||||
assert sourceNode != null : "peer recovery started but sourceNode is null";
|
||||
// we don't mark this one as relocated at the end.
|
||||
// For primaries: requests in any case are routed to both when its relocating and that way we handle
|
||||
// the edge case where its mark as relocated, and we might need to roll it back...
|
||||
// For replicas: we are recovering a backup from a primary
|
||||
RecoveryState.Type type = shardRouting.primary() ? RecoveryState.Type.PRIMARY_RELOCATION : RecoveryState.Type.REPLICA;
|
||||
RecoveryState recoveryState = new RecoveryState(shardId(), shardRouting.primary(), type, sourceNode, localNode);
|
||||
try {
|
||||
markAsRecovering("from " + sourceNode, recoveryState);
|
||||
recoveryTargetService.startRecovery(this, type, sourceNode, recoveryListener);
|
||||
} catch (Throwable e) {
|
||||
failShard("corrupted preexisting index", e);
|
||||
recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(shardId, sourceNode, localNode, e), true);
|
||||
}
|
||||
} else if (restoreSource == null) {
|
||||
// recover from filesystem store
|
||||
|
||||
IndexMetaData indexMetaData = indexSettings().getIndexMetaData();
|
||||
Index mergeSourceIndex = indexMetaData.getMergeSourceIndex();
|
||||
final boolean recoverFromLocalShards = mergeSourceIndex != null && shardRouting.allocatedPostIndexCreate(indexMetaData) == false && shardRouting.primary();
|
||||
final RecoveryState recoveryState = new RecoveryState(shardId(), shardRouting.primary(),
|
||||
recoverFromLocalShards ? RecoveryState.Type.LOCAL_SHARDS : RecoveryState.Type.STORE, localNode, localNode);
|
||||
if (recoverFromLocalShards) {
|
||||
BiConsumer<String, MappingMetaData> mappingUpdateConsumer,
|
||||
IndicesService indicesService) {
|
||||
switch (recoveryState.getType()) {
|
||||
case PRIMARY_RELOCATION:
|
||||
case REPLICA:
|
||||
try {
|
||||
markAsRecovering("from " + recoveryState.getSourceNode(), recoveryState);
|
||||
recoveryTargetService.startRecovery(this, recoveryState.getType(), recoveryState.getSourceNode(), recoveryListener);
|
||||
} catch (Throwable e) {
|
||||
failShard("corrupted preexisting index", e);
|
||||
recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(recoveryState, null, e), true);
|
||||
}
|
||||
break;
|
||||
case STORE:
|
||||
markAsRecovering("from store", recoveryState); // mark the shard as recovering on the cluster state thread
|
||||
threadPool.generic().execute(() -> {
|
||||
try {
|
||||
if (recoverFromStore()) {
|
||||
recoveryListener.onRecoveryDone(recoveryState);
|
||||
}
|
||||
} catch (Throwable t) {
|
||||
recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(recoveryState, null, t), true);
|
||||
}
|
||||
});
|
||||
break;
|
||||
case LOCAL_SHARDS:
|
||||
final IndexMetaData indexMetaData = indexSettings().getIndexMetaData();
|
||||
final Index mergeSourceIndex = indexMetaData.getMergeSourceIndex();
|
||||
final List<IndexShard> startedShards = new ArrayList<>();
|
||||
final IndexService sourceIndexService = indicesService.indexService(mergeSourceIndex);
|
||||
final int numShards = sourceIndexService != null ? sourceIndexService.getIndexSettings().getNumberOfShards() : -1;
|
||||
|
@ -1414,14 +1412,14 @@ public class IndexShard extends AbstractIndexShardComponent {
|
|||
threadPool.generic().execute(() -> {
|
||||
try {
|
||||
final Set<ShardId> shards = IndexMetaData.selectShrinkShards(shardId().id(), sourceIndexService.getMetaData(),
|
||||
indexMetaData.getNumberOfShards());
|
||||
+ indexMetaData.getNumberOfShards());
|
||||
if (recoverFromLocalShards(mappingUpdateConsumer, startedShards.stream()
|
||||
.filter((s) -> shards.contains(s.shardId())).collect(Collectors.toList()))) {
|
||||
recoveryListener.onRecoveryDone(recoveryState);
|
||||
}
|
||||
} catch (Throwable t) {
|
||||
recoveryListener.onRecoveryFailure(recoveryState,
|
||||
new RecoveryFailedException(shardId, localNode, localNode, t), true);
|
||||
new RecoveryFailedException(recoveryState, null, t), true);
|
||||
}
|
||||
});
|
||||
} else {
|
||||
|
@ -1433,36 +1431,25 @@ public class IndexShard extends AbstractIndexShardComponent {
|
|||
+ " are started yet, expected " + numShards + " found " + startedShards.size() + " can't recover shard "
|
||||
+ shardId());
|
||||
}
|
||||
recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(shardId, localNode, localNode, t), true);
|
||||
recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(recoveryState, null, t), true);
|
||||
}
|
||||
} else {
|
||||
markAsRecovering("from store", recoveryState); // mark the shard as recovering on the cluster state thread
|
||||
break;
|
||||
case SNAPSHOT:
|
||||
markAsRecovering("from snapshot", recoveryState); // mark the shard as recovering on the cluster state thread
|
||||
threadPool.generic().execute(() -> {
|
||||
try {
|
||||
if (recoverFromStore()) {
|
||||
final IndexShardRepository indexShardRepository = repositoriesService.indexShardRepository(
|
||||
recoveryState.getRestoreSource().snapshot().getRepository());
|
||||
if (restoreFromRepository(indexShardRepository)) {
|
||||
recoveryListener.onRecoveryDone(recoveryState);
|
||||
}
|
||||
} catch (Throwable t) {
|
||||
recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(shardId, sourceNode, localNode, t), true);
|
||||
} catch (Throwable first) {
|
||||
recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(recoveryState, null, first), true);
|
||||
}
|
||||
|
||||
});
|
||||
}
|
||||
} else {
|
||||
// recover from a restore
|
||||
final RecoveryState recoveryState = new RecoveryState(shardId(), shardRouting.primary(),
|
||||
RecoveryState.Type.SNAPSHOT, shardRouting.restoreSource(), localNode);
|
||||
markAsRecovering("from snapshot", recoveryState); // mark the shard as recovering on the cluster state thread
|
||||
threadPool.generic().execute(() -> {
|
||||
try {
|
||||
final IndexShardRepository indexShardRepository = repositoriesService.indexShardRepository(restoreSource.snapshot().getRepository());
|
||||
if (restoreFromRepository(indexShardRepository)) {
|
||||
recoveryListener.onRecoveryDone(recoveryState);
|
||||
}
|
||||
} catch (Throwable first) {
|
||||
recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(shardId, sourceNode, localNode, first), true);
|
||||
}
|
||||
});
|
||||
break;
|
||||
default:
|
||||
throw new IllegalArgumentException("Unknown recovery type " + recoveryState.getType());
|
||||
}
|
||||
}
|
||||
|
||||
|
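The startRecovery hunks above replace the old if/else chain (peer recovery vs. local store vs. snapshot restore) with a single switch on RecoveryState.Type, and the caller now hands in a pre-built RecoveryState instead of the local and source nodes. A stripped-down sketch of that dispatch shape; the enum and class names below are stand-ins, not the commit's own types:

// Shape of the new recovery dispatch: one switch over the recovery type instead of
// nested if/else blocks. RecoveryKind stands in for RecoveryState.Type here.
enum RecoveryKind { PRIMARY_RELOCATION, REPLICA, STORE, SNAPSHOT, LOCAL_SHARDS }

final class RecoveryDispatchSketch {

    void startRecovery(RecoveryKind kind, Runnable peerRecovery, Runnable storeRecovery,
                       Runnable snapshotRestore, Runnable localShardsRecovery) {
        switch (kind) {
            case PRIMARY_RELOCATION:
            case REPLICA:
                peerRecovery.run();          // recover from another node
                break;
            case STORE:
                storeRecovery.run();         // recover from the local store
                break;
            case SNAPSHOT:
                snapshotRestore.run();       // restore from a repository
                break;
            case LOCAL_SHARDS:
                localShardsRecovery.run();   // shrink: recover from local source shards
                break;
            default:
                throw new IllegalArgumentException("Unknown recovery type " + kind);
        }
    }
}
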
@@ -1472,7 +1459,7 @@ public class IndexShard extends AbstractIndexShardComponent {
    // called by the current engine
    @Override
    public void onFailedEngine(String reason, @Nullable Throwable failure) {
        final ShardFailure shardFailure = new ShardFailure(shardRouting, reason, failure, getIndexUUID());
        final ShardFailure shardFailure = new ShardFailure(shardRouting, reason, failure);
        for (Callback<ShardFailure> listener : delegates) {
            try {
                listener.handle(shardFailure);

@@ -1661,13 +1648,11 @@ public class IndexShard extends AbstractIndexShardComponent {
        public final String reason;
        @Nullable
        public final Throwable cause;
        public final String indexUUID;

        public ShardFailure(ShardRouting routing, String reason, @Nullable Throwable cause, String indexUUID) {
        public ShardFailure(ShardRouting routing, String reason, @Nullable Throwable cause) {
            this.routing = routing;
            this.reason = reason;
            this.cause = cause;
            this.indexUUID = indexUUID;
        }
    }

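ShardFailure loses its indexUUID field here, so anything registered through addShardFailureCallback only sees the routing entry, reason, and cause; the index is still reachable via the routing entry. A hedged sketch of such a callback; the logging and the registration comment are assumptions, not part of the diff:

import org.elasticsearch.common.util.Callback;
import org.elasticsearch.index.shard.IndexShard;

// Sketch of a shard-failure callback after the indexUUID removal: the index is
// still reachable through the routing entry, so nothing is lost for logging.
final class LoggingShardFailureCallback implements Callback<IndexShard.ShardFailure> {

    @Override
    public void handle(IndexShard.ShardFailure failure) {
        System.err.println("shard " + failure.routing.shardId()
                + " failed (" + failure.reason + ")"
                + (failure.cause != null ? ": " + failure.cause : ""));
    }
}

// usage (hypothetical): indexShard.addShardFailureCallback(new LoggingShardFailureCallback());
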
@@ -68,15 +68,16 @@ public final class RefreshListeners implements ReferenceManager.RefreshListener
     * @param location the location to listen for
     * @param listener for the refresh. Called with true if registering the listener ran it out of slots and forced a refresh. Called with
     *        false otherwise.
     * @return did we call the listener (true) or register the listener to call later (false)?
     */
    public void addOrNotify(Translog.Location location, Consumer<Boolean> listener) {
    public boolean addOrNotify(Translog.Location location, Consumer<Boolean> listener) {
        requireNonNull(listener, "listener cannot be null");
        requireNonNull(location, "location cannot be null");

        if (lastRefreshedLocation != null && lastRefreshedLocation.compareTo(location) >= 0) {
            // Location already visible, just call the listener
            listener.accept(false);
            return;
            return true;
        }
        synchronized (this) {
            if (refreshListeners == null) {

@@ -85,12 +86,13 @@ public final class RefreshListeners implements ReferenceManager.RefreshListener
            if (refreshListeners.size() < getMaxRefreshListeners.getAsInt()) {
                // We have a free slot so register the listener
                refreshListeners.add(new Tuple<>(location, listener));
                return;
                return false;
            }
        }
        // No free slot so force a refresh and call the listener in this thread
        forceRefresh.run();
        listener.accept(true);
        return true;
    }

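addOrNotify now reports whether the listener already fired (true) or was parked for a later refresh (false). A sketch of how a caller might branch on that return value; the RefreshListeners instance and the bookkeeping around it are assumed, not shown in the diff:

import java.util.function.Consumer;

import org.elasticsearch.index.shard.RefreshListeners;
import org.elasticsearch.index.translog.Translog;

// Hypothetical caller: only keep extra bookkeeping around if the listener was parked.
final class WaitForRefreshSketch {

    boolean waitForRefresh(RefreshListeners refreshListeners, Translog.Location location) {
        Consumer<Boolean> listener = forcedRefresh -> {
            if (forcedRefresh) {
                // registering this listener ran the pool out of slots and forced a refresh
                System.out.println("refresh was forced to make " + location + " visible");
            }
        };
        boolean calledInline = refreshListeners.addOrNotify(location, listener);
        if (calledInline == false) {
            // listener is parked; it fires on the next refresh that covers `location`
        }
        return calledInline;
    }
}
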
/**
|
||||
|
@ -135,14 +137,14 @@ public final class RefreshListeners implements ReferenceManager.RefreshListener
|
|||
*/
|
||||
return;
|
||||
}
|
||||
// First check if we've actually moved forward. If not then just bail immediately.
|
||||
assert lastRefreshedLocation == null || currentRefreshLocation.compareTo(lastRefreshedLocation) >= 0;
|
||||
if (lastRefreshedLocation != null && currentRefreshLocation.compareTo(lastRefreshedLocation) == 0) {
|
||||
return;
|
||||
}
|
||||
/*
|
||||
* Set the lastRefreshedLocation so listeners that come in for locations before that will just execute inline without messing
|
||||
* around with refreshListeners or synchronizing at all.
|
||||
* around with refreshListeners or synchronizing at all. Note that it is not safe for us to abort early if we haven't advanced the
|
||||
* position here because we set and read lastRefreshedLocation outside of a synchronized block. We do that so that waiting for a
|
||||
* refresh that has already passed is just a volatile read but the cost is that any check whether or not we've advanced the
|
||||
* position will introduce a race between adding the listener and the position check. We could work around this by moving this
|
||||
* assignment into the synchronized block below and double checking lastRefreshedLocation in addOrNotify's synchronized block but
|
||||
* that doesn't seem worth it given that we already skip this process early if there aren't any listeners to iterate.
|
||||
*/
|
||||
lastRefreshedLocation = currentRefreshLocation;
|
||||
/*
|
||||
|
|
|
@@ -59,16 +59,16 @@ public final class ShadowIndexShard extends IndexShard {

    /**
     * In addition to the regular accounting done in
     * {@link IndexShard#updateRoutingEntry(ShardRouting, boolean)},
     * {@link IndexShard#updateRoutingEntry(ShardRouting)},
     * if this shadow replica needs to be promoted to a primary, the shard is
     * failed in order to allow a new primary to be re-allocated.
     */
    @Override
    public void updateRoutingEntry(ShardRouting newRouting, boolean persistState) throws IOException {
    public void updateRoutingEntry(ShardRouting newRouting) throws IOException {
        if (newRouting.primary() == true) {// becoming a primary
            throw new IllegalStateException("can't promote shard to primary");
        }
        super.updateRoutingEntry(newRouting, persistState);
        super.updateRoutingEntry(newRouting);
    }

    @Override

@@ -131,16 +131,7 @@ final class StoreRecovery {
    }

    final void addIndices(RecoveryState.Index indexRecoveryStats, Directory target, Directory... sources) throws IOException {
        /*
         * TODO: once we upgraded to Lucene 6.1 use HardlinkCopyDirectoryWrapper to enable hardlinks if possible and enable it
         * in the security.policy:
         *
         * grant codeBase "${codebase.lucene-misc-6.1.0.jar}" {
         *   // needed to allow shard shrinking to use hard-links if possible via lucenes HardlinkCopyDirectoryWrapper
         *   permission java.nio.file.LinkPermission "hard";
         * };
         * target = new org.apache.lucene.store.HardlinkCopyDirectoryWrapper(target);
         */
        target = new org.apache.lucene.store.HardlinkCopyDirectoryWrapper(target);
        try (IndexWriter writer = new IndexWriter(new StatsDirectoryWrapper(target, indexRecoveryStats),
            new IndexWriterConfig(null)
                .setCommitOnClose(false)

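With the Lucene dependency bumped to a 6.1 snapshot, the TODO above is resolved: the target directory is wrapped in HardlinkCopyDirectoryWrapper directly, so shrink recoveries can hard-link segment files instead of copying them. A minimal standalone sketch of that wrapper in use; the paths and the fallback comment are illustrative, not taken from the commit:

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.HardlinkCopyDirectoryWrapper;
import org.apache.lucene.store.IOContext;

// Sketch: copyFrom(...) on the wrapped directory attempts a hard link first and
// falls back to a regular copy if the filesystem or security policy forbids it.
public final class HardlinkCopySketch {

    public static void main(String[] args) throws IOException {
        try (Directory source = FSDirectory.open(Paths.get("/tmp/source-shard"));
             Directory target = new HardlinkCopyDirectoryWrapper(FSDirectory.open(Paths.get("/tmp/target-shard")))) {
            for (String file : source.listAll()) {
                target.copyFrom(source, file, file, IOContext.DEFAULT);
            }
        }
    }
}
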
@ -23,6 +23,8 @@ import org.apache.lucene.index.CorruptIndexException;
|
|||
import org.apache.lucene.index.IndexCommit;
|
||||
import org.apache.lucene.index.IndexFormatTooNewException;
|
||||
import org.apache.lucene.index.IndexFormatTooOldException;
|
||||
import org.apache.lucene.index.IndexWriter;
|
||||
import org.apache.lucene.index.IndexWriterConfig;
|
||||
import org.apache.lucene.index.SegmentInfos;
|
||||
import org.apache.lucene.store.IOContext;
|
||||
import org.apache.lucene.store.IndexInput;
|
||||
|
@ -49,7 +51,6 @@ import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.ByteSizeValue;
|
||||
import org.elasticsearch.common.util.iterable.Iterables;
|
||||
import org.elasticsearch.index.IndexService;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.index.snapshots.IndexShardRepository;
|
||||
import org.elasticsearch.index.snapshots.IndexShardRestoreFailedException;
|
||||
|
@ -458,7 +459,9 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
|
|||
}
|
||||
if (latest >= 0) {
|
||||
try {
|
||||
return new Tuple<>(indexShardSnapshotsFormat.read(blobContainer, Integer.toString(latest)), latest);
|
||||
final BlobStoreIndexShardSnapshots shardSnapshots =
|
||||
indexShardSnapshotsFormat.read(blobContainer, Integer.toString(latest));
|
||||
return new Tuple<>(shardSnapshots, latest);
|
||||
} catch (IOException e) {
|
||||
logger.warn("failed to read index file [{}]", e, SNAPSHOT_INDEX_PREFIX + latest);
|
||||
}
|
||||
|
@ -503,10 +506,8 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
|
|||
*/
|
||||
public SnapshotContext(SnapshotId snapshotId, ShardId shardId, IndexShardSnapshotStatus snapshotStatus) {
|
||||
super(snapshotId, Version.CURRENT, shardId);
|
||||
IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
|
||||
store = indexService.getShardOrNull(shardId.id()).store();
|
||||
this.snapshotStatus = snapshotStatus;
|
||||
|
||||
store = indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id()).store();
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -788,8 +789,8 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
|
|||
*/
|
||||
public RestoreContext(SnapshotId snapshotId, Version version, ShardId shardId, ShardId snapshotShardId, RecoveryState recoveryState) {
|
||||
super(snapshotId, version, shardId, snapshotShardId);
|
||||
store = indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id()).store();
|
||||
this.recoveryState = recoveryState;
|
||||
store = indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id()).store();
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -800,6 +801,25 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
|
|||
try {
|
||||
logger.debug("[{}] [{}] restoring to [{}] ...", snapshotId, repositoryName, shardId);
|
||||
BlobStoreIndexShardSnapshot snapshot = loadSnapshot();
|
||||
|
||||
if (snapshot.indexFiles().size() == 1
|
||||
&& snapshot.indexFiles().get(0).physicalName().startsWith("segments_")
|
||||
&& snapshot.indexFiles().get(0).hasUnknownChecksum()) {
|
||||
// If the shard has no documents, it will only contain a single segments_N file for the
|
||||
// shard's snapshot. If we are restoring a snapshot created by a previous supported version,
|
||||
// it is still possible that in that version, an empty shard has a segments_N file with an unsupported
|
||||
// version (and no checksum), because we don't know the Lucene version to assign segments_N until we
|
||||
// have written some data. Since the segments_N for an empty shard could have an incompatible Lucene
|
||||
// version number and no checksum, even though the index itself is perfectly fine to restore, this
|
||||
// empty shard would cause exceptions to be thrown. Since there is no data to restore from an empty
|
||||
// shard anyway, we just create the empty shard here and then exit.
|
||||
IndexWriter writer = new IndexWriter(store.directory(), new IndexWriterConfig(null)
|
||||
.setOpenMode(IndexWriterConfig.OpenMode.CREATE)
|
||||
.setCommitOnClose(true));
|
||||
writer.close();
|
||||
return;
|
||||
}
|
||||
|
||||
SnapshotFiles snapshotFiles = new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles());
|
||||
final Store.MetadataSnapshot recoveryTargetMetadata;
|
||||
try {
|
||||
|
|
|
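The restore hunk above special-cases a snapshot whose only file is a segments_N with an unknown checksum: instead of copying anything from the repository, it writes a fresh empty commit into the shard's store and returns. Creating such an empty commit boils down to opening and immediately closing an IndexWriter in CREATE mode, roughly as below; the directory path is illustrative:

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

// Sketch: an empty shard is "restored" by writing a brand new, empty commit point.
public final class EmptyShardCommitSketch {

    public static void main(String[] args) throws IOException {
        try (Directory dir = FSDirectory.open(Paths.get("/tmp/restored-shard/index"))) {
            IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(null)
                    .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
                    .setCommitOnClose(true));
            writer.close(); // closing commits an empty segments_N with a current-version checksum
        }
    }
}
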
@ -22,7 +22,6 @@ package org.elasticsearch.index.snapshots.blobstore;
|
|||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.Version;
|
||||
import org.elasticsearch.ElasticsearchParseException;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.ParseField;
|
||||
import org.elasticsearch.common.ParseFieldMatcher;
|
||||
import org.elasticsearch.common.Strings;
|
||||
|
@ -50,6 +49,8 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil
|
|||
* Information about snapshotted file
|
||||
*/
|
||||
public static class FileInfo {
|
||||
private static final String UNKNOWN_CHECKSUM = "_na_";
|
||||
|
||||
private final String name;
|
||||
private final ByteSizeValue partSize;
|
||||
private final long partBytes;
|
||||
|
@@ -207,27 +208,43 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil
         * @return true if file in a store this this file have the same checksum and length
         */
        public boolean isSame(FileInfo fileInfo) {
            if (numberOfParts != fileInfo.numberOfParts) return false;
            if (partBytes != fileInfo.partBytes) return false;
            if (!name.equals(fileInfo.name)) return false;
            if (numberOfParts != fileInfo.numberOfParts) {
                return false;
            }
            if (partBytes != fileInfo.partBytes) {
                return false;
            }
            if (!name.equals(fileInfo.name)) {
                return false;
            }
            if (partSize != null) {
                if (!partSize.equals(fileInfo.partSize)) return false;
                if (!partSize.equals(fileInfo.partSize)) {
                    return false;
                }
            } else {
                if (fileInfo.partSize != null) return false;
                if (fileInfo.partSize != null) {
                    return false;
                }
            }
            return metadata.isSame(fileInfo.metadata);
        }

static final class Fields {
|
||||
static final String NAME = "name";
|
||||
static final String PHYSICAL_NAME = "physical_name";
|
||||
static final String LENGTH = "length";
|
||||
static final String CHECKSUM = "checksum";
|
||||
static final String PART_SIZE = "part_size";
|
||||
static final String WRITTEN_BY = "written_by";
|
||||
static final String META_HASH = "meta_hash";
|
||||
/**
|
||||
* Checks if the checksum for the file is unknown. This only is possible on an empty shard's
|
||||
* segments_N file which was created in older Lucene versions.
|
||||
*/
|
||||
public boolean hasUnknownChecksum() {
|
||||
return metadata.checksum().equals(UNKNOWN_CHECKSUM);
|
||||
}
|
||||
|
||||
static final String NAME = "name";
|
||||
static final String PHYSICAL_NAME = "physical_name";
|
||||
static final String LENGTH = "length";
|
||||
static final String CHECKSUM = "checksum";
|
||||
static final String PART_SIZE = "part_size";
|
||||
static final String WRITTEN_BY = "written_by";
|
||||
static final String META_HASH = "meta_hash";
|
||||
|
||||
/**
|
||||
* Serializes file info into JSON
|
||||
*
|
||||
|
@ -237,22 +254,22 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil
|
|||
*/
|
||||
public static void toXContent(FileInfo file, XContentBuilder builder, ToXContent.Params params) throws IOException {
|
||||
builder.startObject();
|
||||
builder.field(Fields.NAME, file.name);
|
||||
builder.field(Fields.PHYSICAL_NAME, file.metadata.name());
|
||||
builder.field(Fields.LENGTH, file.metadata.length());
|
||||
if (file.metadata.checksum() != null) {
|
||||
builder.field(Fields.CHECKSUM, file.metadata.checksum());
|
||||
builder.field(NAME, file.name);
|
||||
builder.field(PHYSICAL_NAME, file.metadata.name());
|
||||
builder.field(LENGTH, file.metadata.length());
|
||||
if (file.metadata.checksum().equals(UNKNOWN_CHECKSUM) == false) {
|
||||
builder.field(CHECKSUM, file.metadata.checksum());
|
||||
}
|
||||
if (file.partSize != null) {
|
||||
builder.field(Fields.PART_SIZE, file.partSize.bytes());
|
||||
builder.field(PART_SIZE, file.partSize.bytes());
|
||||
}
|
||||
|
||||
if (file.metadata.writtenBy() != null) {
|
||||
builder.field(Fields.WRITTEN_BY, file.metadata.writtenBy());
|
||||
builder.field(WRITTEN_BY, file.metadata.writtenBy());
|
||||
}
|
||||
|
||||
if (file.metadata.hash() != null && file.metadata().hash().length > 0) {
|
||||
builder.field(Fields.META_HASH, file.metadata.hash());
|
||||
builder.field(META_HASH, file.metadata.hash());
|
||||
}
|
||||
builder.endObject();
|
||||
}
|
||||
|
@ -271,6 +288,7 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil
|
|||
String checksum = null;
|
||||
ByteSizeValue partSize = null;
|
||||
Version writtenBy = null;
|
||||
String writtenByStr = null;
|
||||
BytesRef metaHash = new BytesRef();
|
||||
if (token == XContentParser.Token.START_OBJECT) {
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
|
@ -278,19 +296,20 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil
|
|||
String currentFieldName = parser.currentName();
|
||||
token = parser.nextToken();
|
||||
if (token.isValue()) {
|
||||
if ("name".equals(currentFieldName)) {
|
||||
if (NAME.equals(currentFieldName)) {
|
||||
name = parser.text();
|
||||
} else if ("physical_name".equals(currentFieldName)) {
|
||||
} else if (PHYSICAL_NAME.equals(currentFieldName)) {
|
||||
physicalName = parser.text();
|
||||
} else if ("length".equals(currentFieldName)) {
|
||||
} else if (LENGTH.equals(currentFieldName)) {
|
||||
length = parser.longValue();
|
||||
} else if ("checksum".equals(currentFieldName)) {
|
||||
} else if (CHECKSUM.equals(currentFieldName)) {
|
||||
checksum = parser.text();
|
||||
} else if ("part_size".equals(currentFieldName)) {
|
||||
} else if (PART_SIZE.equals(currentFieldName)) {
|
||||
partSize = new ByteSizeValue(parser.longValue());
|
||||
} else if ("written_by".equals(currentFieldName)) {
|
||||
writtenBy = Lucene.parseVersionLenient(parser.text(), null);
|
||||
} else if ("meta_hash".equals(currentFieldName)) {
|
||||
} else if (WRITTEN_BY.equals(currentFieldName)) {
|
||||
writtenByStr = parser.text();
|
||||
writtenBy = Lucene.parseVersionLenient(writtenByStr, null);
|
||||
} else if (META_HASH.equals(currentFieldName)) {
|
||||
metaHash.bytes = parser.binaryValue();
|
||||
metaHash.offset = 0;
|
||||
metaHash.length = metaHash.bytes.length;
|
||||
|
@ -305,6 +324,7 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Verify that file information is complete
|
||||
if (name == null || Strings.validFileName(name) == false) {
|
||||
throw new ElasticsearchParseException("missing or invalid file name [" + name + "]");
|
||||
|
@ -312,10 +332,29 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil
|
|||
throw new ElasticsearchParseException("missing or invalid physical file name [" + physicalName + "]");
|
||||
} else if (length < 0) {
|
||||
throw new ElasticsearchParseException("missing or invalid file length");
|
||||
} else if (writtenBy == null) {
|
||||
throw new ElasticsearchParseException("missing or invalid written_by [" + writtenByStr + "]");
|
||||
} else if (checksum == null) {
|
||||
if (physicalName.startsWith("segments_")
|
||||
&& writtenBy.onOrAfter(StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION) == false) {
|
||||
// its possible the checksum is null for segments_N files that belong to a shard with no data,
|
||||
// so we will assign it _na_ for now and try to get the checksum from the file itself later
|
||||
checksum = UNKNOWN_CHECKSUM;
|
||||
} else {
|
||||
throw new ElasticsearchParseException("missing checksum for name [" + name + "]");
|
||||
}
|
||||
}
|
||||
return new FileInfo(name, new StoreFileMetaData(physicalName, length, checksum, writtenBy, metaHash), partSize);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "[name: " + name +
|
||||
", numberOfParts: " + numberOfParts +
|
||||
", partSize: " + partSize +
|
||||
", partBytes: " + partBytes +
|
||||
", metadata: " + metadata + "]";
|
||||
}
|
||||
}
|
||||
|
||||
private final String snapshot;
|
||||
|
@ -424,26 +463,21 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil
|
|||
return totalSize;
|
||||
}
|
||||
|
||||
static final class Fields {
|
||||
static final String NAME = "name";
|
||||
static final String INDEX_VERSION = "index_version";
|
||||
static final String START_TIME = "start_time";
|
||||
static final String TIME = "time";
|
||||
static final String NUMBER_OF_FILES = "number_of_files";
|
||||
static final String TOTAL_SIZE = "total_size";
|
||||
static final String FILES = "files";
|
||||
}
|
||||
|
||||
static final class ParseFields {
|
||||
static final ParseField NAME = new ParseField("name");
|
||||
static final ParseField INDEX_VERSION = new ParseField("index_version", "index-version");
|
||||
static final ParseField START_TIME = new ParseField("start_time");
|
||||
static final ParseField TIME = new ParseField("time");
|
||||
static final ParseField NUMBER_OF_FILES = new ParseField("number_of_files");
|
||||
static final ParseField TOTAL_SIZE = new ParseField("total_size");
|
||||
static final ParseField FILES = new ParseField("files");
|
||||
}
|
||||
private static final String NAME = "name";
|
||||
private static final String INDEX_VERSION = "index_version";
|
||||
private static final String START_TIME = "start_time";
|
||||
private static final String TIME = "time";
|
||||
private static final String NUMBER_OF_FILES = "number_of_files";
|
||||
private static final String TOTAL_SIZE = "total_size";
|
||||
private static final String FILES = "files";
|
||||
|
||||
private static final ParseField PARSE_NAME = new ParseField("name");
|
||||
private static final ParseField PARSE_INDEX_VERSION = new ParseField("index_version", "index-version");
|
||||
private static final ParseField PARSE_START_TIME = new ParseField("start_time");
|
||||
private static final ParseField PARSE_TIME = new ParseField("time");
|
||||
private static final ParseField PARSE_NUMBER_OF_FILES = new ParseField("number_of_files");
|
||||
private static final ParseField PARSE_TOTAL_SIZE = new ParseField("total_size");
|
||||
private static final ParseField PARSE_FILES = new ParseField("files");
|
||||
|
||||
/**
|
||||
* Serializes shard snapshot metadata info into JSON
|
||||
|
@ -453,13 +487,13 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil
|
|||
*/
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.field(Fields.NAME, snapshot);
|
||||
builder.field(Fields.INDEX_VERSION, indexVersion);
|
||||
builder.field(Fields.START_TIME, startTime);
|
||||
builder.field(Fields.TIME, time);
|
||||
builder.field(Fields.NUMBER_OF_FILES, numberOfFiles);
|
||||
builder.field(Fields.TOTAL_SIZE, totalSize);
|
||||
builder.startArray(Fields.FILES);
|
||||
builder.field(NAME, snapshot);
|
||||
builder.field(INDEX_VERSION, indexVersion);
|
||||
builder.field(START_TIME, startTime);
|
||||
builder.field(TIME, time);
|
||||
builder.field(NUMBER_OF_FILES, numberOfFiles);
|
||||
builder.field(TOTAL_SIZE, totalSize);
|
||||
builder.startArray(FILES);
|
||||
for (FileInfo fileInfo : indexFiles) {
|
||||
FileInfo.toXContent(fileInfo, builder, params);
|
||||
}
|
||||
|
@ -493,24 +527,24 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil
|
|||
String currentFieldName = parser.currentName();
|
||||
token = parser.nextToken();
|
||||
if (token.isValue()) {
|
||||
if (parseFieldMatcher.match(currentFieldName, ParseFields.NAME)) {
|
||||
if (parseFieldMatcher.match(currentFieldName, PARSE_NAME)) {
|
||||
snapshot = parser.text();
|
||||
} else if (parseFieldMatcher.match(currentFieldName, ParseFields.INDEX_VERSION)) {
|
||||
} else if (parseFieldMatcher.match(currentFieldName, PARSE_INDEX_VERSION)) {
|
||||
// The index-version is needed for backward compatibility with v 1.0
|
||||
indexVersion = parser.longValue();
|
||||
} else if (parseFieldMatcher.match(currentFieldName, ParseFields.START_TIME)) {
|
||||
} else if (parseFieldMatcher.match(currentFieldName, PARSE_START_TIME)) {
|
||||
startTime = parser.longValue();
|
||||
} else if (parseFieldMatcher.match(currentFieldName, ParseFields.TIME)) {
|
||||
} else if (parseFieldMatcher.match(currentFieldName, PARSE_TIME)) {
|
||||
time = parser.longValue();
|
||||
} else if (parseFieldMatcher.match(currentFieldName, ParseFields.NUMBER_OF_FILES)) {
|
||||
} else if (parseFieldMatcher.match(currentFieldName, PARSE_NUMBER_OF_FILES)) {
|
||||
numberOfFiles = parser.intValue();
|
||||
} else if (parseFieldMatcher.match(currentFieldName, ParseFields.TOTAL_SIZE)) {
|
||||
} else if (parseFieldMatcher.match(currentFieldName, PARSE_TOTAL_SIZE)) {
|
||||
totalSize = parser.longValue();
|
||||
} else {
|
||||
throw new ElasticsearchParseException("unknown parameter [{}]", currentFieldName);
|
||||
}
|
||||
} else if (token == XContentParser.Token.START_ARRAY) {
|
||||
if (parseFieldMatcher.match(currentFieldName, ParseFields.FILES)) {
|
||||
if (parseFieldMatcher.match(currentFieldName, PARSE_FILES)) {
|
||||
while ((parser.nextToken()) != XContentParser.Token.END_ARRAY) {
|
||||
indexFiles.add(FileInfo.fromXContent(parser));
|
||||
}
|
||||
|
@ -526,6 +560,7 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil
|
|||
}
|
||||
}
|
||||
return new BlobStoreIndexShardSnapshot(snapshot, indexVersion, Collections.unmodifiableList(indexFiles),
|
||||
startTime, time, numberOfFiles, totalSize);
|
||||
startTime, time, numberOfFiles, totalSize);
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -41,7 +41,6 @@ import org.apache.lucene.store.IndexOutput;
|
|||
import org.apache.lucene.store.Lock;
|
||||
import org.apache.lucene.store.SimpleFSDirectory;
|
||||
import org.apache.lucene.util.ArrayUtil;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.BytesRefBuilder;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.apache.lucene.util.Version;
|
||||
|
@ -444,11 +443,9 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
|
|||
}
|
||||
|
||||
/**
|
||||
* The returned IndexOutput might validate the files checksum if the file has been written with a newer lucene version
|
||||
* and the metadata holds the necessary information to detect that it was been written by Lucene 4.8 or newer. If it has only
|
||||
* a legacy checksum, returned IndexOutput will not verify the checksum.
|
||||
* The returned IndexOutput validates the files checksum.
|
||||
* <p>
|
||||
* Note: Checksums are calculated nevertheless since lucene does it by default sicne version 4.8.0. This method only adds the
|
||||
* Note: Checksums are calculated by default since version 4.8.0. This method only adds the
|
||||
* verification against the checksum in the given metadata and does not add any significant overhead.
|
||||
*/
|
||||
public IndexOutput createVerifyingOutput(String fileName, final StoreFileMetaData metadata, final IOContext context) throws IOException {
|
||||
|
@ -652,17 +649,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
|
|||
// different in the diff. That's why we have to double check here again if the rest of it matches.
|
||||
|
||||
// all is fine this file is just part of a commit or a segment that is different
|
||||
final boolean same = local.isSame(remote);
|
||||
|
||||
// this check ensures that the two files are consistent ie. if we don't have checksums only the rest needs to match we are just
|
||||
// verifying that we are consistent on both ends source and target
|
||||
final boolean hashAndLengthEqual = (
|
||||
local.checksum() == null
|
||||
&& remote.checksum() == null
|
||||
&& local.hash().equals(remote.hash())
|
||||
&& local.length() == remote.length());
|
||||
final boolean consistent = hashAndLengthEqual || same;
|
||||
if (consistent == false) {
|
||||
if (local.isSame(remote) == false) {
|
||||
logger.debug("Files are different on the recovery target: {} ", recoveryDiff);
|
||||
throw new IllegalStateException("local version: " + local + " is different from remote version after recovery: " + remote, null);
|
||||
}
|
||||
|
@ -898,18 +885,6 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Computes a strong hash value for small files. Note that this method should only be used for files < 1MB
|
||||
*/
|
||||
public static BytesRef hashFile(Directory directory, String file) throws IOException {
|
||||
final BytesRefBuilder fileHash = new BytesRefBuilder();
|
||||
try (final IndexInput in = directory.openInput(file, IOContext.READONCE)) {
|
||||
hashFile(fileHash, new InputStreamIndexInput(in, in.length()), in.length());
|
||||
}
|
||||
return fileHash.get();
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Computes a strong hash value for small files. Note that this method should only be used for files < 1MB
|
||||
*/
|
||||
|
|
|
@@ -21,10 +21,8 @@ package org.elasticsearch.index.store;

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Version;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.lucene.Lucene;

@@ -58,14 +56,15 @@ public class StoreFileMetaData implements Writeable {
    }

    public StoreFileMetaData(String name, long length, String checksum, Version writtenBy, BytesRef hash) {
        assert writtenBy != null && writtenBy.onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION) : "index version less that "
            + FIRST_LUCENE_CHECKSUM_VERSION + " are not supported but got: " + writtenBy;
        Objects.requireNonNull(writtenBy, "writtenBy must not be null");
        Objects.requireNonNull(checksum, "checksum must not be null");
        this.name = name;
        // its possible here to have a _na_ checksum or an unsupported writtenBy version, if the
        // file is a segments_N file, but that is fine in the case of a segments_N file because
        // we handle that case upstream
        assert name.startsWith("segments_") || (writtenBy != null && writtenBy.onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION)) :
            "index version less that " + FIRST_LUCENE_CHECKSUM_VERSION + " are not supported but got: " + writtenBy;
        this.name = Objects.requireNonNull(name, "name must not be null");
        this.length = length;
        this.checksum = checksum;
        this.writtenBy = writtenBy;
        this.checksum = Objects.requireNonNull(checksum, "checksum must not be null");
        this.writtenBy = Objects.requireNonNull(writtenBy, "writtenBy must not be null");
        this.hash = hash == null ? new BytesRef() : hash;
    }

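The reworked constructor above keeps the checksum/version assertion but carves out an exception for segments_N files, whose checksum may legitimately be the "_na_" placeholder coming out of an empty legacy shard. A hedged usage sketch; the file names, lengths, and checksum values are made up for illustration:

import org.apache.lucene.util.Version;
import org.elasticsearch.index.store.StoreFileMetaData;

// Sketch: regular files must carry a real Lucene checksum and version; a segments_N
// file from an empty legacy shard may come through with the "_na_" placeholder instead.
public final class StoreFileMetaDataSketch {

    public static void main(String[] args) {
        StoreFileMetaData regular =
            new StoreFileMetaData("_0.cfs", 1024L, "abc123", Version.LATEST, null);
        StoreFileMetaData legacyEmptyShardSegments =
            new StoreFileMetaData("segments_1", 71L, "_na_", Version.LATEST, null);
        System.out.println(regular.name() + " / " + legacyEmptyShardSegments.name());
    }
}
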
@ -45,6 +45,7 @@ import java.util.IdentityHashMap;
|
|||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.function.Predicate;
|
||||
|
||||
public class IndicesQueryCache extends AbstractComponent implements QueryCache, Closeable {
|
||||
|
||||
|
@ -52,6 +53,9 @@ public class IndicesQueryCache extends AbstractComponent implements QueryCache,
|
|||
"indices.queries.cache.size", "10%", Property.NodeScope);
|
||||
public static final Setting<Integer> INDICES_CACHE_QUERY_COUNT_SETTING = Setting.intSetting(
|
||||
"indices.queries.cache.count", 10000, 1, Property.NodeScope);
|
||||
// enables caching on all segments instead of only the larger ones, for testing only
|
||||
public static final Setting<Boolean> INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING = Setting.boolSetting(
|
||||
"indices.queries.cache.all_segments", false, Property.NodeScope);
|
||||
|
||||
private final LRUQueryCache cache;
|
||||
private final ShardCoreKeyMap shardKeyMap = new ShardCoreKeyMap();
|
||||
|
@ -69,111 +73,11 @@ public class IndicesQueryCache extends AbstractComponent implements QueryCache,
|
|||
final int count = INDICES_CACHE_QUERY_COUNT_SETTING.get(settings);
|
||||
logger.debug("using [node] query cache with size [{}] max filter count [{}]",
|
||||
size, count);
|
||||
cache = new LRUQueryCache(count, size.bytes()) {
|
||||
|
||||
private Stats getStats(Object coreKey) {
|
||||
final ShardId shardId = shardKeyMap.getShardId(coreKey);
|
||||
if (shardId == null) {
|
||||
return null;
|
||||
}
|
||||
return shardStats.get(shardId);
|
||||
}
|
||||
|
||||
private Stats getOrCreateStats(Object coreKey) {
|
||||
final ShardId shardId = shardKeyMap.getShardId(coreKey);
|
||||
Stats stats = shardStats.get(shardId);
|
||||
if (stats == null) {
|
||||
stats = new Stats();
|
||||
shardStats.put(shardId, stats);
|
||||
}
|
||||
return stats;
|
||||
}
|
||||
|
||||
// It's ok to not protect these callbacks by a lock since it is
|
||||
// done in LRUQueryCache
|
||||
@Override
|
||||
protected void onClear() {
|
||||
assert Thread.holdsLock(this);
|
||||
super.onClear();
|
||||
for (Stats stats : shardStats.values()) {
|
||||
// don't throw away hit/miss
|
||||
stats.cacheSize = 0;
|
||||
stats.ramBytesUsed = 0;
|
||||
}
|
||||
sharedRamBytesUsed = 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void onQueryCache(Query filter, long ramBytesUsed) {
|
||||
assert Thread.holdsLock(this);
|
||||
super.onQueryCache(filter, ramBytesUsed);
|
||||
sharedRamBytesUsed += ramBytesUsed;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void onQueryEviction(Query filter, long ramBytesUsed) {
|
||||
assert Thread.holdsLock(this);
|
||||
super.onQueryEviction(filter, ramBytesUsed);
|
||||
sharedRamBytesUsed -= ramBytesUsed;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void onDocIdSetCache(Object readerCoreKey, long ramBytesUsed) {
|
||||
assert Thread.holdsLock(this);
|
||||
super.onDocIdSetCache(readerCoreKey, ramBytesUsed);
|
||||
final Stats shardStats = getOrCreateStats(readerCoreKey);
|
||||
shardStats.cacheSize += 1;
|
||||
shardStats.cacheCount += 1;
|
||||
shardStats.ramBytesUsed += ramBytesUsed;
|
||||
|
||||
StatsAndCount statsAndCount = stats2.get(readerCoreKey);
|
||||
if (statsAndCount == null) {
|
||||
statsAndCount = new StatsAndCount(shardStats);
|
||||
stats2.put(readerCoreKey, statsAndCount);
|
||||
}
|
||||
statsAndCount.count += 1;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void onDocIdSetEviction(Object readerCoreKey, int numEntries, long sumRamBytesUsed) {
|
||||
assert Thread.holdsLock(this);
|
||||
super.onDocIdSetEviction(readerCoreKey, numEntries, sumRamBytesUsed);
|
||||
// onDocIdSetEviction might sometimes be called with a number
|
||||
// of entries equal to zero if the cache for the given segment
|
||||
// was already empty when the close listener was called
|
||||
if (numEntries > 0) {
|
||||
// We can't use ShardCoreKeyMap here because its core closed
|
||||
// listener is called before the listener of the cache which
|
||||
// triggers this eviction. So instead we use use stats2 that
|
||||
// we only evict when nothing is cached anymore on the segment
|
||||
// instead of relying on close listeners
|
||||
final StatsAndCount statsAndCount = stats2.get(readerCoreKey);
|
||||
final Stats shardStats = statsAndCount.stats;
|
||||
shardStats.cacheSize -= numEntries;
|
||||
shardStats.ramBytesUsed -= sumRamBytesUsed;
|
||||
statsAndCount.count -= numEntries;
|
||||
if (statsAndCount.count == 0) {
|
||||
stats2.remove(readerCoreKey);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void onHit(Object readerCoreKey, Query filter) {
|
||||
assert Thread.holdsLock(this);
|
||||
super.onHit(readerCoreKey, filter);
|
||||
final Stats shardStats = getStats(readerCoreKey);
|
||||
shardStats.hitCount += 1;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void onMiss(Object readerCoreKey, Query filter) {
|
||||
assert Thread.holdsLock(this);
|
||||
super.onMiss(readerCoreKey, filter);
|
||||
final Stats shardStats = getOrCreateStats(readerCoreKey);
|
||||
shardStats.missCount += 1;
|
||||
}
|
||||
};
|
||||
if (INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.get(settings)) {
|
||||
cache = new ElasticsearchLRUQueryCache(count, size.bytes(), context -> true);
|
||||
} else {
|
||||
cache = new ElasticsearchLRUQueryCache(count, size.bytes());
|
||||
}
|
||||
sharedRamBytesUsed = 0;
|
||||
}
|
||||
|
||||
|
@ -316,4 +220,111 @@ public class IndicesQueryCache extends AbstractComponent implements QueryCache,
|
|||
assert empty(shardStats.get(shardId));
|
||||
shardStats.remove(shardId);
|
||||
}
|
||||
|
||||
private class ElasticsearchLRUQueryCache extends LRUQueryCache {
|
||||
|
||||
ElasticsearchLRUQueryCache(int maxSize, long maxRamBytesUsed, Predicate<LeafReaderContext> leavesToCache) {
|
||||
super(maxSize, maxRamBytesUsed, leavesToCache);
|
||||
}
|
||||
|
||||
ElasticsearchLRUQueryCache(int maxSize, long maxRamBytesUsed) {
|
||||
super(maxSize, maxRamBytesUsed);
|
||||
}
|
||||
|
||||
private Stats getStats(Object coreKey) {
|
||||
final ShardId shardId = shardKeyMap.getShardId(coreKey);
|
||||
if (shardId == null) {
|
||||
return null;
|
||||
}
|
||||
return shardStats.get(shardId);
|
||||
}
|
||||
|
||||
private Stats getOrCreateStats(Object coreKey) {
|
||||
final ShardId shardId = shardKeyMap.getShardId(coreKey);
|
||||
Stats stats = shardStats.get(shardId);
|
||||
if (stats == null) {
|
||||
stats = new Stats();
|
||||
shardStats.put(shardId, stats);
|
||||
}
|
||||
return stats;
|
||||
}
|
||||
|
||||
// It's ok to not protect these callbacks by a lock since it is
|
||||
// done in LRUQueryCache
|
||||
@Override
|
||||
protected void onClear() {
|
||||
super.onClear();
|
||||
for (Stats stats : shardStats.values()) {
|
||||
// don't throw away hit/miss
|
||||
stats.cacheSize = 0;
|
||||
stats.ramBytesUsed = 0;
|
||||
}
|
||||
sharedRamBytesUsed = 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void onQueryCache(Query filter, long ramBytesUsed) {
|
||||
super.onQueryCache(filter, ramBytesUsed);
|
||||
sharedRamBytesUsed += ramBytesUsed;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void onQueryEviction(Query filter, long ramBytesUsed) {
|
||||
super.onQueryEviction(filter, ramBytesUsed);
|
||||
sharedRamBytesUsed -= ramBytesUsed;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void onDocIdSetCache(Object readerCoreKey, long ramBytesUsed) {
|
||||
super.onDocIdSetCache(readerCoreKey, ramBytesUsed);
|
||||
final Stats shardStats = getOrCreateStats(readerCoreKey);
|
||||
shardStats.cacheSize += 1;
|
||||
shardStats.cacheCount += 1;
|
||||
shardStats.ramBytesUsed += ramBytesUsed;
|
||||
|
||||
StatsAndCount statsAndCount = stats2.get(readerCoreKey);
|
||||
if (statsAndCount == null) {
|
||||
statsAndCount = new StatsAndCount(shardStats);
|
||||
stats2.put(readerCoreKey, statsAndCount);
|
||||
}
|
||||
statsAndCount.count += 1;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void onDocIdSetEviction(Object readerCoreKey, int numEntries, long sumRamBytesUsed) {
|
||||
super.onDocIdSetEviction(readerCoreKey, numEntries, sumRamBytesUsed);
|
||||
// onDocIdSetEviction might sometimes be called with a number
|
||||
// of entries equal to zero if the cache for the given segment
|
||||
// was already empty when the close listener was called
|
||||
if (numEntries > 0) {
|
||||
// We can't use ShardCoreKeyMap here because its core closed
|
||||
// listener is called before the listener of the cache which
|
||||
// triggers this eviction. So instead we use use stats2 that
|
||||
// we only evict when nothing is cached anymore on the segment
|
||||
// instead of relying on close listeners
|
||||
final StatsAndCount statsAndCount = stats2.get(readerCoreKey);
|
||||
final Stats shardStats = statsAndCount.stats;
|
||||
shardStats.cacheSize -= numEntries;
|
||||
shardStats.ramBytesUsed -= sumRamBytesUsed;
|
||||
statsAndCount.count -= numEntries;
|
||||
if (statsAndCount.count == 0) {
|
||||
stats2.remove(readerCoreKey);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void onHit(Object readerCoreKey, Query filter) {
|
||||
super.onHit(readerCoreKey, filter);
|
||||
final Stats shardStats = getStats(readerCoreKey);
|
||||
shardStats.hitCount += 1;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void onMiss(Object readerCoreKey, Query filter) {
|
||||
super.onMiss(readerCoreKey, filter);
|
||||
final Stats shardStats = getOrCreateStats(readerCoreKey);
|
||||
shardStats.missCount += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
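The IndicesQueryCache hunks above pull the anonymous LRUQueryCache subclass out into a named inner class, ElasticsearchLRUQueryCache, and add a test-only indices.queries.cache.all_segments setting that caches on every segment rather than only the larger ones. A sketch of flipping that setting in node settings; only the setting constants come from the diff, the surrounding builder usage is generic:

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.indices.IndicesQueryCache;

// Sketch: enable caching on all segments (intended for tests only, per the diff comment).
public final class QueryCacheSettingsSketch {

    public static void main(String[] args) {
        Settings settings = Settings.builder()
            .put(IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.getKey(), true)
            .put(IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING.getKey(), 10_000)
            .build();
        System.out.println(IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.get(settings));
    }
}
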
@ -35,6 +35,7 @@ import org.elasticsearch.cluster.ClusterState;
|
|||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.metadata.MappingMetaData;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.breaker.CircuitBreaker;
|
||||
|
@ -55,6 +56,7 @@ import org.elasticsearch.common.settings.Setting;
|
|||
import org.elasticsearch.common.settings.Setting.Property;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.Callback;
|
||||
import org.elasticsearch.common.util.concurrent.EsExecutors;
|
||||
import org.elasticsearch.common.util.iterable.Iterables;
|
||||
import org.elasticsearch.env.NodeEnvironment;
|
||||
|
@ -86,10 +88,14 @@ import org.elasticsearch.index.shard.IndexingStats;
|
|||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.index.store.IndexStoreConfig;
|
||||
import org.elasticsearch.indices.breaker.CircuitBreakerService;
|
||||
import org.elasticsearch.indices.cluster.IndicesClusterStateService;
|
||||
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
|
||||
import org.elasticsearch.indices.mapper.MapperRegistry;
|
||||
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
|
||||
import org.elasticsearch.indices.recovery.RecoveryState;
|
||||
import org.elasticsearch.indices.recovery.RecoveryTargetService;
|
||||
import org.elasticsearch.plugins.PluginsService;
|
||||
import org.elasticsearch.repositories.RepositoriesService;
|
||||
import org.elasticsearch.search.internal.SearchContext;
|
||||
import org.elasticsearch.search.internal.ShardSearchRequest;
|
||||
import org.elasticsearch.search.query.QueryPhase;
|
||||
|
@ -124,7 +130,8 @@ import static org.elasticsearch.common.util.CollectionUtils.arrayAsArrayList;
|
|||
/**
|
||||
*
|
||||
*/
|
||||
public class IndicesService extends AbstractLifecycleComponent<IndicesService> implements Iterable<IndexService>, IndexService.ShardStoreDeleter {
|
||||
public class IndicesService extends AbstractLifecycleComponent<IndicesService>
|
||||
implements IndicesClusterStateService.AllocatedIndices<IndexShard, IndexService>, IndexService.ShardStoreDeleter {
|
||||
|
||||
public static final String INDICES_SHARDS_CLOSED_TIMEOUT = "indices.shards_closed_timeout";
|
||||
public static final Setting<TimeValue> INDICES_CACHE_CLEAN_INTERVAL_SETTING =
|
||||
|
@@ -296,11 +303,14 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
    }

    /**
     * Returns <tt>true</tt> if changes (adding / removing) indices, shards and so on are allowed.
     * Checks if changes (adding / removing) indices, shards and so on are allowed.
     *
     * @throws IllegalStateException if no changes allowed.
     */
    public boolean changesAllowed() {
        // we check on stop here since we defined stop when we delete the indices
        return lifecycle.started();
    private void ensureChangesAllowed() {
        if (lifecycle.started() == false) {
            throw new IllegalStateException("Can't make changes to indices service, node is closed");
        }
    }

    @Override

@ -314,10 +324,9 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
|
|||
|
||||
/**
|
||||
* Returns an IndexService for the specified index if exists otherwise returns <code>null</code>.
|
||||
*
|
||||
*/
|
||||
@Nullable
|
||||
public IndexService indexService(Index index) {
|
||||
@Override
|
||||
public @Nullable IndexService indexService(Index index) {
|
||||
return indices.get(index.getUUID());
|
||||
}
|
||||
|
||||
|
@ -339,11 +348,9 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
|
|||
* @param builtInListeners a list of built-in lifecycle {@link IndexEventListener} that should should be used along side with the per-index listeners
|
||||
* @throws IndexAlreadyExistsException if the index already exists.
|
||||
*/
|
||||
@Override
|
||||
public synchronized IndexService createIndex(final NodeServicesProvider nodeServicesProvider, IndexMetaData indexMetaData, List<IndexEventListener> builtInListeners) throws IOException {
|
||||
|
||||
if (!lifecycle.started()) {
|
||||
throw new IllegalStateException("Can't create an index [" + indexMetaData.getIndex() + "], node is closed");
|
||||
}
|
||||
ensureChangesAllowed();
|
||||
if (indexMetaData.getIndexUUID().equals(IndexMetaData.INDEX_UUID_NA_VALUE)) {
|
||||
throw new IllegalArgumentException("index must have a real UUID found value: [" + indexMetaData.getIndexUUID() + "]");
|
||||
}
|
||||
|
@ -424,14 +431,44 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
|
|||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public IndexShard createShard(ShardRouting shardRouting, RecoveryState recoveryState, RecoveryTargetService recoveryTargetService,
|
||||
RecoveryTargetService.RecoveryListener recoveryListener, RepositoriesService repositoriesService,
|
||||
NodeServicesProvider nodeServicesProvider, Callback<IndexShard.ShardFailure> onShardFailure) throws IOException {
|
||||
ensureChangesAllowed();
|
||||
IndexService indexService = indexService(shardRouting.index());
|
||||
IndexShard indexShard = indexService.createShard(shardRouting);
|
||||
indexShard.addShardFailureCallback(onShardFailure);
|
||||
indexShard.startRecovery(recoveryState, recoveryTargetService, recoveryListener, repositoriesService,
|
||||
(type, mapping) -> {
|
||||
assert recoveryState.getType() == RecoveryState.Type.LOCAL_SHARDS :
|
||||
"mapping update consumer only required by local shards recovery";
|
||||
try {
|
||||
nodeServicesProvider.getClient().admin().indices().preparePutMapping()
|
||||
.setConcreteIndex(shardRouting.index()) // concrete index - no name clash, it uses uuid
|
||||
.setType(type)
|
||||
.setSource(mapping.source().string())
|
||||
.get();
|
||||
} catch (IOException ex) {
|
||||
throw new ElasticsearchException("failed to stringify mapping source", ex);
|
||||
}
|
||||
}, this);
|
||||
return indexShard;
|
||||
}
|
||||
|
||||
/**
|
||||
* Removes the given index from this service and releases all associated resources. Persistent parts of the index
|
||||
* like the shards files, state and transaction logs are kept around in the case of a disaster recovery.
|
||||
* @param index the index to remove
|
||||
* @param reason the high level reason causing this removal
|
||||
*/
|
||||
@Override
|
||||
public void removeIndex(Index index, String reason) {
|
||||
removeIndex(index, reason, false);
|
||||
try {
|
||||
removeIndex(index, reason, false);
|
||||
} catch (Throwable e) {
|
||||
logger.warn("failed to remove index ({})", e, reason);
|
||||
}
|
||||
}
|
||||
|
||||
private void removeIndex(Index index, String reason, boolean delete) {
|
||||
|
@@ -516,14 +553,20 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
     * @param index the index to delete
     * @param reason the high level reason causing this delete
     */
    public void deleteIndex(Index index, String reason) throws IOException {
        removeIndex(index, reason, true);
    @Override
    public void deleteIndex(Index index, String reason) {
        try {
            removeIndex(index, reason, true);
        } catch (Throwable e) {
            logger.warn("failed to delete index ({})", e, reason);
        }
    }

    /**
     * Deletes an index that is not assigned to this node. This method cleans up all disk folders relating to the index
     * but does not deal with in-memory structures. For those call {@link #deleteIndex(Index, String)}
     */
    @Override
    public void deleteUnassignedIndex(String reason, IndexMetaData metaData, ClusterState clusterState) {
        if (nodeEnv.hasNodeFile()) {
            String indexName = metaData.getIndex().getName();

@ -683,8 +726,8 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
|
|||
* @param clusterState {@code ClusterState} to ensure the index is not part of it
|
||||
* @return IndexMetaData for the index loaded from disk
|
||||
*/
|
||||
@Nullable
|
||||
public IndexMetaData verifyIndexIsDeleted(final Index index, final ClusterState clusterState) {
|
||||
@Override
|
||||
public @Nullable IndexMetaData verifyIndexIsDeleted(final Index index, final ClusterState clusterState) {
|
||||
// this method should only be called when we know the index (name + uuid) is not part of the cluster state
|
||||
if (clusterState.metaData().index(index) != null) {
|
||||
throw new IllegalStateException("Cannot delete index [" + index + "], it is still part of the cluster state.");
|
||||
|
@ -839,6 +882,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
|
|||
* @param index the index to process the pending deletes for
|
||||
* @param timeout the timeout used for processing pending deletes
|
||||
*/
|
||||
@Override
|
||||
public void processPendingDeletes(Index index, IndexSettings indexSettings, TimeValue timeout) throws IOException, InterruptedException {
|
||||
logger.debug("{} processing pending deletes", index);
|
||||
final long startTimeNS = System.nanoTime();
|
||||
|
File diff suppressed because it is too large
@ -114,11 +114,9 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
final ClusterState state = clusterService.state();
final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, indicesOptions, aliasesOrIndices);
final Map<String, List<ShardsSyncedFlushResult>> results = ConcurrentCollections.newConcurrentMap();
int totalNumberOfShards = 0;
int numberOfShards = 0;
for (Index index : concreteIndices) {
final IndexMetaData indexMetaData = state.metaData().getIndexSafe(index);
totalNumberOfShards += indexMetaData.getTotalNumberOfShards();
numberOfShards += indexMetaData.getNumberOfShards();
results.put(index.getName(), Collections.synchronizedList(new ArrayList<>()));
@ -127,7 +125,6 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
listener.onResponse(new SyncedFlushResponse(results));
return;
}
final int finalTotalNumberOfShards = totalNumberOfShards;
final CountDown countDown = new CountDown(numberOfShards);

for (final Index concreteIndex : concreteIndices) {
@ -136,7 +133,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
final int indexNumberOfShards = indexMetaData.getNumberOfShards();
for (int shard = 0; shard < indexNumberOfShards; shard++) {
final ShardId shardId = new ShardId(indexMetaData.getIndex(), shard);
attemptSyncedFlush(shardId, new ActionListener<ShardsSyncedFlushResult>() {
innerAttemptSyncedFlush(shardId, state, new ActionListener<ShardsSyncedFlushResult>() {
@Override
public void onResponse(ShardsSyncedFlushResult syncedFlushResult) {
results.get(index).add(syncedFlushResult);
@ -148,7 +145,8 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
@Override
public void onFailure(Throwable e) {
logger.debug("{} unexpected error while executing synced flush", shardId);
results.get(index).add(new ShardsSyncedFlushResult(shardId, finalTotalNumberOfShards, e.getMessage()));
final int totalShards = indexMetaData.getNumberOfReplicas() + 1;
results.get(index).add(new ShardsSyncedFlushResult(shardId, totalShards, e.getMessage()));
if (countDown.countDown()) {
listener.onResponse(new SyncedFlushResponse(results));
}
@ -185,8 +183,11 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
* Synced flush is a best effort operation. The sync id may be written on all, some or none of the copies.
**/
public void attemptSyncedFlush(final ShardId shardId, final ActionListener<ShardsSyncedFlushResult> actionListener) {
innerAttemptSyncedFlush(shardId, clusterService.state(), actionListener);
}

private void innerAttemptSyncedFlush(final ShardId shardId, final ClusterState state, final ActionListener<ShardsSyncedFlushResult> actionListener) {
try {
final ClusterState state = clusterService.state();
final IndexShardRoutingTable shardRoutingTable = getShardRoutingTable(shardId, state);
final List<ShardRouting> activeShards = shardRoutingTable.activeShards();
final int totalShards = shardRoutingTable.getSize();
@ -20,7 +20,6 @@
package org.elasticsearch.indices.recovery;

import org.apache.lucene.util.Version;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@ -76,7 +75,6 @@ public final class RecoveryFileChunkRequest extends TransportRequest {
return position;
}

@Nullable
public String checksum() {
return metaData.checksum();
}
@ -105,11 +103,10 @@ public final class RecoveryFileChunkRequest extends TransportRequest {
String name = in.readString();
position = in.readVLong();
long length = in.readVLong();
String checksum = in.readOptionalString();
String checksum = in.readString();
content = in.readBytesReference();
Version writtenBy = null;
String versionString = in.readOptionalString();
writtenBy = Lucene.parseVersionLenient(versionString, null);
Version writtenBy = Lucene.parseVersionLenient(in.readString(), null);
assert writtenBy != null;
metaData = new StoreFileMetaData(name, length, checksum, writtenBy);
lastChunk = in.readBoolean();
totalTranslogOps = in.readVInt();
@ -124,9 +121,9 @@ public final class RecoveryFileChunkRequest extends TransportRequest {
out.writeString(metaData.name());
out.writeVLong(position);
out.writeVLong(metaData.length());
out.writeOptionalString(metaData.checksum());
out.writeString(metaData.checksum());
out.writeBytesReference(content);
out.writeOptionalString(metaData.writtenBy() == null ? null : metaData.writtenBy().toString());
out.writeString(metaData.writtenBy().toString());
out.writeBoolean(lastChunk);
out.writeVInt(totalTranslogOps);
out.writeLong(sourceThrottleTimeInNanos);
@ -115,4 +115,5 @@ public abstract class BlobStoreFormat<T extends ToXContent> {

}
}

}
@ -826,9 +826,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
if (context.scrollContext() == null) {
throw new SearchContextException(context, "`slice` cannot be used outside of a scroll context");
}
context.sliceFilter(source.slice().toFilter(queryShardContext,
context.shardTarget().getShardId().getId(),
queryShardContext.getIndexSettings().getNumberOfShards()));
context.sliceBuilder(source.slice());
}
}
@ -27,6 +27,8 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.search.aggregations.support.AggregationContext;
import org.elasticsearch.search.aggregations.support.AggregationPath;
import org.elasticsearch.search.aggregations.support.AggregationPath.PathElement;
import org.elasticsearch.search.profile.Profilers;
import org.elasticsearch.search.profile.aggregation.ProfilingAggregator;

import java.io.IOException;
import java.util.ArrayList;
@ -81,7 +83,12 @@ public class AggregatorFactories {
// propagate the fact that only bucket 0 will be collected with single-bucket
// aggs
final boolean collectsFromSingleBucket = false;
aggregators[i] = factories[i].create(parent, collectsFromSingleBucket);
Aggregator factory = factories[i].create(parent, collectsFromSingleBucket);
Profilers profilers = factory.context().searchContext().getProfilers();
if (profilers != null) {
factory = new ProfilingAggregator(factory, profilers.getAggregationProfiler());
}
aggregators[i] = factory;
}
return aggregators;
}
@ -92,7 +99,12 @@ public class AggregatorFactories {
for (int i = 0; i < factories.length; i++) {
// top-level aggs only get called with bucket 0
final boolean collectsFromSingleBucket = true;
aggregators[i] = factories[i].create(null, collectsFromSingleBucket);
Aggregator factory = factories[i].create(null, collectsFromSingleBucket);
Profilers profilers = factory.context().searchContext().getProfilers();
if (profilers != null) {
factory = new ProfilingAggregator(factory, profilers.getAggregationProfiler());
}
aggregators[i] = factory;
}
return aggregators;
}
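The hunks above wrap each newly created Aggregator in a ProfilingAggregator whenever a profiler is active, so the time spent in each aggregator can be recorded without changing the callers. A minimal, self-contained sketch of that wrap-and-time decorator pattern (the names below are illustrative stand-ins, not the Elasticsearch classes):

// DecoratorDemo.java - illustrative sketch only; Collector and TimingCollector are not Elasticsearch types.
interface Collector {
    void collect(int doc);
}

final class TimingCollector implements Collector {
    private final Collector delegate;
    private long elapsedNanos;

    TimingCollector(Collector delegate) {
        this.delegate = delegate;
    }

    @Override
    public void collect(int doc) {
        long start = System.nanoTime();
        delegate.collect(doc);                      // do the real work
        elapsedNanos += System.nanoTime() - start;  // record how long it took
    }

    long elapsedNanos() {
        return elapsedNanos;
    }
}

public class DecoratorDemo {
    public static void main(String[] args) {
        Collector base = doc -> { /* real per-document work would happen here */ };
        TimingCollector timed = new TimingCollector(base);
        for (int doc = 0; doc < 3; doc++) {
            timed.collect(doc);
        }
        System.out.println("collect() took " + timed.elapsedNanos() + "ns in total");
    }
}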
@ -28,13 +28,139 @@ import org.elasticsearch.search.aggregations.InternalAggregation.Type;
|
|||
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
|
||||
import org.elasticsearch.search.aggregations.support.AggregationContext;
|
||||
import org.elasticsearch.search.internal.SearchContext.Lifetime;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
public abstract class AggregatorFactory<AF extends AggregatorFactory<AF>> {
|
||||
|
||||
public static final class MultiBucketAggregatorWrapper extends Aggregator {
|
||||
private final BigArrays bigArrays;
|
||||
private final Aggregator parent;
|
||||
private final AggregatorFactory<?> factory;
|
||||
private final Aggregator first;
|
||||
ObjectArray<Aggregator> aggregators;
|
||||
ObjectArray<LeafBucketCollector> collectors;
|
||||
|
||||
MultiBucketAggregatorWrapper(BigArrays bigArrays, AggregationContext context, Aggregator parent, AggregatorFactory<?> factory,
|
||||
Aggregator first) {
|
||||
this.bigArrays = bigArrays;
|
||||
this.parent = parent;
|
||||
this.factory = factory;
|
||||
this.first = first;
|
||||
context.searchContext().addReleasable(this, Lifetime.PHASE);
|
||||
aggregators = bigArrays.newObjectArray(1);
|
||||
aggregators.set(0, first);
|
||||
collectors = bigArrays.newObjectArray(1);
|
||||
}
|
||||
|
||||
public Class<?> getWrappedClass() {
|
||||
return first.getClass();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String name() {
|
||||
return first.name();
|
||||
}
|
||||
|
||||
@Override
|
||||
public AggregationContext context() {
|
||||
return first.context();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Aggregator parent() {
|
||||
return first.parent();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean needsScores() {
|
||||
return first.needsScores();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Aggregator subAggregator(String name) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void preCollection() throws IOException {
|
||||
for (long i = 0; i < aggregators.size(); ++i) {
|
||||
final Aggregator aggregator = aggregators.get(i);
|
||||
if (aggregator != null) {
|
||||
aggregator.preCollection();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void postCollection() throws IOException {
|
||||
for (long i = 0; i < aggregators.size(); ++i) {
|
||||
final Aggregator aggregator = aggregators.get(i);
|
||||
if (aggregator != null) {
|
||||
aggregator.postCollection();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public LeafBucketCollector getLeafCollector(final LeafReaderContext ctx) {
|
||||
for (long i = 0; i < collectors.size(); ++i) {
|
||||
collectors.set(i, null);
|
||||
}
|
||||
return new LeafBucketCollector() {
|
||||
Scorer scorer;
|
||||
|
||||
@Override
|
||||
public void setScorer(Scorer scorer) throws IOException {
|
||||
this.scorer = scorer;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void collect(int doc, long bucket) throws IOException {
|
||||
collectors = bigArrays.grow(collectors, bucket + 1);
|
||||
|
||||
LeafBucketCollector collector = collectors.get(bucket);
|
||||
if (collector == null) {
|
||||
aggregators = bigArrays.grow(aggregators, bucket + 1);
|
||||
Aggregator aggregator = aggregators.get(bucket);
|
||||
if (aggregator == null) {
|
||||
aggregator = factory.create(parent, true);
|
||||
aggregator.preCollection();
|
||||
aggregators.set(bucket, aggregator);
|
||||
}
|
||||
collector = aggregator.getLeafCollector(ctx);
|
||||
collector.setScorer(scorer);
|
||||
collectors.set(bucket, collector);
|
||||
}
|
||||
collector.collect(doc, 0);
|
||||
}
|
||||
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public InternalAggregation buildAggregation(long bucket) throws IOException {
|
||||
if (bucket < aggregators.size()) {
|
||||
Aggregator aggregator = aggregators.get(bucket);
|
||||
if (aggregator != null) {
|
||||
return aggregator.buildAggregation(0);
|
||||
}
|
||||
}
|
||||
return buildEmptyAggregation();
|
||||
}
|
||||
|
||||
@Override
|
||||
public InternalAggregation buildEmptyAggregation() {
|
||||
return first.buildEmptyAggregation();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
Releasables.close(aggregators, collectors);
|
||||
}
|
||||
}
|
||||
|
||||
protected final String name;
|
||||
protected final Type type;
|
||||
protected final AggregatorFactory<?> parent;
|
||||
|
@ -112,120 +238,7 @@ public abstract class AggregatorFactory<AF extends AggregatorFactory<AF>> {
|
|||
final Aggregator parent) throws IOException {
|
||||
final Aggregator first = factory.create(parent, true);
|
||||
final BigArrays bigArrays = context.bigArrays();
|
||||
return new Aggregator() {
|
||||
|
||||
ObjectArray<Aggregator> aggregators;
|
||||
ObjectArray<LeafBucketCollector> collectors;
|
||||
|
||||
{
|
||||
context.searchContext().addReleasable(this, Lifetime.PHASE);
|
||||
aggregators = bigArrays.newObjectArray(1);
|
||||
aggregators.set(0, first);
|
||||
collectors = bigArrays.newObjectArray(1);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String name() {
|
||||
return first.name();
|
||||
}
|
||||
|
||||
@Override
|
||||
public AggregationContext context() {
|
||||
return first.context();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Aggregator parent() {
|
||||
return first.parent();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean needsScores() {
|
||||
return first.needsScores();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Aggregator subAggregator(String name) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void preCollection() throws IOException {
|
||||
for (long i = 0; i < aggregators.size(); ++i) {
|
||||
final Aggregator aggregator = aggregators.get(i);
|
||||
if (aggregator != null) {
|
||||
aggregator.preCollection();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void postCollection() throws IOException {
|
||||
for (long i = 0; i < aggregators.size(); ++i) {
|
||||
final Aggregator aggregator = aggregators.get(i);
|
||||
if (aggregator != null) {
|
||||
aggregator.postCollection();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public LeafBucketCollector getLeafCollector(final LeafReaderContext ctx) {
|
||||
for (long i = 0; i < collectors.size(); ++i) {
|
||||
collectors.set(i, null);
|
||||
}
|
||||
return new LeafBucketCollector() {
|
||||
Scorer scorer;
|
||||
|
||||
@Override
|
||||
public void setScorer(Scorer scorer) throws IOException {
|
||||
this.scorer = scorer;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void collect(int doc, long bucket) throws IOException {
|
||||
aggregators = bigArrays.grow(aggregators, bucket + 1);
|
||||
collectors = bigArrays.grow(collectors, bucket + 1);
|
||||
|
||||
LeafBucketCollector collector = collectors.get(bucket);
|
||||
if (collector == null) {
|
||||
Aggregator aggregator = aggregators.get(bucket);
|
||||
if (aggregator == null) {
|
||||
aggregator = factory.create(parent, true);
|
||||
aggregator.preCollection();
|
||||
aggregators.set(bucket, aggregator);
|
||||
}
|
||||
collector = aggregator.getLeafCollector(ctx);
|
||||
collector.setScorer(scorer);
|
||||
collectors.set(bucket, collector);
|
||||
}
|
||||
collector.collect(doc, 0);
|
||||
}
|
||||
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public InternalAggregation buildAggregation(long bucket) throws IOException {
|
||||
if (bucket < aggregators.size()) {
|
||||
Aggregator aggregator = aggregators.get(bucket);
|
||||
if (aggregator != null) {
|
||||
return aggregator.buildAggregation(0);
|
||||
}
|
||||
}
|
||||
return buildEmptyAggregation();
|
||||
}
|
||||
|
||||
@Override
|
||||
public InternalAggregation buildEmptyAggregation() {
|
||||
return first.buildEmptyAggregation();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
Releasables.close(aggregators, collectors);
|
||||
}
|
||||
};
|
||||
return new MultiBucketAggregatorWrapper(bigArrays, context, parent, factory, first);
|
||||
}
|
||||
|
||||
}
|
|
@ -35,7 +35,6 @@ import org.elasticsearch.common.util.LongArray;
import org.elasticsearch.common.util.ObjectArray;
import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
import org.elasticsearch.index.fielddata.SortedNumericDoubleValues;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.LeafBucketCollector;
@ -20,7 +20,7 @@
package org.elasticsearch.search.aggregations.metrics.geocentroid;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.spatial.util.GeoEncodingUtils;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.util.BigArrays;
@ -82,9 +82,9 @@ public final class GeoCentroidAggregator extends MetricsAggregator {
counts.increment(bucket, valueCount);
// get the previous GeoPoint if a moving avg was computed
if (prevCounts > 0) {
final GeoPoint centroid = GeoPoint.fromIndexLong(centroids.get(bucket));
pt[0] = centroid.lon();
pt[1] = centroid.lat();
final long mortonCode = centroids.get(bucket);
pt[0] = GeoPointField.decodeLongitude(mortonCode);
pt[1] = GeoPointField.decodeLatitude(mortonCode);
}
// update the moving average
for (int i = 0; i < valueCount; ++i) {
@ -92,7 +92,9 @@ public final class GeoCentroidAggregator extends MetricsAggregator {
pt[0] = pt[0] + (value.getLon() - pt[0]) / ++prevCounts;
pt[1] = pt[1] + (value.getLat() - pt[1]) / prevCounts;
}
centroids.set(bucket, GeoEncodingUtils.mortonHash(pt[1], pt[0]));
// TODO: we do not need to interleave the lat and lon bits here
// should we just store them contiguously?
centroids.set(bucket, GeoPointField.encodeLatLon(pt[1], pt[0]));
}
}
};
@ -104,8 +106,10 @@ public final class GeoCentroidAggregator extends MetricsAggregator {
return buildEmptyAggregation();
}
final long bucketCount = counts.get(bucket);
final GeoPoint bucketCentroid = (bucketCount > 0) ? GeoPoint.fromIndexLong(centroids.get(bucket)) :
new GeoPoint(Double.NaN, Double.NaN);
final long mortonCode = centroids.get(bucket);
final GeoPoint bucketCentroid = (bucketCount > 0)
? new GeoPoint(GeoPointField.decodeLatitude(mortonCode), GeoPointField.decodeLongitude(mortonCode))
: null;
return new InternalGeoCentroid(name, bucketCentroid , bucketCount, pipelineAggregators(), metaData());
}
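The hunks above swap the morton-hash helpers for GeoPointField encode/decode but keep the same running (moving) average for the bucket centroid: each new point nudges the stored mean by (value - mean) / count, so no per-document values need to be retained. A minimal, self-contained sketch of that update rule (illustrative names, not Elasticsearch code):

// IncrementalMean.java - illustrative sketch of the moving-average update used above.
public final class IncrementalMean {
    private double mean;
    private long count;

    /** Folds one observation into the running mean without storing all values. */
    public void add(double value) {
        count += 1;
        mean += (value - mean) / count;
    }

    public double mean() {
        return mean;
    }

    public static void main(String[] args) {
        IncrementalMean lon = new IncrementalMean();
        for (double v : new double[] {10.0, 20.0, 30.0}) {
            lon.add(v);
        }
        System.out.println(lon.mean()); // prints 20.0
    }
}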
@ -19,7 +19,7 @@

package org.elasticsearch.search.aggregations.metrics.geocentroid;

import org.apache.lucene.spatial.util.GeoEncodingUtils;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@ -61,6 +61,7 @@ public class InternalGeoCentroid extends InternalMetricsAggregation implements G
public InternalGeoCentroid(String name, GeoPoint centroid, long count, List<PipelineAggregator>
pipelineAggregators, Map<String, Object> metaData) {
super(name, pipelineAggregators, metaData);
assert (centroid == null) == (count == 0);
this.centroid = centroid;
assert count >= 0;
this.count = count;
@ -68,7 +69,7 @@ public class InternalGeoCentroid extends InternalMetricsAggregation implements G

@Override
public GeoPoint centroid() {
return (centroid == null || Double.isNaN(centroid.lon()) ? null : centroid);
return centroid;
}

@Override
@ -128,7 +129,8 @@ public class InternalGeoCentroid extends InternalMetricsAggregation implements G
protected void doReadFrom(StreamInput in) throws IOException {
count = in.readVLong();
if (in.readBoolean()) {
centroid = GeoPoint.fromIndexLong(in.readLong());
final long hash = in.readLong();
centroid = new GeoPoint(GeoPointField.decodeLatitude(hash), GeoPointField.decodeLongitude(hash));
} else {
centroid = null;
}
@ -139,7 +141,8 @@ public class InternalGeoCentroid extends InternalMetricsAggregation implements G
out.writeVLong(count);
if (centroid != null) {
out.writeBoolean(true);
out.writeLong(GeoEncodingUtils.mortonHash(centroid.lat(), centroid.lon()));
// should we just write lat and lon separately?
out.writeLong(GeoPointField.encodeLatLon(centroid.lat(), centroid.lon()));
} else {
out.writeBoolean(false);
}
@ -51,8 +51,9 @@ import org.elasticsearch.search.fetch.FetchSearchResultProvider;
import org.elasticsearch.search.internal.InternalSearchHit;
import org.elasticsearch.search.internal.InternalSearchHits;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.profile.SearchProfileShardResults;
import org.elasticsearch.search.profile.ProfileShardResult;
import org.elasticsearch.search.profile.SearchProfileShardResults;
import org.elasticsearch.search.profile.query.QueryProfileShardResult;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.query.QuerySearchResultProvider;
import org.elasticsearch.search.suggest.Suggest;
@ -407,7 +408,7 @@ public class SearchPhaseController extends AbstractComponent {
//Collect profile results
SearchProfileShardResults shardResults = null;
if (!queryResults.isEmpty() && firstResult.profileResults() != null) {
Map<String, List<ProfileShardResult>> profileResults = new HashMap<>(queryResults.size());
Map<String, ProfileShardResult> profileResults = new HashMap<>(queryResults.size());
for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults) {
String key = entry.value.queryResult().shardTarget().toString();
profileResults.put(key, entry.value.queryResult().profileResults());
@ -175,7 +175,7 @@ public final class InnerHitsContext {

@Override
public boolean equals(Object obj) {
if (super.equals(obj) == false) {
if (sameClassAs(obj) == false) {
return false;
}
NestedChildrenQuery other = (NestedChildrenQuery) obj;
@ -187,7 +187,7 @@ public final class InnerHitsContext {

@Override
public int hashCode() {
int hash = super.hashCode();
int hash = classHash();
hash = 31 * hash + parentFilter.hashCode();
hash = 31 * hash + childFilter.hashCode();
hash = 31 * hash + docId;
@ -122,7 +122,7 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable {
weight = super.createWeight(query, needsScores);
} finally {
profile.stopAndRecordTime();
profiler.pollLastQuery();
profiler.pollLastElement();
}
return new ProfileWeight(query, weight, profile);
} else {
@ -68,6 +68,7 @@ import org.elasticsearch.search.profile.Profilers;
|
|||
import org.elasticsearch.search.query.QueryPhaseExecutionException;
|
||||
import org.elasticsearch.search.query.QuerySearchResult;
|
||||
import org.elasticsearch.search.rescore.RescoreSearchContext;
|
||||
import org.elasticsearch.search.slice.SliceBuilder;
|
||||
import org.elasticsearch.search.sort.SortAndFormats;
|
||||
import org.elasticsearch.search.suggest.SuggestionSearchContext;
|
||||
|
||||
|
@ -116,7 +117,7 @@ public class DefaultSearchContext extends SearchContext {
|
|||
private boolean trackScores = false; // when sorting, track scores as well...
|
||||
private FieldDoc searchAfter;
|
||||
// filter for sliced scroll
|
||||
private Query sliceFilter;
|
||||
private SliceBuilder sliceBuilder;
|
||||
|
||||
/**
|
||||
* The original query as sent by the user without the types and aliases
|
||||
|
@ -212,13 +213,23 @@ public class DefaultSearchContext extends SearchContext {
|
|||
if (rescoreContext.window() > maxWindow) {
|
||||
throw new QueryPhaseExecutionException(this, "Rescore window [" + rescoreContext.window() + "] is too large. It must "
|
||||
+ "be less than [" + maxWindow + "]. This prevents allocating massive heaps for storing the results to be "
|
||||
+ "rescored. This limit can be set by chaining the [" + IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey()
|
||||
+ "rescored. This limit can be set by changing the [" + IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey()
|
||||
+ "] index level setting.");
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (sliceBuilder != null) {
|
||||
int sliceLimit = indexService.getIndexSettings().getMaxSlicesPerScroll();
|
||||
int numSlices = sliceBuilder.getMax();
|
||||
if (numSlices > sliceLimit) {
|
||||
throw new QueryPhaseExecutionException(this, "The number of slices [" + numSlices + "] is too large. It must "
|
||||
+ "be less than [" + sliceLimit + "]. This limit can be set by changing the [" +
|
||||
IndexSettings.MAX_SLICES_PER_SCROLL.getKey() + "] index level setting.");
|
||||
}
|
||||
}
|
||||
|
||||
// initialize the filtering alias based on the provided filters
|
||||
aliasFilter = indexService.aliasFilter(queryShardContext, request.filteringAliases());
|
||||
|
||||
|
@ -257,9 +268,11 @@ public class DefaultSearchContext extends SearchContext {
|
|||
@Nullable
|
||||
public Query searchFilter(String[] types) {
|
||||
Query typesFilter = createSearchFilter(types, aliasFilter, mapperService().hasNested());
|
||||
if (sliceFilter == null) {
|
||||
if (sliceBuilder == null) {
|
||||
return typesFilter;
|
||||
}
|
||||
Query sliceFilter = sliceBuilder.toFilter(queryShardContext, shardTarget().getShardId().getId(),
|
||||
queryShardContext.getIndexSettings().getNumberOfShards());
|
||||
if (typesFilter == null) {
|
||||
return sliceFilter;
|
||||
}
|
||||
|
@ -562,8 +575,8 @@ public class DefaultSearchContext extends SearchContext {
|
|||
return searchAfter;
|
||||
}
|
||||
|
||||
public SearchContext sliceFilter(Query filter) {
|
||||
this.sliceFilter = filter;
|
||||
public SearchContext sliceBuilder(SliceBuilder sliceBuilder) {
|
||||
this.sliceBuilder = sliceBuilder;
|
||||
return this;
|
||||
}
|
||||
|
||||
|
|
|
@ -28,13 +28,12 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.aggregations.Aggregations;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.profile.SearchProfileShardResults;
import org.elasticsearch.search.profile.ProfileShardResult;
import org.elasticsearch.search.profile.SearchProfileShardResults;
import org.elasticsearch.search.suggest.Suggest;

import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Map;

import static org.elasticsearch.search.internal.InternalSearchHits.readSearchHits;
@ -99,7 +98,7 @@ public class InternalSearchResponse implements Streamable, ToXContent {
*
* @return Profile results
*/
public Map<String, List<ProfileShardResult>> profile() {
public Map<String, ProfileShardResult> profile() {
if (profileResults == null) {
return Collections.emptyMap();
}
@ -0,0 +1,209 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.search.profile;
|
||||
|
||||
import org.elasticsearch.search.profile.query.QueryProfileBreakdown;
|
||||
|
||||
import java.util.ArrayDeque;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.Deque;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
public abstract class AbstractInternalProfileTree<PB extends AbstractProfileBreakdown<?>, E> {
|
||||
|
||||
protected ArrayList<PB> timings;
|
||||
/** Maps the Query to it's list of children. This is basically the dependency tree */
|
||||
protected ArrayList<ArrayList<Integer>> tree;
|
||||
/** A list of the original queries, keyed by index position */
|
||||
protected ArrayList<E> elements;
|
||||
/** A list of top-level "roots". Each root can have its own tree of profiles */
|
||||
protected ArrayList<Integer> roots;
|
||||
/** A temporary stack used to record where we are in the dependency tree. */
|
||||
protected Deque<Integer> stack;
|
||||
private int currentToken = 0;
|
||||
|
||||
public AbstractInternalProfileTree() {
|
||||
timings = new ArrayList<>(10);
|
||||
stack = new ArrayDeque<>(10);
|
||||
tree = new ArrayList<>(10);
|
||||
elements = new ArrayList<>(10);
|
||||
roots = new ArrayList<>(10);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a {@link QueryProfileBreakdown} for a scoring query. Scoring queries (e.g. those
|
||||
* that are past the rewrite phase and are now being wrapped by createWeight() ) follow
|
||||
* a recursive progression. We can track the dependency tree by a simple stack
|
||||
*
|
||||
* The only hiccup is that the first scoring query will be identical to the last rewritten
|
||||
* query, so we need to take special care to fix that
|
||||
*
|
||||
* @param query The scoring query we wish to profile
|
||||
* @return A ProfileBreakdown for this query
|
||||
*/
|
||||
public PB getProfileBreakdown(E query) {
|
||||
int token = currentToken;
|
||||
|
||||
boolean stackEmpty = stack.isEmpty();
|
||||
|
||||
// If the stack is empty, we are a new root query
|
||||
if (stackEmpty) {
|
||||
|
||||
// We couldn't find a rewritten query to attach to, so just add it as a
|
||||
// top-level root. This is just a precaution: it really shouldn't happen.
|
||||
// We would only get here if a top-level query that never rewrites for some reason.
|
||||
roots.add(token);
|
||||
|
||||
// Increment the token since we are adding a new node, but notably, do not
|
||||
// updateParent() because this was added as a root
|
||||
currentToken += 1;
|
||||
stack.add(token);
|
||||
|
||||
return addDependencyNode(query, token);
|
||||
}
|
||||
|
||||
updateParent(token);
|
||||
|
||||
// Increment the token since we are adding a new node
|
||||
currentToken += 1;
|
||||
stack.add(token);
|
||||
|
||||
return addDependencyNode(query, token);
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper method to add a new node to the dependency tree.
|
||||
*
|
||||
* Initializes a new list in the dependency tree, saves the query and
|
||||
* generates a new {@link QueryProfileBreakdown} to track the timings of
|
||||
* this query
|
||||
*
|
||||
* @param element
|
||||
* The element to profile
|
||||
* @param token
|
||||
* The assigned token for this element
|
||||
* @return A ProfileBreakdown to profile this element
|
||||
*/
|
||||
private PB addDependencyNode(E element, int token) {
|
||||
|
||||
// Add a new slot in the dependency tree
|
||||
tree.add(new ArrayList<>(5));
|
||||
|
||||
// Save our query for lookup later
|
||||
elements.add(element);
|
||||
|
||||
PB queryTimings = createProfileBreakdown();
|
||||
timings.add(token, queryTimings);
|
||||
return queryTimings;
|
||||
}
|
||||
|
||||
protected abstract PB createProfileBreakdown();
|
||||
|
||||
/**
|
||||
* Removes the last (e.g. most recent) value on the stack
|
||||
*/
|
||||
public void pollLast() {
|
||||
stack.pollLast();
|
||||
}
|
||||
|
||||
/**
|
||||
* After the query has been run and profiled, we need to merge the flat timing map
|
||||
* with the dependency graph to build a data structure that mirrors the original
|
||||
* query tree
|
||||
*
|
||||
* @return a hierarchical representation of the profiled query tree
|
||||
*/
|
||||
public List<ProfileResult> getTree() {
|
||||
ArrayList<ProfileResult> results = new ArrayList<>(5);
|
||||
for (Integer root : roots) {
|
||||
results.add(doGetTree(root));
|
||||
}
|
||||
return results;
|
||||
}
|
||||
|
||||
/**
|
||||
* Recursive helper to finalize a node in the dependency tree
|
||||
* @param token The node we are currently finalizing
|
||||
* @return A hierarchical representation of the tree inclusive of children at this level
|
||||
*/
|
||||
private ProfileResult doGetTree(int token) {
|
||||
E element = elements.get(token);
|
||||
PB breakdown = timings.get(token);
|
||||
Map<String, Long> timings = breakdown.toTimingMap();
|
||||
List<Integer> children = tree.get(token);
|
||||
List<ProfileResult> childrenProfileResults = Collections.emptyList();
|
||||
|
||||
if (children != null) {
|
||||
childrenProfileResults = new ArrayList<>(children.size());
|
||||
for (Integer child : children) {
|
||||
ProfileResult childNode = doGetTree(child);
|
||||
childrenProfileResults.add(childNode);
|
||||
}
|
||||
}
|
||||
|
||||
// TODO this would be better done bottom-up instead of top-down to avoid
|
||||
// calculating the same times over and over...but worth the effort?
|
||||
long nodeTime = getNodeTime(timings, childrenProfileResults);
|
||||
String type = getTypeFromElement(element);
|
||||
String description = getDescriptionFromElement(element);
|
||||
return new ProfileResult(type, description, timings, childrenProfileResults, nodeTime);
|
||||
}
|
||||
|
||||
protected abstract String getTypeFromElement(E element);
|
||||
|
||||
protected abstract String getDescriptionFromElement(E element);
|
||||
|
||||
/**
|
||||
* Internal helper to add a child to the current parent node
|
||||
*
|
||||
* @param childToken The child to add to the current parent
|
||||
*/
|
||||
private void updateParent(int childToken) {
|
||||
Integer parent = stack.peekLast();
|
||||
ArrayList<Integer> parentNode = tree.get(parent);
|
||||
parentNode.add(childToken);
|
||||
tree.set(parent, parentNode);
|
||||
}
|
||||
|
||||
/**
|
||||
* Internal helper to calculate the time of a node, inclusive of children
|
||||
*
|
||||
* @param timings
|
||||
* A map of breakdown timing for the node
|
||||
* @param children
|
||||
* All children profile results at this node
|
||||
* @return The total time at this node, inclusive of children
|
||||
*/
|
||||
private static long getNodeTime(Map<String, Long> timings, List<ProfileResult> children) {
|
||||
long nodeTime = 0;
|
||||
for (long time : timings.values()) {
|
||||
nodeTime += time;
|
||||
}
|
||||
|
||||
// Then add up our children
|
||||
for (ProfileResult child : children) {
|
||||
nodeTime += getNodeTime(child.getTimeBreakdown(), child.getProfiledChildren());
|
||||
}
|
||||
return nodeTime;
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,54 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.search.profile;

import java.util.List;

public class AbstractProfiler<PB extends AbstractProfileBreakdown<?>, E> {

protected final AbstractInternalProfileTree<PB, E> profileTree;

public AbstractProfiler(AbstractInternalProfileTree<PB, E> profileTree) {
this.profileTree = profileTree;
}

/**
* Get the {@link AbstractProfileBreakdown} for the given element in the
* tree, potentially creating it if it did not exist.
*/
public PB getQueryBreakdown(E query) {
return profileTree.getProfileBreakdown(query);
}

/**
* Removes the last (e.g. most recent) element on the stack.
*/
public void pollLastElement() {
profileTree.pollLast();
}

/**
* @return a hierarchical representation of the profiled tree
*/
public List<ProfileResult> getTree() {
return profileTree.getTree();
}

}
@ -22,83 +22,50 @@ package org.elasticsearch.search.profile;
|
|||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.search.profile.query.CollectorResult;
|
||||
import org.elasticsearch.search.profile.aggregation.AggregationProfileShardResult;
|
||||
import org.elasticsearch.search.profile.query.QueryProfileShardResult;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* A container class to hold the profile results for a single shard in the request.
|
||||
* Contains a list of query profiles, a collector tree and a total rewrite tree.
|
||||
*/
|
||||
public final class ProfileShardResult implements Writeable, ToXContent {
|
||||
public class ProfileShardResult implements Writeable {
|
||||
|
||||
private final List<ProfileResult> queryProfileResults;
|
||||
private final List<QueryProfileShardResult> queryProfileResults;
|
||||
|
||||
private final CollectorResult profileCollector;
|
||||
private final AggregationProfileShardResult aggProfileShardResult;
|
||||
|
||||
private final long rewriteTime;
|
||||
|
||||
public ProfileShardResult(List<ProfileResult> queryProfileResults, long rewriteTime,
|
||||
CollectorResult profileCollector) {
|
||||
assert(profileCollector != null);
|
||||
this.queryProfileResults = queryProfileResults;
|
||||
this.profileCollector = profileCollector;
|
||||
this.rewriteTime = rewriteTime;
|
||||
public ProfileShardResult(List<QueryProfileShardResult> queryProfileResults, AggregationProfileShardResult aggProfileShardResult) {
|
||||
this.aggProfileShardResult = aggProfileShardResult;
|
||||
this.queryProfileResults = Collections.unmodifiableList(queryProfileResults);
|
||||
}
|
||||
|
||||
/**
|
||||
* Read from a stream.
|
||||
*/
|
||||
public ProfileShardResult(StreamInput in) throws IOException {
|
||||
int profileSize = in.readVInt();
|
||||
queryProfileResults = new ArrayList<>(profileSize);
|
||||
for (int j = 0; j < profileSize; j++) {
|
||||
queryProfileResults.add(new ProfileResult(in));
|
||||
List<QueryProfileShardResult> queryProfileResults = new ArrayList<>(profileSize);
|
||||
for (int i = 0; i < profileSize; i++) {
|
||||
QueryProfileShardResult result = new QueryProfileShardResult(in);
|
||||
queryProfileResults.add(result);
|
||||
}
|
||||
|
||||
profileCollector = new CollectorResult(in);
|
||||
rewriteTime = in.readLong();
|
||||
this.queryProfileResults = Collections.unmodifiableList(queryProfileResults);
|
||||
this.aggProfileShardResult = new AggregationProfileShardResult(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeVInt(queryProfileResults.size());
|
||||
for (ProfileResult p : queryProfileResults) {
|
||||
p.writeTo(out);
|
||||
for (QueryProfileShardResult queryShardResult : queryProfileResults) {
|
||||
queryShardResult.writeTo(out);
|
||||
}
|
||||
profileCollector.writeTo(out);
|
||||
out.writeLong(rewriteTime);
|
||||
aggProfileShardResult.writeTo(out);
|
||||
}
|
||||
|
||||
|
||||
public List<ProfileResult> getQueryResults() {
|
||||
return Collections.unmodifiableList(queryProfileResults);
|
||||
public List<QueryProfileShardResult> getQueryProfileResults() {
|
||||
return queryProfileResults;
|
||||
}
|
||||
|
||||
public long getRewriteTime() {
|
||||
return rewriteTime;
|
||||
}
|
||||
|
||||
public CollectorResult getCollectorResult() {
|
||||
return profileCollector;
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startArray("query");
|
||||
for (ProfileResult p : queryProfileResults) {
|
||||
p.toXContent(builder, params);
|
||||
}
|
||||
builder.endArray();
|
||||
builder.field("rewrite_time", rewriteTime);
|
||||
builder.startArray("collector");
|
||||
profileCollector.toXContent(builder, params);
|
||||
builder.endArray();
|
||||
return builder;
|
||||
public AggregationProfileShardResult getAggregationProfileResults() {
|
||||
return aggProfileShardResult;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -20,22 +20,25 @@
package org.elasticsearch.search.profile;

import org.elasticsearch.search.internal.ContextIndexSearcher;
import org.elasticsearch.search.profile.aggregation.AggregationProfiler;
import org.elasticsearch.search.profile.query.QueryProfiler;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

/** Wrapper around several {@link QueryProfiler}s that makes management easier. */
/** Wrapper around all the profilers that makes management easier. */
public final class Profilers {

private final ContextIndexSearcher searcher;
private final List<QueryProfiler> queryProfilers;
private final AggregationProfiler aggProfiler;

/** Sole constructor. This {@link Profilers} instance will initially wrap one {@link QueryProfiler}. */
public Profilers(ContextIndexSearcher searcher) {
this.searcher = searcher;
this.queryProfilers = new ArrayList<>();
this.aggProfiler = new AggregationProfiler();
addQueryProfiler();
}

@ -57,4 +60,9 @@ public final class Profilers {
return Collections.unmodifiableList(queryProfilers);
}

/** Return the {@link AggregationProfiler}. */
public AggregationProfiler getAggregationProfiler() {
return aggProfiler;
}

}
@ -24,6 +24,9 @@ import org.elasticsearch.common.io.stream.StreamOutput;
|
|||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.search.profile.aggregation.AggregationProfileShardResult;
|
||||
import org.elasticsearch.search.profile.aggregation.AggregationProfiler;
|
||||
import org.elasticsearch.search.profile.query.QueryProfileShardResult;
|
||||
import org.elasticsearch.search.profile.query.QueryProfiler;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -32,7 +35,6 @@ import java.util.Collections;
|
|||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
/**
|
||||
* A container class to hold all the profile results across all shards. Internally
|
||||
|
@ -40,17 +42,10 @@ import java.util.stream.Collectors;
|
|||
*/
|
||||
public final class SearchProfileShardResults implements Writeable, ToXContent{
|
||||
|
||||
private Map<String, List<ProfileShardResult>> shardResults;
|
||||
private Map<String, ProfileShardResult> shardResults;
|
||||
|
||||
public SearchProfileShardResults(Map<String, List<ProfileShardResult>> shardResults) {
|
||||
Map<String, List<ProfileShardResult>> transformed =
|
||||
shardResults.entrySet()
|
||||
.stream()
|
||||
.collect(Collectors.toMap(
|
||||
Map.Entry::getKey,
|
||||
e -> Collections.unmodifiableList(e.getValue()))
|
||||
);
|
||||
this.shardResults = Collections.unmodifiableMap(transformed);
|
||||
public SearchProfileShardResults(Map<String, ProfileShardResult> shardResults) {
|
||||
this.shardResults = Collections.unmodifiableMap(shardResults);
|
||||
}
|
||||
|
||||
public SearchProfileShardResults(StreamInput in) throws IOException {
|
||||
|
@ -59,33 +54,22 @@ public final class SearchProfileShardResults implements Writeable, ToXContent{
|
|||
|
||||
for (int i = 0; i < size; i++) {
|
||||
String key = in.readString();
|
||||
int shardResultsSize = in.readInt();
|
||||
|
||||
List<ProfileShardResult> shardResult = new ArrayList<>(shardResultsSize);
|
||||
|
||||
for (int j = 0; j < shardResultsSize; j++) {
|
||||
ProfileShardResult result = new ProfileShardResult(in);
|
||||
shardResult.add(result);
|
||||
}
|
||||
shardResults.put(key, Collections.unmodifiableList(shardResult));
|
||||
ProfileShardResult shardResult = new ProfileShardResult(in);
|
||||
shardResults.put(key, shardResult);
|
||||
}
|
||||
shardResults = Collections.unmodifiableMap(shardResults);
|
||||
}
|
||||
|
||||
public Map<String, List<ProfileShardResult>> getShardResults() {
|
||||
public Map<String, ProfileShardResult> getShardResults() {
|
||||
return this.shardResults;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeInt(shardResults.size());
|
||||
for (Map.Entry<String, List<ProfileShardResult>> entry : shardResults.entrySet()) {
|
||||
for (Map.Entry<String, ProfileShardResult> entry : shardResults.entrySet()) {
|
||||
out.writeString(entry.getKey());
|
||||
out.writeInt(entry.getValue().size());
|
||||
|
||||
for (ProfileShardResult result : entry.getValue()) {
|
||||
result.writeTo(out);
|
||||
}
|
||||
entry.getValue().writeTo(out);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -93,14 +77,18 @@ public final class SearchProfileShardResults implements Writeable, ToXContent{
|
|||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject("profile").startArray("shards");
|
||||
|
||||
for (Map.Entry<String, List<ProfileShardResult>> entry : shardResults.entrySet()) {
|
||||
builder.startObject().field("id",entry.getKey()).startArray("searches");
|
||||
for (ProfileShardResult result : entry.getValue()) {
|
||||
for (Map.Entry<String, ProfileShardResult> entry : shardResults.entrySet()) {
|
||||
builder.startObject();
|
||||
builder.field("id", entry.getKey());
|
||||
builder.startArray("searches");
|
||||
for (QueryProfileShardResult result : entry.getValue().getQueryProfileResults()) {
|
||||
builder.startObject();
|
||||
result.toXContent(builder, params);
|
||||
builder.endObject();
|
||||
}
|
||||
builder.endArray().endObject();
|
||||
builder.endArray();
|
||||
entry.getValue().getAggregationProfileResults().toXContent(builder, params);
|
||||
builder.endObject();
|
||||
}
|
||||
|
||||
builder.endArray().endObject();
|
||||
|
@ -112,16 +100,20 @@ public final class SearchProfileShardResults implements Writeable, ToXContent{
|
|||
* can be serialized to other nodes, emitted as JSON, etc.
|
||||
*
|
||||
* @param profilers
|
||||
* A list of Profilers to convert into
|
||||
* InternalProfileShardResults
|
||||
* @return A list of corresponding InternalProfileShardResults
|
||||
* The {@link Profilers} to convert into results
|
||||
* @return A {@link ProfileShardResult} representing the results for this
|
||||
* shard
|
||||
*/
|
||||
public static List<ProfileShardResult> buildShardResults(List<QueryProfiler> profilers) {
|
||||
List<ProfileShardResult> results = new ArrayList<>(profilers.size());
|
||||
for (QueryProfiler profiler : profilers) {
|
||||
ProfileShardResult result = new ProfileShardResult(profiler.getQueryTree(), profiler.getRewriteTime(), profiler.getCollector());
|
||||
results.add(result);
|
||||
public static ProfileShardResult buildShardResults(Profilers profilers) {
|
||||
List<QueryProfiler> queryProfilers = profilers.getQueryProfilers();
|
||||
AggregationProfiler aggProfiler = profilers.getAggregationProfiler();
|
||||
List<QueryProfileShardResult> queryResults = new ArrayList<>(queryProfilers.size());
|
||||
for (QueryProfiler queryProfiler : queryProfilers) {
|
||||
QueryProfileShardResult result = new QueryProfileShardResult(queryProfiler.getTree(), queryProfiler.getRewriteTime(),
|
||||
queryProfiler.getCollector());
|
||||
queryResults.add(result);
|
||||
}
|
||||
return results;
|
||||
AggregationProfileShardResult aggResults = new AggregationProfileShardResult(aggProfiler.getTree());
|
||||
return new ProfileShardResult(queryResults, aggResults);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -17,21 +17,14 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.painless;
|
||||
package org.elasticsearch.search.profile.aggregation;
|
||||
|
||||
import org.elasticsearch.painless.Variables.Reserved;
|
||||
import org.elasticsearch.painless.node.SSource;
|
||||
import org.elasticsearch.search.profile.AbstractProfileBreakdown;
|
||||
|
||||
/**
|
||||
* Runs the analysis phase of compilation using the Painless AST.
|
||||
*/
|
||||
final class Analyzer {
|
||||
static Variables analyze(Reserved shortcut, SSource root) {
|
||||
Variables variables = new Variables(shortcut);
|
||||
root.analyze(variables);
|
||||
public class AggregationProfileBreakdown extends AbstractProfileBreakdown<AggregationTimingType> {
|
||||
|
||||
return variables;
|
||||
public AggregationProfileBreakdown() {
|
||||
super(AggregationTimingType.values());
|
||||
}
|
||||
|
||||
private Analyzer() {}
|
||||
}
|
|
@ -0,0 +1,79 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.search.profile.aggregation;
|
||||
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.search.profile.ProfileResult;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* A container class to hold the profile results for a single shard in the request.
|
||||
* Contains a list of query profiles, a collector tree and a total rewrite tree.
|
||||
*/
|
||||
public final class AggregationProfileShardResult implements Writeable, ToXContent {
|
||||
|
||||
private final List<ProfileResult> aggProfileResults;
|
||||
|
||||
public AggregationProfileShardResult(List<ProfileResult> aggProfileResults) {
|
||||
this.aggProfileResults = aggProfileResults;
|
||||
}
|
||||
|
||||
/**
|
||||
* Read from a stream.
|
||||
*/
|
||||
public AggregationProfileShardResult(StreamInput in) throws IOException {
|
||||
int profileSize = in.readVInt();
|
||||
aggProfileResults = new ArrayList<>(profileSize);
|
||||
for (int j = 0; j < profileSize; j++) {
|
||||
aggProfileResults.add(new ProfileResult(in));
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeVInt(aggProfileResults.size());
|
||||
for (ProfileResult p : aggProfileResults) {
|
||||
p.writeTo(out);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
public List<ProfileResult> getProfileResults() {
|
||||
return Collections.unmodifiableList(aggProfileResults);
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startArray("aggregations");
|
||||
for (ProfileResult p : aggProfileResults) {
|
||||
p.toXContent(builder, params);
|
||||
}
|
||||
builder.endArray();
|
||||
return builder;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,57 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.search.profile.aggregation;

import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.profile.AbstractProfiler;

import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;

public class AggregationProfiler extends AbstractProfiler<AggregationProfileBreakdown, Aggregator> {

private final Map<List<String>, AggregationProfileBreakdown> profileBrakdownLookup = new HashMap<>();

public AggregationProfiler() {
super(new InternalAggregationProfileTree());
}

@Override
public AggregationProfileBreakdown getQueryBreakdown(Aggregator agg) {
List<String> path = getAggregatorPath(agg);
AggregationProfileBreakdown aggregationProfileBreakdown = profileBrakdownLookup.get(path);
if (aggregationProfileBreakdown == null) {
aggregationProfileBreakdown = super.getQueryBreakdown(agg);
profileBrakdownLookup.put(path, aggregationProfileBreakdown);
}
return aggregationProfileBreakdown;
}

public static List<String> getAggregatorPath(Aggregator agg) {
LinkedList<String> path = new LinkedList<>();
while (agg != null) {
path.addFirst(agg.name());
agg = agg.parent();
}
return path;
}
}
@ -0,0 +1,34 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.search.profile.aggregation;
|
||||
|
||||
import java.util.Locale;
|
||||
|
||||
public enum AggregationTimingType {
|
||||
INITIALIZE,
|
||||
COLLECT,
|
||||
BUILD_AGGREGATION,
|
||||
REDUCE;
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return name().toLowerCase(Locale.ROOT);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,46 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.search.profile.aggregation;
|
||||
|
||||
import org.elasticsearch.search.aggregations.Aggregator;
|
||||
import org.elasticsearch.search.aggregations.AggregatorFactory.MultiBucketAggregatorWrapper;
|
||||
import org.elasticsearch.search.profile.AbstractInternalProfileTree;
|
||||
|
||||
public class InternalAggregationProfileTree extends AbstractInternalProfileTree<AggregationProfileBreakdown, Aggregator> {
|
||||
|
||||
@Override
|
||||
protected AggregationProfileBreakdown createProfileBreakdown() {
|
||||
return new AggregationProfileBreakdown();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected String getTypeFromElement(Aggregator element) {
|
||||
if (element instanceof MultiBucketAggregatorWrapper) {
|
||||
return ((MultiBucketAggregatorWrapper) element).getWrappedClass().getName();
|
||||
}
|
||||
return element.getClass().getName();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected String getDescriptionFromElement(Aggregator element) {
|
||||
return element.name();
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,103 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.search.profile.aggregation;
|
||||
|
||||
import org.apache.lucene.index.LeafReaderContext;
|
||||
import org.elasticsearch.search.aggregations.Aggregator;
|
||||
import org.elasticsearch.search.aggregations.InternalAggregation;
|
||||
import org.elasticsearch.search.aggregations.LeafBucketCollector;
|
||||
import org.elasticsearch.search.aggregations.support.AggregationContext;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
public class ProfilingAggregator extends Aggregator {
|
||||
|
||||
private final Aggregator delegate;
|
||||
private final AggregationProfiler profiler;
|
||||
private AggregationProfileBreakdown profileBreakdown;
|
||||
|
||||
public ProfilingAggregator(Aggregator delegate, AggregationProfiler profiler) throws IOException {
|
||||
this.profiler = profiler;
|
||||
this.delegate = delegate;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
delegate.close();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean needsScores() {
|
||||
return delegate.needsScores();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String name() {
|
||||
return delegate.name();
|
||||
}
|
||||
|
||||
@Override
|
||||
public AggregationContext context() {
|
||||
return delegate.context();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Aggregator parent() {
|
||||
return delegate.parent();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Aggregator subAggregator(String name) {
|
||||
return delegate.subAggregator(name);
|
||||
}
|
||||
|
||||
@Override
|
||||
public InternalAggregation buildAggregation(long bucket) throws IOException {
|
||||
profileBreakdown.startTime(AggregationTimingType.BUILD_AGGREGATION);
|
||||
InternalAggregation result = delegate.buildAggregation(bucket);
|
||||
profileBreakdown.stopAndRecordTime();
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public InternalAggregation buildEmptyAggregation() {
|
||||
return delegate.buildEmptyAggregation();
|
||||
}
|
||||
|
||||
@Override
|
||||
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx) throws IOException {
|
||||
return new ProfilingLeafBucketCollector(delegate.getLeafCollector(ctx), profileBreakdown);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void preCollection() throws IOException {
|
||||
this.profileBreakdown = profiler.getQueryBreakdown(delegate);
|
||||
profileBreakdown.startTime(AggregationTimingType.INITIALIZE);
|
||||
delegate.preCollection();
|
||||
profileBreakdown.stopAndRecordTime();
|
||||
profiler.pollLastElement();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void postCollection() throws IOException {
|
||||
delegate.postCollection();
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,43 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.search.profile.aggregation;
|
||||
|
||||
import org.elasticsearch.search.aggregations.LeafBucketCollector;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
public class ProfilingLeafBucketCollector extends LeafBucketCollector {
|
||||
|
||||
private LeafBucketCollector delegate;
|
||||
private AggregationProfileBreakdown profileBreakdown;
|
||||
|
||||
public ProfilingLeafBucketCollector(LeafBucketCollector delegate, AggregationProfileBreakdown profileBreakdown) {
|
||||
this.delegate = delegate;
|
||||
this.profileBreakdown = profileBreakdown;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void collect(int doc, long bucket) throws IOException {
|
||||
profileBreakdown.startTime(AggregationTimingType.COLLECT);
|
||||
delegate.collect(doc, bucket);
|
||||
profileBreakdown.stopAndRecordTime();
|
||||
}
|
||||
|
||||
}
|
|
@@ -20,89 +20,33 @@
package org.elasticsearch.search.profile.query;

import org.apache.lucene.search.Query;
import org.elasticsearch.search.profile.AbstractInternalProfileTree;
import org.elasticsearch.search.profile.ProfileResult;

import java.util.ArrayList;
import java.util.Collections;
import java.util.Deque;
import java.util.List;
import java.util.Map;
import java.util.concurrent.LinkedBlockingDeque;

/**
 * This class tracks the dependency tree for queries (scoring and rewriting) and
 * generates {@link QueryProfileBreakdown} for each node in the tree. It also finalizes the tree
 * and returns a list of {@link ProfileResult} that can be serialized back to the client
 */
final class InternalQueryProfileTree {

    private ArrayList<QueryProfileBreakdown> timings;

    /** Maps the Query to it's list of children. This is basically the dependency tree */
    private ArrayList<ArrayList<Integer>> tree;

    /** A list of the original queries, keyed by index position */
    private ArrayList<Query> queries;

    /** A list of top-level "roots". Each root can have its own tree of profiles */
    private ArrayList<Integer> roots;
final class InternalQueryProfileTree extends AbstractInternalProfileTree<QueryProfileBreakdown, Query> {

    /** Rewrite time */
    private long rewriteTime;
    private long rewriteScratch;

    /** A temporary stack used to record where we are in the dependency tree. Only used by scoring queries */
    private Deque<Integer> stack;

    private int currentToken = 0;

    public InternalQueryProfileTree() {
        timings = new ArrayList<>(10);
        stack = new LinkedBlockingDeque<>(10);
        tree = new ArrayList<>(10);
        queries = new ArrayList<>(10);
        roots = new ArrayList<>(10);
    @Override
    protected QueryProfileBreakdown createProfileBreakdown() {
        return new QueryProfileBreakdown();
    }

    /**
     * Returns a {@link QueryProfileBreakdown} for a scoring query. Scoring queries (e.g. those
     * that are past the rewrite phase and are now being wrapped by createWeight() ) follow
     * a recursive progression. We can track the dependency tree by a simple stack
     *
     * The only hiccup is that the first scoring query will be identical to the last rewritten
     * query, so we need to take special care to fix that
     *
     * @param query The scoring query we wish to profile
     * @return A ProfileBreakdown for this query
     */
    public QueryProfileBreakdown getQueryBreakdown(Query query) {
        int token = currentToken;
    @Override
    protected String getTypeFromElement(Query query) {
        return query.getClass().getSimpleName();
    }

        boolean stackEmpty = stack.isEmpty();

        // If the stack is empty, we are a new root query
        if (stackEmpty) {

            // We couldn't find a rewritten query to attach to, so just add it as a
            // top-level root. This is just a precaution: it really shouldn't happen.
            // We would only get here if a top-level query that never rewrites for some reason.
            roots.add(token);

            // Increment the token since we are adding a new node, but notably, do not
            // updateParent() because this was added as a root
            currentToken += 1;
            stack.add(token);

            return addDependencyNode(query, token);
        }

        updateParent(token);

        // Increment the token since we are adding a new node
        currentToken += 1;
        stack.add(token);

        return addDependencyNode(query, token);
    @Override
    protected String getDescriptionFromElement(Query query) {
        return query.toString();
    }

    /**

@@ -128,113 +72,7 @@ final class InternalQueryProfileTree {
        return time;
    }

    /**
     * Helper method to add a new node to the dependency tree.
     *
     * Initializes a new list in the dependency tree, saves the query and
     * generates a new {@link QueryProfileBreakdown} to track the timings
     * of this query
     *
     * @param query The query to profile
     * @param token The assigned token for this query
     * @return A ProfileBreakdown to profile this query
     */
    private QueryProfileBreakdown addDependencyNode(Query query, int token) {

        // Add a new slot in the dependency tree
        tree.add(new ArrayList<>(5));

        // Save our query for lookup later
        queries.add(query);

        QueryProfileBreakdown queryTimings = new QueryProfileBreakdown();
        timings.add(token, queryTimings);
        return queryTimings;
    }

    /**
     * Removes the last (e.g. most recent) value on the stack
     */
    public void pollLast() {
        stack.pollLast();
    }

    /**
     * After the query has been run and profiled, we need to merge the flat timing map
     * with the dependency graph to build a data structure that mirrors the original
     * query tree
     *
     * @return a hierarchical representation of the profiled query tree
     */
    public List<ProfileResult> getQueryTree() {
        ArrayList<ProfileResult> results = new ArrayList<>(5);
        for (Integer root : roots) {
            results.add(doGetQueryTree(root));
        }
        return results;
    }

    /**
     * Recursive helper to finalize a node in the dependency tree
     * @param token The node we are currently finalizing
     * @return A hierarchical representation of the tree inclusive of children at this level
     */
    private ProfileResult doGetQueryTree(int token) {
        Query query = queries.get(token);
        QueryProfileBreakdown breakdown = timings.get(token);
        Map<String, Long> timings = breakdown.toTimingMap();
        List<Integer> children = tree.get(token);
        List<ProfileResult> childrenProfileResults = Collections.emptyList();

        if (children != null) {
            childrenProfileResults = new ArrayList<>(children.size());
            for (Integer child : children) {
                ProfileResult childNode = doGetQueryTree(child);
                childrenProfileResults.add(childNode);
            }
        }

        // TODO this would be better done bottom-up instead of top-down to avoid
        // calculating the same times over and over...but worth the effort?
        long nodeTime = getNodeTime(timings, childrenProfileResults);
        String queryDescription = query.getClass().getSimpleName();
        String luceneName = query.toString();
        return new ProfileResult(queryDescription, luceneName, timings, childrenProfileResults, nodeTime);
    }

    public long getRewriteTime() {
        return rewriteTime;
    }

    /**
     * Internal helper to add a child to the current parent node
     *
     * @param childToken The child to add to the current parent
     */
    private void updateParent(int childToken) {
        Integer parent = stack.peekLast();
        ArrayList<Integer> parentNode = tree.get(parent);
        parentNode.add(childToken);
        tree.set(parent, parentNode);
    }

    /**
     * Internal helper to calculate the time of a node, inclusive of children
     *
     * @param timings A map of breakdown timing for the node
     * @param children All children profile results at this node
     * @return The total time at this node, inclusive of children
     */
    private static long getNodeTime(Map<String, Long> timings, List<ProfileResult> children) {
        long nodeTime = 0;
        for (long time : timings.values()) {
            nodeTime += time;
        }

        // Then add up our children
        for (ProfileResult child : children) {
            nodeTime += getNodeTime(child.getTimeBreakdown(), child.getProfiledChildren());
        }
        return nodeTime;
    }
}

@@ -0,0 +1,104 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.search.profile.query;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.search.profile.ProfileResult;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

/**
 * A container class to hold the profile results for a single shard in the request.
 * Contains a list of query profiles, a collector tree and a total rewrite tree.
 */
public final class QueryProfileShardResult implements Writeable, ToXContent {

    private final List<ProfileResult> queryProfileResults;

    private final CollectorResult profileCollector;

    private final long rewriteTime;

    public QueryProfileShardResult(List<ProfileResult> queryProfileResults, long rewriteTime,
                                   CollectorResult profileCollector) {
        assert(profileCollector != null);
        this.queryProfileResults = queryProfileResults;
        this.profileCollector = profileCollector;
        this.rewriteTime = rewriteTime;
    }

    /**
     * Read from a stream.
     */
    public QueryProfileShardResult(StreamInput in) throws IOException {
        int profileSize = in.readVInt();
        queryProfileResults = new ArrayList<>(profileSize);
        for (int j = 0; j < profileSize; j++) {
            queryProfileResults.add(new ProfileResult(in));
        }

        profileCollector = new CollectorResult(in);
        rewriteTime = in.readLong();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeVInt(queryProfileResults.size());
        for (ProfileResult p : queryProfileResults) {
            p.writeTo(out);
        }
        profileCollector.writeTo(out);
        out.writeLong(rewriteTime);
    }

    public List<ProfileResult> getQueryResults() {
        return Collections.unmodifiableList(queryProfileResults);
    }

    public long getRewriteTime() {
        return rewriteTime;
    }

    public CollectorResult getCollectorResult() {
        return profileCollector;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startArray("query");
        for (ProfileResult p : queryProfileResults) {
            p.toXContent(builder, params);
        }
        builder.endArray();
        builder.field("rewrite_time", rewriteTime);
        builder.startArray("collector");
        profileCollector.toXContent(builder, params);
        builder.endArray();
        return builder;
    }
}

@@ -20,9 +20,8 @@
package org.elasticsearch.search.profile.query;

import org.apache.lucene.search.Query;
import org.elasticsearch.search.profile.ProfileResult;
import org.elasticsearch.search.profile.AbstractProfiler;

import java.util.List;
import java.util.Objects;

/**

@@ -36,16 +35,16 @@ import java.util.Objects;
 * request may execute two searches (query + global agg). A Profiler just
 * represents one of those
 */
public final class QueryProfiler {

    private final InternalQueryProfileTree queryTree = new InternalQueryProfileTree();
public final class QueryProfiler extends AbstractProfiler<QueryProfileBreakdown, Query> {

    /**
     * The root Collector used in the search
     */
    private InternalProfileCollector collector;

    public QueryProfiler() {}
    public QueryProfiler() {
        super(new InternalQueryProfileTree());
    }

    /** Set the collector that is associated with this profiler. */
    public void setCollector(InternalProfileCollector collector) {

@@ -55,21 +54,12 @@ public final class QueryProfiler {
        this.collector = Objects.requireNonNull(collector);
    }

    /**
     * Get the {@link QueryProfileBreakdown} for the given query, potentially creating it if it did not exist.
     * This should only be used for queries that will be undergoing scoring. Do not use it to profile the
     * rewriting phase
     */
    public QueryProfileBreakdown getQueryBreakdown(Query query) {
        return queryTree.getQueryBreakdown(query);
    }

    /**
     * Begin timing the rewrite phase of a request. All rewrites are accumulated together into a
     * single metric
     */
    public void startRewriteTime() {
        queryTree.startRewriteTime();
        ((InternalQueryProfileTree) profileTree).startRewriteTime();
    }

    /**

@@ -79,29 +69,14 @@ public final class QueryProfiler {
     * @return cumulative rewrite time
     */
    public long stopAndAddRewriteTime() {
        return queryTree.stopAndAddRewriteTime();
    }

    /**
     * Removes the last (e.g. most recent) query on the stack. This should only be called for scoring
     * queries, not rewritten queries
     */
    public void pollLastQuery() {
        queryTree.pollLast();
    }

    /**
     * @return a hierarchical representation of the profiled query tree
     */
    public List<ProfileResult> getQueryTree() {
        return queryTree.getQueryTree();
        return ((InternalQueryProfileTree) profileTree).stopAndAddRewriteTime();
    }

    /**
     * @return total time taken to rewrite all queries in this profile
     */
    public long getRewriteTime() {
        return queryTree.getRewriteTime();
        return ((InternalQueryProfileTree) profileTree).getRewriteTime();
    }

    /**

@@ -112,8 +112,8 @@ public class QueryPhase implements SearchPhase {
            aggregationPhase.execute(searchContext);

            if (searchContext.getProfilers() != null) {
                List<ProfileShardResult> shardResults = SearchProfileShardResults
                        .buildShardResults(searchContext.getProfilers().getQueryProfilers());
                ProfileShardResult shardResults = SearchProfileShardResults
                        .buildShardResults(searchContext.getProfilers());
                searchContext.queryResult().profileResults(shardResults);
            }
        }

@@ -385,8 +385,8 @@ public class QueryPhase implements SearchPhase {
            queryResult.topDocs(topDocsCallable.call(), sortValueFormats);

            if (searchContext.getProfilers() != null) {
                List<ProfileShardResult> shardResults = SearchProfileShardResults
                        .buildShardResults(searchContext.getProfilers().getQueryProfilers());
                ProfileShardResult shardResults = SearchProfileShardResults
                        .buildShardResults(searchContext.getProfilers());
                searchContext.queryResult().profileResults(shardResults);
            }

@@ -59,7 +59,7 @@ public class QuerySearchResult extends QuerySearchResultProvider {
    private Suggest suggest;
    private boolean searchTimedOut;
    private Boolean terminatedEarly = null;
    private List<ProfileShardResult> profileShardResults;
    private ProfileShardResult profileShardResults;

    public QuerySearchResult() {

@@ -143,7 +143,7 @@ public class QuerySearchResult extends QuerySearchResultProvider {
     * Returns the profiled results for this search, or potentially null if result was empty
     * @return The profiled results, or null
     */
    public @Nullable List<ProfileShardResult> profileResults() {
    public @Nullable ProfileShardResult profileResults() {
        return profileShardResults;
    }

@@ -151,7 +151,7 @@ public class QuerySearchResult extends QuerySearchResultProvider {
     * Sets the finalized profiling results for this query
     * @param shardResults The finalized profile
     */
    public void profileResults(List<ProfileShardResult> shardResults) {
    public void profileResults(ProfileShardResult shardResults) {
        this.profileShardResults = shardResults;
    }

@@ -237,12 +237,7 @@ public class QuerySearchResult extends QuerySearchResultProvider {
        terminatedEarly = in.readOptionalBoolean();

        if (in.getVersion().onOrAfter(Version.V_2_2_0) && in.readBoolean()) {
            int profileSize = in.readVInt();
            profileShardResults = new ArrayList<>(profileSize);
            for (int i = 0; i < profileSize; i++) {
                ProfileShardResult result = new ProfileShardResult(in);
                profileShardResults.add(result);
            }
            profileShardResults = new ProfileShardResult(in);
        }
    }

@@ -296,10 +291,7 @@ public class QuerySearchResult extends QuerySearchResultProvider {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            out.writeVInt(profileShardResults.size());
            for (ProfileShardResult shardResult : profileShardResults) {
                shardResult.writeTo(out);
            }
            profileShardResults.writeTo(out);
        }
    }
}

@@ -61,7 +61,7 @@ public abstract class SliceQuery extends Query {

    @Override
    public boolean equals(Object o) {
        if (super.equals(o) == false) {
        if (sameClassAs(o) == false) {
            return false;
        }
        SliceQuery that = (SliceQuery) o;

@@ -70,7 +70,7 @@ public abstract class SliceQuery extends Query {

    @Override
    public int hashCode() {
        return Objects.hash(super.hashCode(), field, id, max);
        return Objects.hash(classHash(), field, id, max);
    }

    @Override

@@ -74,11 +74,7 @@ public final class TermsSliceQuery extends SliceQuery {
            int hashCode = term.hashCode();
            if (contains(hashCode)) {
                docsEnum = te.postings(docsEnum, PostingsEnum.NONE);
                int docId = docsEnum.nextDoc();
                while (docId != DocIdSetIterator.NO_MORE_DOCS) {
                    builder.add(docId);
                    docId = docsEnum.nextDoc();
                }
                builder.add(docsEnum);
            }
        }
        return builder.build();

@@ -42,9 +42,6 @@ import org.jboss.netty.channel.ChannelFutureListener;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

/**
 *
 */
public class NettyTransportChannel implements TransportChannel {

    private final NettyTransport transport;

@@ -55,7 +52,7 @@ public class NettyTransportChannel implements TransportChannel {
    private final long requestId;
    private final String profileName;
    private final long reservedBytes;
    private final AtomicBoolean closed = new AtomicBoolean();
    private final AtomicBoolean released = new AtomicBoolean();

    public NettyTransportChannel(NettyTransport transport, TransportServiceAdapter transportServiceAdapter, String action, Channel channel,
                                 long requestId, Version version, String profileName, long reservedBytes) {

@@ -86,7 +83,7 @@ public class NettyTransportChannel implements TransportChannel {

    @Override
    public void sendResponse(TransportResponse response, TransportResponseOptions options) throws IOException {
        close();
        release();
        if (transport.compress) {
            options = TransportResponseOptions.builder(options).withCompress(transport.compress).build();
        }

@@ -128,7 +125,7 @@ public class NettyTransportChannel implements TransportChannel {

    @Override
    public void sendResponse(Throwable error) throws IOException {
        close();
        release();
        BytesStreamOutput stream = new BytesStreamOutput();
        stream.skip(NettyHeader.HEADER_SIZE);
        RemoteTransportException tx = new RemoteTransportException(

@@ -147,10 +144,10 @@ public class NettyTransportChannel implements TransportChannel {
        future.addListener(onResponseSentListener);
    }

    private void close() {
        // attempt to close once atomically
        if (closed.compareAndSet(false, true) == false) {
            throw new IllegalStateException("Channel is already closed");
    private void release() {
        // attempt to release once atomically
        if (released.compareAndSet(false, true) == false) {
            throw new IllegalStateException("reserved bytes are already released");
        }
        transport.inFlightRequestsBreaker().addWithoutBreaking(-reservedBytes);
    }

@@ -174,4 +171,5 @@ public class NettyTransportChannel implements TransportChannel {
    public Channel getChannel() {
        return channel;
    }

}

@@ -31,7 +31,7 @@ grant codeBase "${codebase.securesm-1.0.jar}" {
//// Very special jar permissions:
//// These are dangerous permissions that we don't want to grant to everything.

grant codeBase "${codebase.lucene-core-6.0.1.jar}" {
grant codeBase "${codebase.lucene-core-6.1.0-snapshot-3a57bea.jar}" {
  // needed to allow MMapDirectory's "unmap hack" (die unmap hack, die)
  // java 8 package
  permission java.lang.RuntimePermission "accessClassInPackage.sun.misc";

@@ -42,6 +42,11 @@ grant codeBase "${codebase.lucene-core-6.0.1.jar}" {
  permission java.lang.RuntimePermission "accessDeclaredMembers";
};

grant codeBase "${codebase.lucene-misc-6.1.0-snapshot-3a57bea.jar}" {
  // needed to allow shard shrinking to use hard-links if possible via lucenes HardlinkCopyDirectoryWrapper
  permission java.nio.file.LinkPermission "hard";
};

//// Everything else:

grant {

@@ -31,9 +31,11 @@ grant codeBase "${codebase.securemock-1.2.jar}" {
  permission java.lang.reflect.ReflectPermission "suppressAccessChecks";
};

grant codeBase "${codebase.lucene-test-framework-6.0.1.jar}" {
grant codeBase "${codebase.lucene-test-framework-6.1.0-snapshot-3a57bea.jar}" {
  // needed by RamUsageTester
  permission java.lang.reflect.ReflectPermission "suppressAccessChecks";
  // needed for testing hardlinks in StoreRecoveryTests since we install MockFS
  permission java.nio.file.LinkPermission "hard";
};

grant codeBase "${codebase.randomizedtesting-runner-2.3.2.jar}" {

|
@ -270,7 +270,8 @@ public class VersionTests extends ESTestCase {
|
|||
assertTrue("lucene versions must be " + other + " >= " + version,
|
||||
other.luceneVersion.onOrAfter(version.luceneVersion));
|
||||
}
|
||||
if (other.major == version.major && other.minor == version.minor) {
|
||||
if (other.isAlpha() == false && version.isAlpha() == false
|
||||
&& other.major == version.major && other.minor == version.minor) {
|
||||
assertEquals(other.luceneVersion.major, version.luceneVersion.major);
|
||||
assertEquals(other.luceneVersion.minor, version.luceneVersion.minor);
|
||||
// should we also assert the lucene bugfix version?
|
||||
|
|
|
@@ -0,0 +1,38 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.support.master;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.cluster.ClusterState;

public class TransportMasterNodeActionUtils {

    /**
     * Allows to directly call {@link TransportMasterNodeAction#masterOperation(MasterNodeRequest, ClusterState, ActionListener)} which is
     * a protected method.
     */
    public static <Request extends MasterNodeRequest<Request>, Response extends ActionResponse> void runMasterOperation(
            TransportMasterNodeAction<Request, Response> masterNodeAction, Request request, ClusterState clusterState,
            ActionListener<Response> actionListener) throws Exception {
        assert masterNodeAction.checkBlock(request, clusterState) == null;
        masterNodeAction.masterOperation(request, clusterState, actionListener);
    }
}

@@ -72,4 +72,8 @@ public class JavaVersionTests extends ESTestCase {
            assertFalse(JavaVersion.isValid(version));
        }
    }

    public void testJava8Compat() {
        assertEquals(JavaVersion.parse("1.8"), JavaVersion.parse("8"));
    }
}

@@ -1,4 +1,3 @@
/*
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with

@@ -46,6 +45,7 @@ import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Locale;
import java.util.SortedSet;

@@ -127,6 +127,44 @@ public class RestoreBackwardsCompatIT extends AbstractSnapshotIntegTestCase {
        }
    }

    public void testRestoreSnapshotWithMissingChecksum() throws Exception {
        final String repo = "test_repo";
        final String snapshot = "test_1";
        final String indexName = "index-2.3.4";
        final String repoFileId = "missing-checksum-repo-2.3.4";
        Path repoFile = getBwcIndicesPath().resolve(repoFileId + ".zip");
        URI repoFileUri = repoFile.toUri();
        URI repoJarUri = new URI("jar:" + repoFileUri.toString() + "!/repo/");
        logger.info("--> creating repository [{}] for repo file [{}]", repo, repoFileId);
        assertAcked(client().admin().cluster().preparePutRepository(repo)
            .setType("url")
            .setSettings(Settings.builder().put("url", repoJarUri.toString())));

        logger.info("--> get snapshot and check its indices");
        GetSnapshotsResponse getSnapshotsResponse = client().admin().cluster().prepareGetSnapshots(repo).setSnapshots(snapshot).get();
        assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(1));
        SnapshotInfo snapshotInfo = getSnapshotsResponse.getSnapshots().get(0);
        assertThat(snapshotInfo.indices(), equalTo(Arrays.asList(indexName)));

        logger.info("--> restoring snapshot");
        RestoreSnapshotResponse response = client().admin().cluster().prepareRestoreSnapshot(repo, snapshot).setRestoreGlobalState(true).setWaitForCompletion(true).get();
        assertThat(response.status(), equalTo(RestStatus.OK));
        RestoreInfo restoreInfo = response.getRestoreInfo();
        assertThat(restoreInfo.successfulShards(), greaterThan(0));
        assertThat(restoreInfo.successfulShards(), equalTo(restoreInfo.totalShards()));
        assertThat(restoreInfo.failedShards(), equalTo(0));
        String index = restoreInfo.indices().get(0);
        assertThat(index, equalTo(indexName));

        logger.info("--> check search");
        SearchResponse searchResponse = client().prepareSearch(index).get();
        assertThat(searchResponse.getHits().totalHits(), greaterThan(0L));

        logger.info("--> cleanup");
        cluster().wipeIndices(restoreInfo.indices().toArray(new String[restoreInfo.indices().size()]));
        cluster().wipeTemplates();
    }

    private List<String> repoVersions() throws Exception {
        return listRepoVersions("repo");
    }

@@ -34,6 +34,7 @@ import org.elasticsearch.common.transport.DummyTransportAddress;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.index.Index;
import org.elasticsearch.test.ESTestCase;
import org.junit.BeforeClass;

import java.util.ArrayList;
import java.util.Arrays;

@@ -54,11 +55,17 @@ public class ClusterChangedEventTests extends ESTestCase {

    private static final ClusterName TEST_CLUSTER_NAME = new ClusterName("test");
    private static final String NODE_ID_PREFIX = "node_";
    private static final String INITIAL_CLUSTER_ID = UUIDs.randomBase64UUID();
    // the initial indices which every cluster state test starts out with
    private static final List<Index> initialIndices = Arrays.asList(new Index("idx1", UUIDs.randomBase64UUID()),
        new Index("idx2", UUIDs.randomBase64UUID()),
        new Index("idx3", UUIDs.randomBase64UUID()));
    private static String INITIAL_CLUSTER_ID;
    private static List<Index> initialIndices;

    @BeforeClass
    public static void beforeClass() {
        INITIAL_CLUSTER_ID = UUIDs.randomBase64UUID();
        // the initial indices which every cluster state test starts out with
        initialIndices = Arrays.asList(new Index("idx1", UUIDs.randomBase64UUID()),
            new Index("idx2", UUIDs.randomBase64UUID()),
            new Index("idx3", UUIDs.randomBase64UUID()));
    }

    /**
     * Test basic properties of the ClusterChangedEvent class:

@@ -140,24 +147,24 @@ public class ClusterChangedEventTests extends ESTestCase {
     */
    public void testIndexMetaDataChange() {
        final int numNodesInCluster = 3;
        final ClusterState originalState = createState(numNodesInCluster, randomBoolean(), initialIndices);
        final ClusterState newState = originalState; // doesn't matter for this test, just need a non-null value
        final ClusterChangedEvent event = new ClusterChangedEvent("_na_", originalState, newState);
        final ClusterState state = createState(numNodesInCluster, randomBoolean(), initialIndices);

        // test when its not the same IndexMetaData
        final Index index = initialIndices.get(0);
        final IndexMetaData originalIndexMeta = originalState.metaData().index(index);
        final IndexMetaData originalIndexMeta = state.metaData().index(index);
        // make sure the metadata is actually on the cluster state
        assertNotNull("IndexMetaData for " + index + " should exist on the cluster state", originalIndexMeta);
        IndexMetaData newIndexMeta = createIndexMetadata(index, originalIndexMeta.getVersion() + 1);
        assertTrue("IndexMetaData with different version numbers must be considered changed", event.indexMetaDataChanged(newIndexMeta));
        assertTrue("IndexMetaData with different version numbers must be considered changed",
            ClusterChangedEvent.indexMetaDataChanged(originalIndexMeta, newIndexMeta));

        // test when it doesn't exist
        newIndexMeta = createIndexMetadata(new Index("doesntexist", UUIDs.randomBase64UUID()));
        assertTrue("IndexMetaData that didn't previously exist should be considered changed", event.indexMetaDataChanged(newIndexMeta));
        assertTrue("IndexMetaData that didn't previously exist should be considered changed",
            ClusterChangedEvent.indexMetaDataChanged(originalIndexMeta, newIndexMeta));

        // test when its the same IndexMetaData
        assertFalse("IndexMetaData should be the same", event.indexMetaDataChanged(originalIndexMeta));
        assertFalse("IndexMetaData should be the same", ClusterChangedEvent.indexMetaDataChanged(originalIndexMeta, originalIndexMeta));
    }

    /**

@@ -161,7 +161,7 @@ public class RandomAllocationDeciderTests extends ESAllocationTestCase {
        }
    }

    private static final class RandomAllocationDecider extends AllocationDecider {
    public static final class RandomAllocationDecider extends AllocationDecider {

        private final Random random;

@@ -20,7 +20,9 @@ package org.elasticsearch.common;

import org.elasticsearch.test.ESTestCase;

import java.security.SecureRandom;
import java.util.HashSet;
import java.util.Random;
import java.util.Set;

public class UUIDTests extends ESTestCase {

@@ -41,7 +43,18 @@ public class UUIDTests extends ESTestCase {
    }

    public void testThreadedRandomUUID() {
        testUUIDThreaded(randomUUIDGen);
        // we can not use a reproducible source of randomness for this
        // test, the test explicitly relies on each thread having a
        // unique source of randomness; thus, we fake what production
        // code does when using a RandomBasedUUIDGenerator
        testUUIDThreaded(new RandomBasedUUIDGenerator() {
            private final SecureRandom sr = SecureRandomHolder.INSTANCE;

            @Override
            public String getBase64UUID() {
                return getBase64UUID(sr);
            }
        });
    }

    Set<String> verifyUUIDSet(int count, UUIDGenerator uuidSource) {

@@ -98,6 +111,6 @@ public class UUIDTests extends ESTestCase {
        for (UUIDGenRunner runner : runners) {
            globalSet.addAll(runner.uuidSet);
        }
        assertEquals(count*uuids, globalSet.size());
        assertEquals(count * uuids, globalSet.size());
    }
}

@@ -54,7 +54,6 @@ public class NettyHttpRequestSizeLimitIT extends ESIntegTestCase {
            .build();
    }

    @TestLogging("_root:DEBUG,org.elasticsearch.common.breaker:TRACE,org.elasticsearch.test:TRACE,org.elasticsearch.transport:TRACE")
    public void testLimitsInFlightRequests() throws Exception {
        ensureGreen();

@@ -65,4 +65,22 @@ public class CharFilterTests extends ESTokenStreamTestCase {
        // Repeat one more time to make sure that char filter is reinitialized correctly
        assertTokenStreamContents(analyzer1.tokenStream("test", "<b>hello</b>!"), new String[]{"hello"});
    }

    public void testPatternReplaceCharFilter() throws Exception {
        Settings settings = Settings.builder()
            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
            .put("index.analysis.char_filter.my_mapping.type", "pattern_replace")
            .put("index.analysis.char_filter.my_mapping.pattern", "ab*")
            .put("index.analysis.char_filter.my_mapping.replacement", "oo")
            .put("index.analysis.char_filter.my_mapping.flags", "CASE_INSENSITIVE")
            .put("index.analysis.analyzer.custom_with_char_filter.tokenizer", "standard")
            .putArray("index.analysis.analyzer.custom_with_char_filter.char_filter", "my_mapping")
            .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
            .build();
        IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("test", settings);
        AnalysisService analysisService = new AnalysisRegistry(null, new Environment(settings)).build(idxSettings);
        NamedAnalyzer analyzer1 = analysisService.analyzer("custom_with_char_filter");

        assertTokenStreamContents(analyzer1.tokenStream("test", "faBBbBB aBbbbBf"), new String[]{"foo", "oof"});
    }
}

@@ -22,9 +22,9 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.apache.lucene.spatial.util.GeoUtils;
import org.elasticsearch.Version;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.geo.GeoUtils;

import static org.elasticsearch.test.geo.RandomShapeGenerator.randomPoint;
import static org.hamcrest.Matchers.allOf;

@@ -105,8 +105,8 @@ public abstract class AbstractGeoFieldDataTestCase extends AbstractFieldDataImpl
        assertThat(docCount, greaterThan(0));
        for (int i = 0; i < docCount; ++i) {
            final GeoPoint point = values.valueAt(i);
            assertThat(point.lat(), allOf(greaterThanOrEqualTo(GeoUtils.MIN_LAT_INCL), lessThanOrEqualTo(GeoUtils.MAX_LAT_INCL)));
            assertThat(point.lon(), allOf(greaterThanOrEqualTo(GeoUtils.MIN_LON_INCL), lessThanOrEqualTo(GeoUtils.MAX_LON_INCL)));
            assertThat(point.lat(), allOf(greaterThanOrEqualTo(GeoUtils.MIN_LAT), lessThanOrEqualTo(GeoUtils.MAX_LAT)));
            assertThat(point.lon(), allOf(greaterThanOrEqualTo(GeoUtils.MIN_LON), lessThanOrEqualTo(GeoUtils.MAX_LON)));
        }
    }
}