diff --git a/buildSrc/src/main/resources/deb/postinst.ftl b/buildSrc/src/main/resources/deb/postinst.ftl
index 5f67242c265..9acfc0f084e 100644
--- a/buildSrc/src/main/resources/deb/postinst.ftl
+++ b/buildSrc/src/main/resources/deb/postinst.ftl
@@ -1,2 +1,2 @@
-#!/bin/sh -e
+#!/bin/bash -e
 <% commands.each {command -> %><%= command %><% } %>
diff --git a/buildSrc/src/main/resources/deb/preinst.ftl b/buildSrc/src/main/resources/deb/preinst.ftl
index 5f67242c265..9acfc0f084e 100644
--- a/buildSrc/src/main/resources/deb/preinst.ftl
+++ b/buildSrc/src/main/resources/deb/preinst.ftl
@@ -1,2 +1,2 @@
-#!/bin/sh -e
+#!/bin/bash -e
 <% commands.each {command -> %><%= command %><% } %>
diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java
index 0e869d06149..9cc526d8f97 100644
--- a/core/src/main/java/org/elasticsearch/Version.java
+++ b/core/src/main/java/org/elasticsearch/Version.java
@@ -69,6 +69,8 @@ public class Version {
     public static final Version V_2_3_1 = new Version(V_2_3_1_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
     public static final int V_2_3_2_ID = 2030299;
     public static final Version V_2_3_2 = new Version(V_2_3_2_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
+    public static final int V_2_3_3_ID = 2030399;
+    public static final Version V_2_3_3 = new Version(V_2_3_3_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
     public static final int V_5_0_0_alpha1_ID = 5000001;
     public static final Version V_5_0_0_alpha1 = new Version(V_5_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
     public static final int V_5_0_0_alpha2_ID = 5000002;
@@ -94,6 +96,8 @@ public class Version {
                 return V_5_0_0_alpha2;
             case V_5_0_0_alpha1_ID:
                 return V_5_0_0_alpha1;
+            case V_2_3_3_ID:
+                return V_2_3_3;
             case V_2_3_2_ID:
                 return V_2_3_2;
             case V_2_3_1_ID:
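The version constants above follow a fixed integer encoding: major * 1,000,000 + minor * 10,000 + revision * 100 + a build marker, where 99 marks a GA release and low values mark pre-releases (compare V_5_0_0_alpha1_ID = 5000001). A purely illustrative decoder, not part of the patch:

    // Decodes the version-ID scheme used by Version.java above.
    public class VersionIdDemo {
        public static void main(String[] args) {
            int id = 2030399; // V_2_3_3_ID
            int major = id / 1_000_000;
            int minor = (id / 10_000) % 100;
            int revision = (id / 100) % 100;
            int build = id % 100; // 99 = release, 01, 02, ... = alpha/beta builds
            System.out.printf("%d.%d.%d (build marker %d)%n", major, minor, revision, build);
            // prints: 2.3.3 (build marker 99)
        }
    }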
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java
index 59cd95044cc..fe97302060d 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java
@@ -54,7 +54,7 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAction
                                             TransportService transportService, IndicesService indicesService,
                                             ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
         super(settings, ClearIndicesCacheAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
-                ClearIndicesCacheRequest::new, ThreadPool.Names.MANAGEMENT);
+                ClearIndicesCacheRequest::new, ThreadPool.Names.MANAGEMENT, false);
         this.indicesService = indicesService;
     }
diff --git a/core/src/main/java/org/elasticsearch/action/percolate/PercolateRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/percolate/PercolateRequestBuilder.java
index 9286601da69..9490abd0b68 100644
--- a/core/src/main/java/org/elasticsearch/action/percolate/PercolateRequestBuilder.java
+++ b/core/src/main/java/org/elasticsearch/action/percolate/PercolateRequestBuilder.java
@@ -27,7 +27,7 @@ import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.query.QueryBuilder;
-import org.elasticsearch.search.aggregations.AggregatorBuilder;
+import org.elasticsearch.search.aggregations.AggregationBuilder;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilder;
 import org.elasticsearch.search.highlight.HighlightBuilder;
 import org.elasticsearch.search.sort.SortBuilder;
@@ -165,9 +165,9 @@ public class PercolateRequestBuilder extends ActionRequestBuilder
-    public PercolateRequestBuilder addAggregation(AggregatorBuilder<?> aggregationBuilder) {
+    public PercolateRequestBuilder addAggregation(AggregationBuilder<?> aggregationBuilder) {
         sourceBuilder().addAggregation(aggregationBuilder);
         return this;
     }
diff --git a/core/src/main/java/org/elasticsearch/action/percolate/PercolateSourceBuilder.java b/core/src/main/java/org/elasticsearch/action/percolate/PercolateSourceBuilder.java
index a6ee99a476c..5c69d3be50b 100644
--- a/core/src/main/java/org/elasticsearch/action/percolate/PercolateSourceBuilder.java
+++ b/core/src/main/java/org/elasticsearch/action/percolate/PercolateSourceBuilder.java
@@ -29,7 +29,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.query.QueryBuilder;
-import org.elasticsearch.search.aggregations.AggregatorBuilder;
+import org.elasticsearch.search.aggregations.AggregationBuilder;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilder;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
 import org.elasticsearch.search.highlight.HighlightBuilder;
@@ -53,7 +53,7 @@ public class PercolateSourceBuilder extends ToXContentToBytes {
     private List<SortBuilder<?>> sorts;
     private Boolean trackScores;
     private HighlightBuilder highlightBuilder;
-    private List<AggregatorBuilder<?>> aggregationBuilders;
+    private List<AggregationBuilder<?>> aggregationBuilders;
     private List<PipelineAggregatorBuilder<?>> pipelineAggregationBuilders;
 
     /**
@@ -126,7 +126,7 @@ public class PercolateSourceBuilder extends ToXContentToBytes {
     /**
      * Add an aggregation definition.
      */
-    public PercolateSourceBuilder addAggregation(AggregatorBuilder<?> aggregationBuilder) {
+    public PercolateSourceBuilder addAggregation(AggregationBuilder<?> aggregationBuilder) {
         if (aggregationBuilders == null) {
             aggregationBuilders = new ArrayList<>();
         }
@@ -175,7 +175,7 @@ public class PercolateSourceBuilder extends ToXContentToBytes {
             builder.field("aggregations");
             builder.startObject();
             if (aggregationBuilders != null) {
-                for (AggregatorBuilder<?> aggregation : aggregationBuilders) {
+                for (AggregationBuilder<?> aggregation : aggregationBuilders) {
                     aggregation.toXContent(builder, params);
                 }
             }
diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java
index 9830f7be203..5732d43b4c7 100644
--- a/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java
+++ b/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java
@@ -28,7 +28,7 @@ import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.script.Script;
 import org.elasticsearch.script.Template;
 import org.elasticsearch.search.Scroll;
-import org.elasticsearch.search.aggregations.AggregatorBuilder;
+import org.elasticsearch.search.aggregations.AggregationBuilder;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilder;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
 import org.elasticsearch.search.highlight.HighlightBuilder;
@@ -373,7 +373,7 @@ public class SearchRequestBuilder extends ActionRequestBuilder
-    public SearchRequestBuilder addAggregation(AggregatorBuilder<?> aggregation) {
+    public SearchRequestBuilder addAggregation(AggregationBuilder<?> aggregation) {
         sourceBuilder().aggregation(aggregation);
         return this;
     }
diff --git a/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java b/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java
index 29863419a4e..3356d189143 100644
--- a/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java
+++ b/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java
@@ -84,6 +84,20 @@ public abstract class TransportBroadcastByNodeAction
 
+    public TransportBroadcastByNodeAction(
+            Settings settings,
+            String actionName,
+            ThreadPool threadPool,
+            ClusterService clusterService,
+            TransportService transportService,
+            ActionFilters actionFilters,
+            IndexNameExpressionResolver indexNameExpressionResolver,
+            Supplier<Request> request,
+            String executor) {
+        this(settings, actionName, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, request,
+            executor, true);
+    }
+
     public TransportBroadcastByNodeAction(
             Settings settings,
             String actionName,
@@ -93,7 +107,8 @@ public abstract class TransportBroadcastByNodeAction
             IndexNameExpressionResolver indexNameExpressionResolver,
             Supplier<Request> request,
-            String executor) {
+            String executor,
+            boolean canTripCircuitBreaker) {
         super(settings, actionName, threadPool, transportService, actionFilters, indexNameExpressionResolver, request);
 
         this.clusterService = clusterService;
@@ -101,7 +116,8 @@ public abstract class TransportBroadcastByNodeAction
             for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
-                if (newMapper.parentFieldMapper().type().equals(mapping.value.type())) {
-                    throw new IllegalArgumentException("can't add a _parent field that points to an already existing type");
+                String parentType = newMapper.parentFieldMapper().type();
+                if (parentType.equals(mapping.value.type()) &&
+                        indexService.mapperService().getParentTypes().contains(parentType) == false) {
+                    throw new IllegalArgumentException("can't add a _parent field that points to an " +
+                        "already existing type, that isn't already a parent");
                 }
             }
         }
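The boolean threaded through the constructors above lets individual broadcast-by-node actions opt out of the request circuit breaker; clear-cache passes false because freeing memory must remain possible on a node that is already under memory pressure. A minimal sketch of the delegating-constructor pattern, with invented names (BaseAction, ClearCacheAction are not the ES API):

    // Old signature keeps its behavior (canTripCircuitBreaker = true);
    // callers that must stay usable under memory pressure pass false.
    abstract class BaseAction {
        protected final boolean canTripCircuitBreaker;

        // Legacy constructor: preserves the old default.
        BaseAction(String name, String executor) {
            this(name, executor, true);
        }

        // New constructor: lets subclasses opt out of the breaker.
        BaseAction(String name, String executor, boolean canTripCircuitBreaker) {
            this.canTripCircuitBreaker = canTripCircuitBreaker;
        }
    }

    class ClearCacheAction extends BaseAction {
        ClearCacheAction() {
            // Clearing caches is what you do *because* memory is tight,
            // so it must not be rejected by the circuit breaker.
            super("indices:admin/cache/clear", "management", false);
        }
    }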
diff --git a/core/src/main/java/org/elasticsearch/common/rounding/DateTimeUnit.java b/core/src/main/java/org/elasticsearch/common/rounding/DateTimeUnit.java
index e02342ffdca..bd0c5506859 100644
--- a/core/src/main/java/org/elasticsearch/common/rounding/DateTimeUnit.java
+++ b/core/src/main/java/org/elasticsearch/common/rounding/DateTimeUnit.java
@@ -53,6 +53,15 @@ public enum DateTimeUnit {
         return field;
     }
 
+    /**
+     * @param unit the {@link DateTimeUnit} to check
+     * @return true if the unit is a day or longer
+     */
+    public static boolean isDayOrLonger(DateTimeUnit unit) {
+        return (unit == DateTimeUnit.HOUR_OF_DAY || unit == DateTimeUnit.MINUTES_OF_HOUR
+            || unit == DateTimeUnit.SECOND_OF_MINUTE) == false;
+    }
+
     public static DateTimeUnit resolve(byte id) {
         switch (id) {
             case 1: return WEEK_OF_WEEKYEAR;
diff --git a/core/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java b/core/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java
index 4189e412708..e0ffb89c8b0 100644
--- a/core/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java
+++ b/core/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java
@@ -46,8 +46,8 @@ public abstract class TimeZoneRounding extends Rounding {
 
     public static class Builder {
 
-        private DateTimeUnit unit;
-        private long interval = -1;
+        private final DateTimeUnit unit;
+        private final long interval;
 
         private DateTimeZone timeZone = DateTimeZone.UTC;
 
@@ -142,10 +142,15 @@ public abstract class TimeZoneRounding extends Rounding {
 
         @Override
         public long nextRoundingValue(long time) {
-            long timeLocal = time;
-            timeLocal = timeZone.convertUTCToLocal(time);
-            long nextInLocalTime = durationField.add(timeLocal, 1);
-            return timeZone.convertLocalToUTC(nextInLocalTime, false);
+            if (DateTimeUnit.isDayOrLonger(unit)) {
+                time = timeZone.convertUTCToLocal(time);
+            }
+            long next = durationField.add(time, 1);
+            if (DateTimeUnit.isDayOrLonger(unit)) {
+                return timeZone.convertLocalToUTC(next, false);
+            } else {
+                return next;
+            }
         }
 
         @Override
@@ -161,12 +166,12 @@ public abstract class TimeZoneRounding extends Rounding {
             out.writeByte(unit.id());
             out.writeString(timeZone.getID());
         }
-        
+
         @Override
         public int hashCode() {
             return Objects.hash(unit, timeZone);
         }
-        
+
         @Override
         public boolean equals(Object obj) {
             if (obj == null) {
@@ -236,12 +241,12 @@ public abstract class TimeZoneRounding extends Rounding {
             out.writeVLong(interval);
             out.writeString(timeZone.getID());
         }
-        
+
        @Override
         public int hashCode() {
             return Objects.hash(interval, timeZone);
         }
-        
+
         @Override
         public boolean equals(Object obj) {
             if (obj == null) {
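The rounding change matters around daylight-saving transitions: units of a day or longer are still rounded in local time, while sub-day units now use plain UTC arithmetic and can no longer land inside a DST gap. A small Joda-Time sketch of the two paths (the same library the class above builds on; the date and zone are chosen purely for illustration):

    import org.joda.time.DateTimeZone;

    public class DstRoundingDemo {
        public static void main(String[] args) {
            DateTimeZone tz = DateTimeZone.forID("Europe/Berlin");
            long utc = 1459036800000L; // 2016-03-27 00:00 UTC, the night Berlin springs forward
            // Day-or-longer path: convert to local time, step, convert back.
            long local = tz.convertUTCToLocal(utc);
            long nextDayUtc = tz.convertLocalToUTC(local + 24L * 60 * 60 * 1000, false);
            // Sub-day path: plain UTC arithmetic, immune to the DST gap.
            long nextHourUtc = utc + 60L * 60 * 1000;
            System.out.println(nextDayUtc - utc);  // 82800000: the local "day" is only 23h
            System.out.println(nextHourUtc - utc); // 3600000: always exactly one hour
        }
    }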
diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
index dad5f48ce27..5b6130281d4 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
@@ -87,6 +87,7 @@ import org.elasticsearch.repositories.fs.FsRepository;
 import org.elasticsearch.repositories.uri.URLRepository;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.search.SearchModule;
 import org.elasticsearch.search.SearchService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.Transport;
@@ -397,6 +398,9 @@ public final class ClusterSettings extends AbstractScopedSettings {
                     JvmGcMonitorService.ENABLED_SETTING,
                     JvmGcMonitorService.REFRESH_INTERVAL_SETTING,
                     JvmGcMonitorService.GC_SETTING,
+                    JvmGcMonitorService.GC_OVERHEAD_WARN_SETTING,
+                    JvmGcMonitorService.GC_OVERHEAD_INFO_SETTING,
+                    JvmGcMonitorService.GC_OVERHEAD_DEBUG_SETTING,
                     PageCacheRecycler.LIMIT_HEAP_SETTING,
                     PageCacheRecycler.WEIGHT_BYTES_SETTING,
                     PageCacheRecycler.WEIGHT_INT_SETTING,
@@ -417,6 +421,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
                     ResourceWatcherService.ENABLED,
                     ResourceWatcherService.RELOAD_INTERVAL_HIGH,
                     ResourceWatcherService.RELOAD_INTERVAL_MEDIUM,
-                    ResourceWatcherService.RELOAD_INTERVAL_LOW
+                    ResourceWatcherService.RELOAD_INTERVAL_LOW,
+                    SearchModule.INDICES_MAX_CLAUSE_COUNT_SETTING
             )));
     }
diff --git a/core/src/main/java/org/elasticsearch/common/settings/Setting.java b/core/src/main/java/org/elasticsearch/common/settings/Setting.java
index edd5d511f86..1efb65c18b1 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/Setting.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/Setting.java
@@ -18,19 +18,6 @@
  */
 package org.elasticsearch.common.settings;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.EnumSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.function.BiConsumer;
-import java.util.function.Consumer;
-import java.util.function.Function;
-import java.util.regex.Pattern;
-import java.util.stream.Collectors;
-
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.action.support.ToXContentToBytes;
@@ -50,6 +37,19 @@ import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.function.BiConsumer;
+import java.util.function.Consumer;
+import java.util.function.Function;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+
 /**
  * A setting. Encapsulates typical stuff like default value, parsing, and scope.
  * Some (SettingsProperty.Dynamic) can be modified at run time using the API.
@@ -504,7 +504,7 @@ public class Setting<T> extends ToXContentToBytes {
             throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
         }
         if (value > maxValue) {
-            throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be =< " + maxValue);
+            throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be <= " + maxValue);
         }
         return value;
     }
@@ -572,7 +572,7 @@ public class Setting<T> extends ToXContentToBytes {
             throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
         }
         if (value.bytes() > maxValue.bytes()) {
-            throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be =< " + maxValue);
+            throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be <= " + maxValue);
         }
         return value;
     }
diff --git a/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java b/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java
index 2e7acd6ae8c..2ed5ffd86cd 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java
@@ -65,7 +65,12 @@ public class SettingsModule extends AbstractModule {
     protected void configure() {
         final IndexScopedSettings indexScopedSettings = new IndexScopedSettings(settings, new HashSet<>(this.indexSettings.values()));
         final ClusterSettings clusterSettings = new ClusterSettings(settings, new HashSet<>(this.nodeSettings.values()));
-        Settings indexSettings = settings.filter((s) -> s.startsWith("index.") && clusterSettings.get(s) == null);
+        Settings indexSettings = settings.filter((s) -> (s.startsWith("index.") &&
+            // special case - we want to get Did you mean indices.query.bool.max_clause_count
+            // which means we need to by-pass this check for this setting
+            // TODO remove in 6.0!!
+            "index.query.bool.max_clause_count".equals(s) == false)
+            && clusterSettings.get(s) == null);
         if (indexSettings.isEmpty() == false) {
             try {
                 String separator = IntStream.range(0, 85).mapToObj(s -> "*").collect(Collectors.joining("")).trim();
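Both Setting hunks fix the operator in the out-of-range message ("=<" becomes "<="), and SettingsModule carves out index.query.bool.max_clause_count so users of the removed index-level name still get a "did you mean indices.query.bool.max_clause_count" hint. A standalone sketch of the min/max validation those messages come from (parse here is a stand-in, not the Setting API):

    final class BoundedInt {
        static int parse(String s, int minValue, int maxValue, String key) {
            int value = Integer.parseInt(s.trim());
            if (value < minValue) {
                throw new IllegalArgumentException(
                    "Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
            }
            if (value > maxValue) {
                // the patch fixes the operator in this message: "=<" -> "<="
                throw new IllegalArgumentException(
                    "Failed to parse value [" + s + "] for setting [" + key + "] must be <= " + maxValue);
            }
            return value;
        }

        public static void main(String[] args) {
            System.out.println(parse("1024", 1, Integer.MAX_VALUE, "indices.query.bool.max_clause_count")); // 1024
            parse("0", 1, Integer.MAX_VALUE, "indices.query.bool.max_clause_count"); // throws: must be >= 1
        }
    }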
diff --git a/core/src/main/java/org/elasticsearch/common/util/BloomFilter.java b/core/src/main/java/org/elasticsearch/common/util/BloomFilter.java
deleted file mode 100644
index 6c471cddb55..00000000000
--- a/core/src/main/java/org/elasticsearch/common/util/BloomFilter.java
+++ /dev/null
@@ -1,629 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.elasticsearch.common.util;
-
-import org.apache.lucene.store.DataInput;
-import org.apache.lucene.store.DataOutput;
-import org.apache.lucene.store.IndexInput;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.RamUsageEstimator;
-import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.hash.MurmurHash3;
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.unit.SizeValue;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Comparator;
-
-/**
- * A bloom filter. Inspired by Guava bloom filter implementation though with some optimizations.
- */
-public class BloomFilter {
-
-    /**
-     * A factory that can use different fpp based on size.
-     */
-    public static class Factory {
-
-        public static final Factory DEFAULT = buildDefault();
-
-        private static Factory buildDefault() {
-            // Some numbers:
-            // 10k =0.001: 140.4kb , 10 Hashes
-            // 10k =0.01 :  93.6kb ,  6 Hashes
-            // 100k=0.01 : 936.0kb ,  6 Hashes
-            // 100k=0.03 : 712.7kb ,  5 Hashes
-            // 500k=0.01 :   4.5mb ,  6 Hashes
-            // 500k=0.03 :   3.4mb ,  5 Hashes
-            // 500k=0.05 :   2.9mb ,  4 Hashes
-            // 1m=0.01   :   9.1mb ,  6 Hashes
-            // 1m=0.03   :   6.9mb ,  5 Hashes
-            // 1m=0.05   :   5.9mb ,  4 Hashes
-            // 5m=0.01   :  45.7mb ,  6 Hashes
-            // 5m=0.03   :  34.8mb ,  5 Hashes
-            // 5m=0.05   :  29.7mb ,  4 Hashes
-            // 50m=0.01  : 457.0mb ,  6 Hashes
-            // 50m=0.03  : 297.3mb ,  4 Hashes
-            // 50m=0.10  : 228.5mb ,  3 Hashes
-            return buildFromString("10k=0.01,1m=0.03");
-        }
-
-        /**
-         * Supports just passing fpp, as in "0.01", and also ranges, like "50k=0.01,1m=0.05". If
-         * its null, returns {@link #buildDefault()}.
-         */
-        public static Factory buildFromString(@Nullable String config) {
-            if (config == null) {
-                return buildDefault();
-            }
-            String[] sEntries = config.split(",");
-            if (sEntries.length == 0) {
-                if (config.length() > 0) {
-                    return new Factory(new Entry[]{new Entry(0, Double.parseDouble(config))});
-                }
-                return buildDefault();
-            }
-            Entry[] entries = new Entry[sEntries.length];
-            for (int i = 0; i < sEntries.length; i++) {
-                int index = sEntries[i].indexOf('=');
-                entries[i] = new Entry(
-                        (int) SizeValue.parseSizeValue(sEntries[i].substring(0, index).trim()).singles(),
-                        Double.parseDouble(sEntries[i].substring(index + 1).trim())
-                );
-            }
-            return new Factory(entries);
-        }
-
-        private final Entry[] entries;
-
-        public Factory(Entry[] entries) {
-            this.entries = entries;
-            // the order is from the upper most expected insertions to the lowest
-            Arrays.sort(this.entries, new Comparator<Entry>() {
-                @Override
-                public int compare(Entry o1, Entry o2) {
-                    return o2.expectedInsertions - o1.expectedInsertions;
-                }
-            });
-        }
-
-        public BloomFilter createFilter(int expectedInsertions) {
-            for (Entry entry : entries) {
-                if (expectedInsertions > entry.expectedInsertions) {
-                    return BloomFilter.create(expectedInsertions, entry.fpp);
-                }
-            }
-            return BloomFilter.create(expectedInsertions, 0.03);
-        }
-
-        public static class Entry {
-            public final int expectedInsertions;
-            public final double fpp;
-
-            Entry(int expectedInsertions, double fpp) {
-                this.expectedInsertions = expectedInsertions;
-                this.fpp = fpp;
-            }
-        }
-    }
-
-    /**
-     * Creates a bloom filter based on the with the expected number
-     * of insertions and expected false positive probability.
-     *
-     * @param expectedInsertions the number of expected insertions to the constructed
-     * @param fpp                the desired false positive probability (must be positive and less than 1.0)
-     */
-    public static BloomFilter create(int expectedInsertions, double fpp) {
-        return create(expectedInsertions, fpp, -1);
-    }
-
-    /**
-     * Creates a bloom filter based on the expected number of insertions, expected false positive probability,
-     * and number of hash functions.
-     *
-     * @param expectedInsertions the number of expected insertions to the constructed
-     * @param fpp                the desired false positive probability (must be positive and less than 1.0)
-     * @param numHashFunctions   the number of hash functions to use (must be less than or equal to 255)
-     */
-    public static BloomFilter create(int expectedInsertions, double fpp, int numHashFunctions) {
-        if (expectedInsertions == 0) {
-            expectedInsertions = 1;
-        }
-        /*
-         * TODO(user): Put a warning in the javadoc about tiny fpp values,
-         * since the resulting size is proportional to -log(p), but there is not
-         * much of a point after all, e.g. optimalM(1000, 0.0000000000000001) = 76680
-         * which is less that 10kb. Who cares!
-         */
-        long numBits = optimalNumOfBits(expectedInsertions, fpp);
-
-        // calculate the optimal number of hash functions
-        if (numHashFunctions == -1) {
-            numHashFunctions = optimalNumOfHashFunctions(expectedInsertions, numBits);
-        }
-
-        try {
-            return new BloomFilter(new BitArray(numBits), numHashFunctions, Hashing.DEFAULT);
-        } catch (IllegalArgumentException e) {
-            throw new IllegalArgumentException("Could not create BloomFilter of " + numBits + " bits", e);
-        }
-    }
-
-    public static void skipBloom(IndexInput in) throws IOException {
-        int version = in.readInt(); // we do nothing with this now..., defaults to 0
-        final int numLongs = in.readInt();
-        in.seek(in.getFilePointer() + (numLongs * 8) + 4 + 4); // filter + numberOfHashFunctions + hashType
-    }
-
-    public static BloomFilter deserialize(DataInput in) throws IOException {
-        int version = in.readInt(); // we do nothing with this now..., defaults to 0
-        int numLongs = in.readInt();
-        long[] data = new long[numLongs];
-        for (int i = 0; i < numLongs; i++) {
-            data[i] = in.readLong();
-        }
-        int numberOfHashFunctions = in.readInt();
-        int hashType = in.readInt();
-        return new BloomFilter(new BitArray(data), numberOfHashFunctions, Hashing.fromType(hashType));
-    }
-
-    public static void serilaize(BloomFilter filter, DataOutput out) throws IOException {
-        out.writeInt(0); // version
-        BitArray bits = filter.bits;
-        out.writeInt(bits.data.length);
-        for (long l : bits.data) {
-            out.writeLong(l);
-        }
-        out.writeInt(filter.numHashFunctions);
-        out.writeInt(filter.hashing.type()); // hashType
-    }
-
-    public static BloomFilter readFrom(StreamInput in) throws IOException {
-        int version = in.readVInt(); // we do nothing with this now..., defaults to 0
-        int numLongs = in.readVInt();
-        long[] data = new long[numLongs];
-        for (int i = 0; i < numLongs; i++) {
-            data[i] = in.readLong();
-        }
-        int numberOfHashFunctions = in.readVInt();
-        int hashType = in.readVInt(); // again, nothing to do now...
-        return new BloomFilter(new BitArray(data), numberOfHashFunctions, Hashing.fromType(hashType));
-    }
-
-    public static void writeTo(BloomFilter filter, StreamOutput out) throws IOException {
-        out.writeVInt(0); // version
-        BitArray bits = filter.bits;
-        out.writeVInt(bits.data.length);
-        for (long l : bits.data) {
-            out.writeLong(l);
-        }
-        out.writeVInt(filter.numHashFunctions);
-        out.writeVInt(filter.hashing.type()); // hashType
-    }
-
-    /**
-     * The bit set of the BloomFilter (not necessarily power of 2!)
-     */
-    final BitArray bits;
-    /**
-     * Number of hashes per element
-     */
-    final int numHashFunctions;
-
-    final Hashing hashing;
-
-    BloomFilter(BitArray bits, int numHashFunctions, Hashing hashing) {
-        this.bits = bits;
-        this.numHashFunctions = numHashFunctions;
-        this.hashing = hashing;
-        /*
-         * This only exists to forbid BFs that cannot use the compact persistent representation.
-         * If it ever throws, at a user who was not intending to use that representation, we should
-         * reconsider
-         */
-        if (numHashFunctions > 255) {
-            throw new IllegalArgumentException("Currently we don't allow BloomFilters that would use more than 255 hash functions");
-        }
-    }
-
-    public boolean put(BytesRef value) {
-        return hashing.put(value, numHashFunctions, bits);
-    }
-
-    public boolean mightContain(BytesRef value) {
-        return hashing.mightContain(value, numHashFunctions, bits);
-    }
-
-    public int getNumHashFunctions() {
-        return this.numHashFunctions;
-    }
-
-    public long getSizeInBytes() {
-        return bits.ramBytesUsed();
-    }
-
-    @Override
-    public int hashCode() {
-        return bits.hashCode() + numHashFunctions;
-    }
-
-    /*
-     * Cheat sheet:
-     *
-     * m: total bits
-     * n: expected insertions
-     * b: m/n, bits per insertion
-     *
-     * p: expected false positive probability
-     *
-     * 1) Optimal k = b * ln2
-     * 2) p = (1 - e ^ (-kn/m))^k
-     * 3) For optimal k: p = 2 ^ (-k) ~= 0.6185^b
-     * 4) For optimal k: m = -nlnp / ((ln2) ^ 2)
-     */
-
-    /**
-     * Computes the optimal k (number of hashes per element inserted in Bloom filter), given the
-     * expected insertions and total number of bits in the Bloom filter.
-     *
-     * See http://en.wikipedia.org/wiki/File:Bloom_filter_fp_probability.svg for the formula.
-     *
-     * @param n expected insertions (must be positive)
-     * @param m total number of bits in Bloom filter (must be positive)
-     */
-    static int optimalNumOfHashFunctions(long n, long m) {
-        return Math.max(1, (int) Math.round(m / n * Math.log(2)));
-    }
-
-    /**
-     * Computes m (total bits of Bloom filter) which is expected to achieve, for the specified
-     * expected insertions, the required false positive probability.
-     *
-     * See http://en.wikipedia.org/wiki/Bloom_filter#Probability_of_false_positives for the formula.
-     *
-     * @param n expected insertions (must be positive)
-     * @param p false positive rate (must be 0 < p < 1)
-     */
-    static long optimalNumOfBits(long n, double p) {
-        if (p == 0) {
-            p = Double.MIN_VALUE;
-        }
-        return (long) (-n * Math.log(p) / (Math.log(2) * Math.log(2)));
-    }
-
-    // Note: We use this instead of java.util.BitSet because we need access to the long[] data field
-    static final class BitArray {
-        final long[] data;
-        final long bitSize;
-        long bitCount;
-
-        BitArray(long bits) {
-            this(new long[size(bits)]);
-        }
-
-        private static int size(long bits) {
-            long quotient = bits / 64;
-            long remainder = bits - quotient * 64;
-            return Math.toIntExact(remainder == 0 ? quotient : 1 + quotient);
-        }
-
-        // Used by serialization
-        BitArray(long[] data) {
-            this.data = data;
-            long bitCount = 0;
-            for (long value : data) {
-                bitCount += Long.bitCount(value);
-            }
-            this.bitCount = bitCount;
-            this.bitSize = data.length * Long.SIZE;
-        }
-
-        /** Returns true if the bit changed value. */
-        boolean set(long index) {
-            if (!get(index)) {
-                data[(int) (index >>> 6)] |= (1L << index);
-                bitCount++;
-                return true;
-            }
-            return false;
-        }
-
-        boolean get(long index) {
-            return (data[(int) (index >>> 6)] & (1L << index)) != 0;
-        }
-
-        /** Number of bits */
-        long bitSize() {
-            return bitSize;
-        }
-
-        /** Number of set bits (1s) */
-        long bitCount() {
-            return bitCount;
-        }
-
-        BitArray copy() {
-            return new BitArray(data.clone());
-        }
-
-        /** Combines the two BitArrays using bitwise OR. */
-        void putAll(BitArray array) {
-            bitCount = 0;
-            for (int i = 0; i < data.length; i++) {
-                data[i] |= array.data[i];
-                bitCount += Long.bitCount(data[i]);
-            }
-        }
-
-        @Override public boolean equals(Object o) {
-            if (o instanceof BitArray) {
-                BitArray bitArray = (BitArray) o;
-                return Arrays.equals(data, bitArray.data);
-            }
-            return false;
-        }
-
-        @Override public int hashCode() {
-            return Arrays.hashCode(data);
-        }
-
-        public long ramBytesUsed() {
-            return Long.BYTES * data.length + RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + 16;
-        }
-    }
-
-    static enum Hashing {
-
-        V0() {
-            @Override
-            protected boolean put(BytesRef value, int numHashFunctions, BitArray bits) {
-                long bitSize = bits.bitSize();
-                long hash64 = hash3_x64_128(value.bytes, value.offset, value.length, 0);
-                int hash1 = (int) hash64;
-                int hash2 = (int) (hash64 >>> 32);
-                boolean bitsChanged = false;
-                for (int i = 1; i <= numHashFunctions; i++) {
-                    int nextHash = hash1 + i * hash2;
-                    if (nextHash < 0) {
-                        nextHash = ~nextHash;
-                    }
-                    bitsChanged |= bits.set(nextHash % bitSize);
-                }
-                return bitsChanged;
-            }
-
-            @Override
-            protected boolean mightContain(BytesRef value, int numHashFunctions, BitArray bits) {
-                long bitSize = bits.bitSize();
-                long hash64 = hash3_x64_128(value.bytes, value.offset, value.length, 0);
-                int hash1 = (int) hash64;
-                int hash2 = (int) (hash64 >>> 32);
-                for (int i = 1; i <= numHashFunctions; i++) {
-                    int nextHash = hash1 + i * hash2;
-                    if (nextHash < 0) {
-                        nextHash = ~nextHash;
-                    }
-                    if (!bits.get(nextHash % bitSize)) {
-                        return false;
-                    }
-                }
-                return true;
-            }
-
-            @Override
-            protected int type() {
-                return 0;
-            }
-        },
-        V1() {
-            @Override
-            protected boolean put(BytesRef value, int numHashFunctions, BitArray bits) {
-                long bitSize = bits.bitSize();
-                MurmurHash3.Hash128 hash128 = MurmurHash3.hash128(value.bytes, value.offset, value.length, 0, new MurmurHash3.Hash128());
-
-                boolean bitsChanged = false;
-                long combinedHash = hash128.h1;
-                for (int i = 0; i < numHashFunctions; i++) {
-                    // Make the combined hash positive and indexable
-                    bitsChanged |= bits.set((combinedHash & Long.MAX_VALUE) % bitSize);
-                    combinedHash += hash128.h2;
-                }
-                return bitsChanged;
-            }
-
-            @Override
-            protected boolean mightContain(BytesRef value, int numHashFunctions, BitArray bits) {
-                long bitSize = bits.bitSize();
-                MurmurHash3.Hash128 hash128 = MurmurHash3.hash128(value.bytes, value.offset, value.length, 0, new MurmurHash3.Hash128());
-
-                long combinedHash = hash128.h1;
-                for (int i = 0; i < numHashFunctions; i++) {
-                    // Make the combined hash positive and indexable
-                    if (!bits.get((combinedHash & Long.MAX_VALUE) % bitSize)) {
-                        return false;
-                    }
-                    combinedHash += hash128.h2;
-                }
-                return true;
-            }
-
-            @Override
-            protected int type() {
-                return 1;
-            }
-        }
-        ;
-
-        protected abstract boolean put(BytesRef value, int numHashFunctions, BitArray bits);
-
-        protected abstract boolean mightContain(BytesRef value, int numHashFunctions, BitArray bits);
-
-        protected abstract int type();
-
-        public static final Hashing DEFAULT = Hashing.V1;
-
-        public static Hashing fromType(int type) {
-            if (type == 0) {
-                return Hashing.V0;
-            } if (type == 1) {
-                return Hashing.V1;
-            } else {
-                throw new IllegalArgumentException("no hashing type matching " + type);
-            }
-        }
-    }
-
-    // START : MURMUR 3_128 USED FOR Hashing.V0
-    // NOTE: don't replace this code with the o.e.common.hashing.MurmurHash3 method which returns a different hash
-
-    protected static long getblock(byte[] key, int offset, int index) {
-        int i_8 = index << 3;
-        int blockOffset = offset + i_8;
-        return ((long) key[blockOffset + 0] & 0xff) + (((long) key[blockOffset + 1] & 0xff) << 8) +
-                (((long) key[blockOffset + 2] & 0xff) << 16) + (((long) key[blockOffset + 3] & 0xff) << 24) +
-                (((long) key[blockOffset + 4] & 0xff) << 32) + (((long) key[blockOffset + 5] & 0xff) << 40) +
-                (((long) key[blockOffset + 6] & 0xff) << 48) + (((long) key[blockOffset + 7] & 0xff) << 56);
-    }
-
-    protected static long rotl64(long v, int n) {
-        return ((v << n) | (v >>> (64 - n)));
-    }
-
-    protected static long fmix(long k) {
-        k ^= k >>> 33;
-        k *= 0xff51afd7ed558ccdL;
-        k ^= k >>> 33;
-        k *= 0xc4ceb9fe1a85ec53L;
-        k ^= k >>> 33;
-
-        return k;
-    }
-
-    @SuppressWarnings("fallthrough") // Uses fallthrough to implement a well know hashing algorithm
-    public static long hash3_x64_128(byte[] key, int offset, int length, long seed) {
-        final int nblocks = length >> 4; // Process as 128-bit blocks.
-
-        long h1 = seed;
-        long h2 = seed;
-
-        long c1 = 0x87c37b91114253d5L;
-        long c2 = 0x4cf5ad432745937fL;
-
-        //----------
-        // body
-
-        for (int i = 0; i < nblocks; i++) {
-            long k1 = getblock(key, offset, i * 2 + 0);
-            long k2 = getblock(key, offset, i * 2 + 1);
-
-            k1 *= c1;
-            k1 = rotl64(k1, 31);
-            k1 *= c2;
-            h1 ^= k1;
-
-            h1 = rotl64(h1, 27);
-            h1 += h2;
-            h1 = h1 * 5 + 0x52dce729;
-
-            k2 *= c2;
-            k2 = rotl64(k2, 33);
-            k2 *= c1;
-            h2 ^= k2;
-
-            h2 = rotl64(h2, 31);
-            h2 += h1;
-            h2 = h2 * 5 + 0x38495ab5;
-        }
-
-        //----------
-        // tail
-
-        // Advance offset to the unprocessed tail of the data.
-        offset += nblocks * 16;
-
-        long k1 = 0;
-        long k2 = 0;
-
-        switch (length & 15) {
-            case 15:
-                k2 ^= ((long) key[offset + 14]) << 48;
-            case 14:
-                k2 ^= ((long) key[offset + 13]) << 40;
-            case 13:
-                k2 ^= ((long) key[offset + 12]) << 32;
-            case 12:
-                k2 ^= ((long) key[offset + 11]) << 24;
-            case 11:
-                k2 ^= ((long) key[offset + 10]) << 16;
-            case 10:
-                k2 ^= ((long) key[offset + 9]) << 8;
-            case 9:
-                k2 ^= ((long) key[offset + 8]) << 0;
-                k2 *= c2;
-                k2 = rotl64(k2, 33);
-                k2 *= c1;
-                h2 ^= k2;
-
-            case 8:
-                k1 ^= ((long) key[offset + 7]) << 56;
-            case 7:
-                k1 ^= ((long) key[offset + 6]) << 48;
-            case 6:
-                k1 ^= ((long) key[offset + 5]) << 40;
-            case 5:
-                k1 ^= ((long) key[offset + 4]) << 32;
-            case 4:
-                k1 ^= ((long) key[offset + 3]) << 24;
-            case 3:
-                k1 ^= ((long) key[offset + 2]) << 16;
-            case 2:
-                k1 ^= ((long) key[offset + 1]) << 8;
-            case 1:
-                k1 ^= (key[offset]);
-                k1 *= c1;
-                k1 = rotl64(k1, 31);
-                k1 *= c2;
-                h1 ^= k1;
-        }
-
-        //----------
-        // finalization
-
-        h1 ^= length;
-        h2 ^= length;
-
-        h1 += h2;
-        h2 += h1;
-
-        h1 = fmix(h1);
-        h2 = fmix(h2);
-
-        h1 += h2;
-        h2 += h1;
-
-        //return (new long[]{h1, h2});
-        // SAME AS GUAVA, they take the first long out of the 128bit
-        return h1;
-    }
-
-    // END: MURMUR 3_128
-}
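For reference, the sizing formulas in the deleted class (m = -n*ln(p) / (ln 2)^2 bits, k = round(m/n * ln 2) hash functions) reproduce the numbers in its comment table. A self-contained recomputation of one table entry:

    // Recomputes a deleted-table entry from the class's own formulas.
    public class BloomSizing {
        static long optimalNumOfBits(long n, double p) {
            return (long) (-n * Math.log(p) / (Math.log(2) * Math.log(2)));
        }

        static int optimalNumOfHashFunctions(long n, long m) {
            // cast to double here for clarity; the deleted code used integer division first
            return Math.max(1, (int) Math.round((double) m / n * Math.log(2)));
        }

        public static void main(String[] args) {
            long n = 1_000_000;
            double p = 0.03;
            long m = optimalNumOfBits(n, p);         // ~7,298,440 bits (~6.9 Mbit)
            int k = optimalNumOfHashFunctions(n, m); // 5
            System.out.println(m + " bits, " + k + " hashes"); // matches "1m=0.03 : 6.9mb , 5 Hashes"
        }
    }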
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java b/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java
index c33eef4ac61..2d73df76f07 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java
@@ -321,7 +321,7 @@ public final class AnalysisRegistry implements Closeable {
             if (currentSettings.get("tokenizer") != null) {
                 factory = (T) new CustomAnalyzerProvider(settings, name, currentSettings);
             } else {
-                throw new IllegalArgumentException(toBuild + " [" + name + "] must have a type associated with it");
+                throw new IllegalArgumentException(toBuild + " [" + name + "] must specify either an analyzer type, or a tokenizer");
             }
         } else if (typeName.equals("custom")) {
             factory = (T) new CustomAnalyzerProvider(settings, name, currentSettings);
@@ -335,7 +335,7 @@ public final class AnalysisRegistry implements Closeable {
             factories.put(name, factory);
         } else {
             if (typeName == null) {
-                throw new IllegalArgumentException(toBuild + " [" + name + "] must have a type associated with it");
+                throw new IllegalArgumentException(toBuild + " [" + name + "] must specify either an analyzer type, or a tokenizer");
             }
             AnalysisModule.AnalysisProvider<T> type = providerMap.get(typeName);
             if (type == null) {
diff --git a/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java b/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java
index e215669761c..84278fa92b3 100644
--- a/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java
+++ b/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java
@@ -39,6 +39,7 @@ import java.nio.file.OpenOption;
 import java.nio.file.Path;
 import java.nio.file.StandardOpenOption;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.locks.ReentrantLock;
 
 public class TranslogWriter extends BaseTranslogReader implements Closeable {
 
@@ -60,7 +61,8 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable {
     private volatile long totalOffset;
 
     protected final AtomicBoolean closed = new AtomicBoolean(false);
-
+    // lock order synchronized(syncLock) -> synchronized(this)
+    private final Object syncLock = new Object();
 
     public TranslogWriter(ShardId shardId, long generation, FileChannel channel, Path path, ByteSizeValue bufferSize) throws IOException {
         super(generation, channel, path, channel.position());
@@ -146,23 +148,7 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable {
      * raising the exception.
      */
     public void sync() throws IOException {
-        if (syncNeeded()) {
-            synchronized (this) {
-                ensureOpen();
-                final long offsetToSync;
-                final int opsCounter;
-                try {
-                    outputStream.flush();
-                    offsetToSync = totalOffset;
-                    opsCounter = operationCounter;
-                    checkpoint(offsetToSync, opsCounter, generation, channel, path);
-                } catch (Throwable ex) {
-                    closeWithTragicEvent(ex);
-                    throw ex;
-                }
-                lastSyncedOffset = offsetToSync;
-            }
-        }
+        syncUpTo(Long.MAX_VALUE);
     }
 
     /**
@@ -229,9 +215,38 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable {
      * @return true if this call caused an actual sync operation
      */
     public boolean syncUpTo(long offset) throws IOException {
-        if (lastSyncedOffset < offset) {
-            sync();
-            return true;
+        if (lastSyncedOffset < offset && syncNeeded()) {
+            synchronized (syncLock) { // only one sync/checkpoint should happen concurrently but we wait
+                if (lastSyncedOffset < offset && syncNeeded()) {
+                    // double checked locking - we don't want to fsync unless we have to and now that we have
+                    // the lock we should check again since if this code is busy we might have fsynced enough already
+                    final long offsetToSync;
+                    final int opsCounter;
+                    synchronized (this) {
+                        ensureOpen();
+                        try {
+                            outputStream.flush();
+                            offsetToSync = totalOffset;
+                            opsCounter = operationCounter;
+                        } catch (Throwable ex) {
+                            closeWithTragicEvent(ex);
+                            throw ex;
+                        }
+                    }
+                    // now do the actual fsync outside of the synchronized block such that
+                    // we can continue writing to the buffer etc.
+                    try {
+                        channel.force(false);
+                        writeCheckpoint(offsetToSync, opsCounter, path.getParent(), generation, StandardOpenOption.WRITE);
+                    } catch (Throwable ex) {
+                        closeWithTragicEvent(ex);
+                        throw ex;
+                    }
+                    assert lastSyncedOffset <= offsetToSync : "illegal state: " + lastSyncedOffset + " <= " + offsetToSync;
+                    lastSyncedOffset = offsetToSync; // write protected by syncLock
+                    return true;
+                }
+            }
         }
         return false;
     }
@@ -254,11 +269,6 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable {
         Channels.readFromFileChannelWithEofException(channel, position, targetBuffer);
     }
 
-    private synchronized void checkpoint(long lastSyncPosition, int operationCounter, long generation, FileChannel translogFileChannel, Path translogFilePath) throws IOException {
-        translogFileChannel.force(false);
-        writeCheckpoint(lastSyncPosition, operationCounter, translogFilePath.getParent(), generation, StandardOpenOption.WRITE);
-    }
-
     private static void writeCheckpoint(long syncPosition, int numOperations, Path translogFile, long generation, OpenOption... options) throws IOException {
         final Path checkpointFile = translogFile.resolve(Translog.CHECKPOINT_FILE_NAME);
         Checkpoint checkpoint = new Checkpoint(syncPosition, numOperations, generation);
@@ -269,7 +279,7 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable {
 
         static final ChannelFactory DEFAULT = new ChannelFactory();
 
-        // only for testing until we have a disk-full FileSystemt
+        // only for testing until we have a disk-full FileSystem
        public FileChannel open(Path file) throws IOException {
             return FileChannel.open(file, StandardOpenOption.WRITE, StandardOpenOption.READ, StandardOpenOption.CREATE_NEW);
         }
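The syncUpTo rework is a double-checked pattern over two locks: syncLock serializes fsync/checkpoint work, while the writer's own monitor is held only long enough to flush the buffer and snapshot the offset, so indexing threads keep writing during the slow fsync. The shape in isolation, with placeholder I/O (flushAndSnapshot and fsync stand in for the real channel operations):

    // Skeleton of the two-lock sync pattern used above; illustrative only.
    class SyncSkeleton {
        private final Object syncLock = new Object(); // lock order: syncLock -> this
        private volatile long lastSyncedOffset;
        private long totalOffset;

        boolean syncUpTo(long offset) {
            if (lastSyncedOffset < offset) {             // cheap pre-check, no lock
                synchronized (syncLock) {                // one fsync at a time
                    if (lastSyncedOffset < offset) {     // re-check: another thread may have synced enough
                        final long offsetToSync;
                        synchronized (this) {            // short critical section: flush + snapshot
                            offsetToSync = flushAndSnapshot();
                        }
                        fsync();                         // slow part, outside the writer monitor
                        lastSyncedOffset = offsetToSync; // write protected by syncLock
                        return true;
                    }
                }
            }
            return false;
        }

        private long flushAndSnapshot() { return totalOffset; }
        private void fsync() { /* channel.force(false) in the real class */ }
    }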
diff --git a/core/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java b/core/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java
index caa97ea7387..641dc3a5bb3 100644
--- a/core/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java
+++ b/core/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java
@@ -396,12 +396,14 @@ public class FsInfo implements Iterable<FsInfo.Path>, Writeable, ToXContent {
                 builder.endObject();
             }
             builder.endArray();
+
             builder.startObject("total");
             builder.field(OPERATIONS, totalOperations);
             builder.field(READ_OPERATIONS, totalReadOperations);
             builder.field(WRITE_OPERATIONS, totalWriteOperations);
             builder.field(READ_KILOBYTES, totalReadKilobytes);
             builder.field(WRITE_KILOBYTES, totalWriteKilobytes);
+            builder.endObject();
         }
         return builder;
     }
diff --git a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmGcMonitorService.java b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmGcMonitorService.java
index 01b4c68537e..ac75eb93aaa 100644
--- a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmGcMonitorService.java
+++ b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmGcMonitorService.java
@@ -31,6 +31,7 @@ import org.elasticsearch.monitor.jvm.JvmStats.GarbageCollector;
 import org.elasticsearch.threadpool.ThreadPool;
 
 import java.util.HashMap;
+import java.util.Locale;
 import java.util.Map;
 import java.util.Objects;
 import java.util.concurrent.ScheduledFuture;
@@ -45,6 +46,7 @@ public class JvmGcMonitorService extends AbstractLifecycleComponent<JvmGcMonitorService> {
 
     private final Map<String, GcThreshold> gcThresholds;
+    private final GcOverheadThreshold gcOverheadThreshold;
 
     private volatile ScheduledFuture<?> scheduledFuture;
 
@@ -57,6 +59,27 @@ public class JvmGcMonitorService extends AbstractLifecycleComponent<JvmGcMonitorService> {
     public final static Setting<Settings> GC_SETTING = Setting.groupSetting(GC_COLLECTOR_PREFIX, Property.NodeScope);
 
+    public final static Setting<Integer> GC_OVERHEAD_WARN_SETTING =
+        Setting.intSetting("monitor.jvm.gc.overhead.warn", 50, 0, 100, Property.NodeScope);
+    public final static Setting<Integer> GC_OVERHEAD_INFO_SETTING =
+        Setting.intSetting("monitor.jvm.gc.overhead.info", 25, 0, 100, Property.NodeScope);
+    public final static Setting<Integer> GC_OVERHEAD_DEBUG_SETTING =
+        Setting.intSetting("monitor.jvm.gc.overhead.debug", 10, 0, 100, Property.NodeScope);
+
+    static class GcOverheadThreshold {
+        final int warnThreshold;
+        final int infoThreshold;
+        final int debugThreshold;
+
+        public GcOverheadThreshold(final int warnThreshold, final int infoThreshold, final int debugThreshold) {
+            this.warnThreshold = warnThreshold;
+            this.infoThreshold = infoThreshold;
+            this.debugThreshold = debugThreshold;
+        }
+    }
+
+
     static class GcThreshold {
         public final String name;
         public final long warnThreshold;
@@ -102,7 +125,42 @@ public class JvmGcMonitorService extends AbstractLifecycleComponent<JvmGcMonitorService> {
-        "[gc][{}][{}][{}] duration [{}], collections [{}]/[{}], total [{}]/[{}], memory [{}]->[{}]/[{}], all_pools {}";
-
     @Override
     protected void doStart() {
         if (!enabled) {
             return;
         }
-        scheduledFuture = threadPool.scheduleWithFixedDelay(new JvmMonitor(gcThresholds) {
+        scheduledFuture = threadPool.scheduleWithFixedDelay(new JvmMonitor(gcThresholds, gcOverheadThreshold) {
             @Override
             void onMonitorFailure(Throwable t) {
                 logger.debug("failed to monitor", t);
@@ -138,9 +193,17 @@ public class JvmGcMonitorService extends AbstractLifecycleComponent<JvmGcMonitorService> {
+    private static final String SLOW_GC_LOG_MESSAGE =
+        "[gc][{}][{}][{}] duration [{}], collections [{}]/[{}], total [{}]/[{}], memory [{}]->[{}]/[{}], all_pools {}";
+
+    static void logSlowGc(
+        final ESLogger logger,
+        final JvmMonitor.Threshold threshold,
@@ -162,7 +225,7 @@ public class JvmGcMonitorService extends AbstractLifecycleComponent<JvmGcMonitorService> {
-        private final Map<String, GcThreshold> gcThresholds;
+        private final Map<String, GcThreshold> gcThresholds;
+        final GcOverheadThreshold gcOverheadThreshold;
 
-        public JvmMonitor(Map<String, GcThreshold> gcThresholds) {
+        public JvmMonitor(final Map<String, GcThreshold> gcThresholds, final GcOverheadThreshold gcOverheadThreshold) {
             this.gcThresholds = Objects.requireNonNull(gcThresholds);
+            this.gcOverheadThreshold = Objects.requireNonNull(gcOverheadThreshold);
         }
 
         @Override
         public void run() {
             try {
-                monitorLongGc();
+                monitorGc();
             } catch (Throwable t) {
                 onMonitorFailure(t);
             }
@@ -304,12 +396,21 @@ public class JvmGcMonitorService extends AbstractLifecycleComponent<JvmGcMonitorService> {
+            if (fraction >= gcOverheadThreshold.warnThreshold) {
+                overheadThreshold = Threshold.WARN;
+            } else if (fraction >= gcOverheadThreshold.infoThreshold) {
+                overheadThreshold = Threshold.INFO;
+            } else if (fraction >= gcOverheadThreshold.debugThreshold) {
+                overheadThreshold = Threshold.DEBUG;
+            }
+            if (overheadThreshold != null) {
+                onGcOverhead(overheadThreshold, current, elapsed, seq);
+            }
         }
 
         JvmStats jvmStats() {
@@ -364,6 +488,8 @@ public class JvmGcMonitorService extends AbstractLifecycleComponent<JvmGcMonitorService> {
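The new overhead monitoring computes the percentage of wall-clock time spent in collections since the last observation and maps it onto the warn/info/debug thresholds registered above (defaults 50/25/10). The comparison cascade, reduced to its core (names and the standalone framing are illustrative):

    // Mirrors the threshold cascade visible in the hunk above; values are the setting defaults.
    public class GcOverheadDemo {
        enum Threshold { DEBUG, INFO, WARN }

        static Threshold classify(long collectionTimeMillis, long elapsedMillis) {
            int fraction = (int) (100 * collectionTimeMillis / elapsedMillis);
            if (fraction >= 50) return Threshold.WARN;  // monitor.jvm.gc.overhead.warn
            if (fraction >= 25) return Threshold.INFO;  // monitor.jvm.gc.overhead.info
            if (fraction >= 10) return Threshold.DEBUG; // monitor.jvm.gc.overhead.debug
            return null;                                // below all thresholds: no log line
        }

        public static void main(String[] args) {
            System.out.println(classify(600, 1000)); // WARN: 60% of the interval spent in GC
            System.out.println(classify(120, 1000)); // DEBUG
            System.out.println(classify(50, 1000));  // null
        }
    }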
diff --git a/core/src/main/java/org/elasticsearch/search/SearchModule.java b/core/src/main/java/org/elasticsearch/search/SearchModule.java
--- a/core/src/main/java/org/elasticsearch/search/SearchModule.java
+++ b/core/src/main/java/org/elasticsearch/search/SearchModule.java
+    public static final Setting<Integer> INDICES_MAX_CLAUSE_COUNT_SETTING = Setting.intSetting("indices.query.bool.max_clause_count",
+        1024, 1, Integer.MAX_VALUE, Setting.Property.NodeScope);
 
     // pkg private so tests can mock
     Class<? extends SearchService> searchServiceImpl = SearchService.class;
@@ -421,10 +424,10 @@ public class SearchModule extends AbstractModule {
      * @param aggregationName names by which the aggregation may be parsed. The first name is special because it is the name that the reader
      * is registered under.
      */
-    public <AB extends AggregatorBuilder<AB>> void registerAggregation(Writeable.Reader<AB> reader, Aggregator.Parser aggregationParser,
-            ParseField aggregationName) {
+    public <AB extends AggregationBuilder<AB>> void registerAggregation(Writeable.Reader<AB> reader, Aggregator.Parser aggregationParser,
+            ParseField aggregationName) {
         aggregationParserRegistry.register(aggregationParser, aggregationName);
-        namedWriteableRegistry.register(AggregatorBuilder.class, aggregationName.getPreferredName(), reader);
+        namedWriteableRegistry.register(AggregationBuilder.class, aggregationName.getPreferredName(), reader);
     }
 
     /**
@@ -478,55 +481,57 @@ public class SearchModule extends AbstractModule {
     }
 
     protected void configureAggs() {
-        registerAggregation(AvgAggregatorBuilder::new, new AvgParser(), AvgAggregatorBuilder.AGGREGATION_NAME_FIELD);
-        registerAggregation(SumAggregatorBuilder::new, new SumParser(), SumAggregatorBuilder.AGGREGATION_NAME_FIELD);
-        registerAggregation(MinAggregatorBuilder::new, new MinParser(), MinAggregatorBuilder.AGGREGATION_NAME_FIELD);
-        registerAggregation(MaxAggregatorBuilder::new, new MaxParser(), MaxAggregatorBuilder.AGGREGATION_NAME_FIELD);
-        registerAggregation(StatsAggregatorBuilder::new, new StatsParser(), StatsAggregatorBuilder.AGGREGATION_NAME_FIELD);
-        registerAggregation(ExtendedStatsAggregatorBuilder::new, new ExtendedStatsParser(),
-                ExtendedStatsAggregatorBuilder.AGGREGATION_NAME_FIELD);
-        registerAggregation(ValueCountAggregatorBuilder::new, new ValueCountParser(), ValueCountAggregatorBuilder.AGGREGATION_NAME_FIELD);
-        registerAggregation(PercentilesAggregatorBuilder::new, new PercentilesParser(),
-                PercentilesAggregatorBuilder.AGGREGATION_NAME_FIELD);
-        registerAggregation(PercentileRanksAggregatorBuilder::new, new PercentileRanksParser(),
-                PercentileRanksAggregatorBuilder.AGGREGATION_NAME_FIELD);
-        registerAggregation(CardinalityAggregatorBuilder::new, new CardinalityParser(),
-                CardinalityAggregatorBuilder.AGGREGATION_NAME_FIELD);
-        registerAggregation(GlobalAggregatorBuilder::new, GlobalAggregatorBuilder::parse, GlobalAggregatorBuilder.AGGREGATION_NAME_FIELD);
-        registerAggregation(MissingAggregatorBuilder::new, new MissingParser(), MissingAggregatorBuilder.AGGREGATION_NAME_FIELD);
-        registerAggregation(FilterAggregatorBuilder::new, FilterAggregatorBuilder::parse, FilterAggregatorBuilder.AGGREGATION_NAME_FIELD);
-        registerAggregation(FiltersAggregatorBuilder::new, FiltersAggregatorBuilder::parse,
-                FiltersAggregatorBuilder.AGGREGATION_NAME_FIELD);
-        registerAggregation(SamplerAggregatorBuilder::new, SamplerAggregatorBuilder::parse,
-                SamplerAggregatorBuilder.AGGREGATION_NAME_FIELD);
-        registerAggregation(DiversifiedAggregatorBuilder::new, new DiversifiedSamplerParser(),
-                DiversifiedAggregatorBuilder.AGGREGATION_NAME_FIELD);
-        registerAggregation(TermsAggregatorBuilder::new, new TermsParser(), TermsAggregatorBuilder.AGGREGATION_NAME_FIELD);
-        registerAggregation(SignificantTermsAggregatorBuilder::new,
                 new SignificantTermsParser(significanceHeuristicParserRegistry, queryParserRegistry),
-                SignificantTermsAggregatorBuilder.AGGREGATION_NAME_FIELD);
-        registerAggregation(RangeAggregatorBuilder::new, new RangeParser(), RangeAggregatorBuilder.AGGREGATION_NAME_FIELD);
-        registerAggregation(DateRangeAggregatorBuilder::new, new DateRangeParser(), DateRangeAggregatorBuilder.AGGREGATION_NAME_FIELD);
-        registerAggregation(IpRangeAggregatorBuilder::new, new IpRangeParser(), IpRangeAggregatorBuilder.AGGREGATION_NAME_FIELD);
-        registerAggregation(HistogramAggregatorBuilder::new, new HistogramParser(), HistogramAggregatorBuilder.AGGREGATION_NAME_FIELD);
-        registerAggregation(DateHistogramAggregatorBuilder::new, new DateHistogramParser(),
-                DateHistogramAggregatorBuilder.AGGREGATION_NAME_FIELD);
-        registerAggregation(GeoDistanceAggregatorBuilder::new, new GeoDistanceParser(),
-                GeoDistanceAggregatorBuilder.AGGREGATION_NAME_FIELD);
-        registerAggregation(GeoGridAggregatorBuilder::new, new GeoHashGridParser(), GeoGridAggregatorBuilder.AGGREGATION_NAME_FIELD);
-        registerAggregation(NestedAggregatorBuilder::new, NestedAggregatorBuilder::parse, NestedAggregatorBuilder.AGGREGATION_FIELD_NAME);
-        registerAggregation(ReverseNestedAggregatorBuilder::new, ReverseNestedAggregatorBuilder::parse,
-                ReverseNestedAggregatorBuilder.AGGREGATION_NAME_FIELD);
-        registerAggregation(TopHitsAggregatorBuilder::new, TopHitsAggregatorBuilder::parse,
-                TopHitsAggregatorBuilder.AGGREGATION_NAME_FIELD);
-        registerAggregation(GeoBoundsAggregatorBuilder::new, new GeoBoundsParser(), GeoBoundsAggregatorBuilder.AGGREGATION_NAME_FIED);
-        registerAggregation(GeoCentroidAggregatorBuilder::new, new GeoCentroidParser(),
-                GeoCentroidAggregatorBuilder.AGGREGATION_NAME_FIELD);
-        registerAggregation(ScriptedMetricAggregatorBuilder::new, ScriptedMetricAggregatorBuilder::parse,
-                ScriptedMetricAggregatorBuilder.AGGREGATION_NAME_FIELD);
-        registerAggregation(ChildrenAggregatorBuilder::new, ChildrenAggregatorBuilder::parse,
-                ChildrenAggregatorBuilder.AGGREGATION_NAME_FIELD);
-
+        registerAggregation(AvgAggregationBuilder::new, new AvgParser(), AvgAggregationBuilder.AGGREGATION_NAME_FIELD);
+        registerAggregation(SumAggregationBuilder::new, new SumParser(), SumAggregationBuilder.AGGREGATION_NAME_FIELD);
+        registerAggregation(MinAggregationBuilder::new, new MinParser(), MinAggregationBuilder.AGGREGATION_NAME_FIELD);
+        registerAggregation(MaxAggregationBuilder::new, new MaxParser(), MaxAggregationBuilder.AGGREGATION_NAME_FIELD);
+        registerAggregation(StatsAggregationBuilder::new, new StatsParser(), StatsAggregationBuilder.AGGREGATION_NAME_FIELD);
+        registerAggregation(ExtendedStatsAggregationBuilder::new, new ExtendedStatsParser(),
+                ExtendedStatsAggregationBuilder.AGGREGATION_NAME_FIELD);
+        registerAggregation(ValueCountAggregationBuilder::new, new ValueCountParser(), ValueCountAggregationBuilder.AGGREGATION_NAME_FIELD);
+        registerAggregation(PercentilesAggregationBuilder::new, new PercentilesParser(),
+                PercentilesAggregationBuilder.AGGREGATION_NAME_FIELD);
+        registerAggregation(PercentileRanksAggregationBuilder::new, new PercentileRanksParser(),
+                PercentileRanksAggregationBuilder.AGGREGATION_NAME_FIELD);
+        registerAggregation(CardinalityAggregationBuilder::new, new CardinalityParser(),
+                CardinalityAggregationBuilder.AGGREGATION_NAME_FIELD);
+        registerAggregation(GlobalAggregationBuilder::new, GlobalAggregationBuilder::parse,
+                GlobalAggregationBuilder.AGGREGATION_NAME_FIELD);
+        registerAggregation(MissingAggregationBuilder::new, new MissingParser(), MissingAggregationBuilder.AGGREGATION_NAME_FIELD);
+        registerAggregation(FilterAggregationBuilder::new, FilterAggregationBuilder::parse,
+                FilterAggregationBuilder.AGGREGATION_NAME_FIELD);
+        registerAggregation(FiltersAggregationBuilder::new, FiltersAggregationBuilder::parse,
+                FiltersAggregationBuilder.AGGREGATION_NAME_FIELD);
+        registerAggregation(SamplerAggregationBuilder::new, SamplerAggregationBuilder::parse,
+                SamplerAggregationBuilder.AGGREGATION_NAME_FIELD);
+        registerAggregation(DiversifiedAggregationBuilder::new, new DiversifiedSamplerParser(),
+                DiversifiedAggregationBuilder.AGGREGATION_NAME_FIELD);
+        registerAggregation(TermsAggregationBuilder::new, new TermsParser(), TermsAggregationBuilder.AGGREGATION_NAME_FIELD);
+        registerAggregation(SignificantTermsAggregationBuilder::new,
                new SignificantTermsParser(significanceHeuristicParserRegistry, queryParserRegistry),
+                SignificantTermsAggregationBuilder.AGGREGATION_NAME_FIELD);
+        registerAggregation(RangeAggregationBuilder::new, new RangeParser(), RangeAggregationBuilder.AGGREGATION_NAME_FIELD);
+        registerAggregation(DateRangeAggregationBuilder::new, new DateRangeParser(), DateRangeAggregationBuilder.AGGREGATION_NAME_FIELD);
+        registerAggregation(IpRangeAggregationBuilder::new, new IpRangeParser(), IpRangeAggregationBuilder.AGGREGATION_NAME_FIELD);
+        registerAggregation(HistogramAggregationBuilder::new, new HistogramParser(), HistogramAggregationBuilder.AGGREGATION_NAME_FIELD);
+        registerAggregation(DateHistogramAggregationBuilder::new, new DateHistogramParser(),
+                DateHistogramAggregationBuilder.AGGREGATION_NAME_FIELD);
+        registerAggregation(GeoDistanceAggregationBuilder::new, new GeoDistanceParser(),
+                GeoDistanceAggregationBuilder.AGGREGATION_NAME_FIELD);
+        registerAggregation(GeoGridAggregationBuilder::new, new GeoHashGridParser(), GeoGridAggregationBuilder.AGGREGATION_NAME_FIELD);
+        registerAggregation(NestedAggregationBuilder::new, NestedAggregationBuilder::parse,
+                NestedAggregationBuilder.AGGREGATION_FIELD_NAME);
+        registerAggregation(ReverseNestedAggregationBuilder::new, ReverseNestedAggregationBuilder::parse,
+                ReverseNestedAggregationBuilder.AGGREGATION_NAME_FIELD);
+        registerAggregation(TopHitsAggregationBuilder::new, TopHitsAggregationBuilder::parse,
+                TopHitsAggregationBuilder.AGGREGATION_NAME_FIELD);
+        registerAggregation(GeoBoundsAggregationBuilder::new, new GeoBoundsParser(), GeoBoundsAggregationBuilder.AGGREGATION_NAME_FIED);
+        registerAggregation(GeoCentroidAggregationBuilder::new, new GeoCentroidParser(),
+                GeoCentroidAggregationBuilder.AGGREGATION_NAME_FIELD);
+        registerAggregation(ScriptedMetricAggregationBuilder::new, ScriptedMetricAggregationBuilder::parse,
+                ScriptedMetricAggregationBuilder.AGGREGATION_NAME_FIELD);
+        registerAggregation(ChildrenAggregationBuilder::new, ChildrenAggregationBuilder::parse,
+                ChildrenAggregationBuilder.AGGREGATION_NAME_FIELD);
         registerPipelineAggregation(DerivativePipelineAggregatorBuilder::new, DerivativePipelineAggregatorBuilder::parse,
                 DerivativePipelineAggregatorBuilder.AGGREGATION_NAME_FIELD);
         registerPipelineAggregation(MaxBucketPipelineAggregatorBuilder::new, MaxBucketPipelineAggregatorBuilder.PARSER,
@@ -650,8 +655,7 @@ public class SearchModule extends AbstractModule {
         registerQuery(MatchAllQueryBuilder::new, MatchAllQueryBuilder::fromXContent, MatchAllQueryBuilder.QUERY_NAME_FIELD);
         registerQuery(QueryStringQueryBuilder::new, QueryStringQueryBuilder::fromXContent, QueryStringQueryBuilder.QUERY_NAME_FIELD);
         registerQuery(BoostingQueryBuilder::new, BoostingQueryBuilder::fromXContent, BoostingQueryBuilder.QUERY_NAME_FIELD);
-        BooleanQuery.setMaxClauseCount(settings.getAsInt("index.query.bool.max_clause_count",
-                settings.getAsInt("indices.query.bool.max_clause_count", BooleanQuery.getMaxClauseCount())));
+        BooleanQuery.setMaxClauseCount(INDICES_MAX_CLAUSE_COUNT_SETTING.get(settings));
         registerQuery(BoolQueryBuilder::new, BoolQueryBuilder::fromXContent, BoolQueryBuilder.QUERY_NAME_FIELD);
         registerQuery(TermQueryBuilder::new, TermQueryBuilder::fromXContent, TermQueryBuilder.QUERY_NAME_FIELD);
         registerQuery(TermsQueryBuilder::new, TermsQueryBuilder::fromXContent, TermsQueryBuilder.QUERY_NAME_FIELD);
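With the max-clause count promoted to a real node-scoped setting, the value is validated at startup (it must be between 1 and Integer.MAX_VALUE) instead of being read through a raw getAsInt with a silent fallback. Assuming an Elasticsearch classpath, usage looks roughly like this sketch:

    import org.apache.lucene.search.BooleanQuery;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.search.SearchModule;

    // Roughly what the module now does at configure time, shown standalone.
    public class MaxClauseDemo {
        public static void main(String[] args) {
            Settings settings = Settings.builder()
                .put("indices.query.bool.max_clause_count", 4096)
                .build();
            // Typed, validated read - values < 1 are rejected rather than silently ignored.
            int maxClauses = SearchModule.INDICES_MAX_CLAUSE_COUNT_SETTING.get(settings);
            BooleanQuery.setMaxClauseCount(maxClauses);
            System.out.println(BooleanQuery.getMaxClauseCount()); // 4096
        }
    }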
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java
similarity index 93%
rename from core/src/main/java/org/elasticsearch/search/aggregations/AggregatorBuilder.java
rename to core/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java
index 159ab5a8a10..e0336247c75 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java
@@ -36,7 +36,9 @@ import java.util.Objects;
 
 /**
  * A factory that knows how to create an {@link Aggregator} of a specific type.
  */
-public abstract class AggregatorBuilder<AB extends AggregatorBuilder<AB>> extends ToXContentToBytes implements NamedWriteable, ToXContent {
+public abstract class AggregationBuilder<AB extends AggregationBuilder<AB>>
+    extends ToXContentToBytes
+    implements NamedWriteable, ToXContent {
 
     protected String name;
     protected Type type;
@@ -44,12 +46,12 @@ public abstract class AggregatorBuilder<AB extends AggregatorBuilder<AB>>
     protected Map<String, Object> metaData;
 
     /**
-     * Constructs a new aggregator factory.
+     * Constructs a new aggregation builder.
      *
      * @param name  The aggregation name
      * @param type  The aggregation type
      */
-    public AggregatorBuilder(String name, Type type) {
+    public AggregationBuilder(String name, Type type) {
         if (name == null) {
             throw new IllegalArgumentException("[name] must not be null: [" + name + "]");
         }
@@ -63,7 +65,7 @@ public abstract class AggregatorBuilder<AB extends AggregatorBuilder<AB>>
     /**
      * Read from a stream.
      */
-    protected AggregatorBuilder(StreamInput in, Type type) throws IOException {
+    protected AggregationBuilder(StreamInput in, Type type) throws IOException {
         name = in.readString();
         this.type = type;
         factoriesBuilder = new AggregatorFactories.Builder(in);
@@ -84,7 +86,7 @@ public abstract class AggregatorBuilder<AB extends AggregatorBuilder<AB>>
      * Add a sub aggregation to this aggregation.
      */
     @SuppressWarnings("unchecked")
-    public AB subAggregation(AggregatorBuilder<?> aggregation) {
+    public AB subAggregation(AggregationBuilder<?> aggregation) {
         if (aggregation == null) {
             throw new IllegalArgumentException("[aggregation] must not be null: [" + name + "]");
         }
@@ -178,7 +180,7 @@ public abstract class AggregatorBuilder<AB extends AggregatorBuilder<AB>>
         if (getClass() != obj.getClass())
             return false;
         @SuppressWarnings("unchecked")
-        AggregatorBuilder<AB> other = (AggregatorBuilder<AB>) obj;
+        AggregationBuilder<AB> other = (AggregationBuilder<AB>) obj;
         if (!Objects.equals(name, other.name))
             return false;
         if (!Objects.equals(type, other.type))
org.elasticsearch.search.aggregations.bucket.nested.ReverseNestedAggregatorBuilder; +import org.elasticsearch.search.aggregations.bucket.nested.ReverseNestedAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.range.Range; -import org.elasticsearch.search.aggregations.bucket.range.RangeAggregatorBuilder; -import org.elasticsearch.search.aggregations.bucket.range.date.DateRangeAggregatorBuilder; -import org.elasticsearch.search.aggregations.bucket.range.geodistance.GeoDistanceAggregatorBuilder; -import org.elasticsearch.search.aggregations.bucket.range.ip.IpRangeAggregatorBuilder; -import org.elasticsearch.search.aggregations.bucket.sampler.DiversifiedAggregatorBuilder; +import org.elasticsearch.search.aggregations.bucket.range.RangeAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.range.date.DateRangeAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.range.geodistance.GeoDistanceAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.range.ip.IpRangeAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.sampler.DiversifiedAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.sampler.Sampler; -import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregatorBuilder; +import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.significant.SignificantTerms; -import org.elasticsearch.search.aggregations.bucket.significant.SignificantTermsAggregatorBuilder; +import org.elasticsearch.search.aggregations.bucket.significant.SignificantTermsAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorBuilder; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.avg.Avg; -import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregatorBuilder; +import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.cardinality.Cardinality; -import org.elasticsearch.search.aggregations.metrics.cardinality.CardinalityAggregatorBuilder; +import org.elasticsearch.search.aggregations.metrics.cardinality.CardinalityAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.geobounds.GeoBounds; -import org.elasticsearch.search.aggregations.metrics.geobounds.GeoBoundsAggregatorBuilder; +import org.elasticsearch.search.aggregations.metrics.geobounds.GeoBoundsAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.geocentroid.GeoCentroid; -import org.elasticsearch.search.aggregations.metrics.geocentroid.GeoCentroidAggregatorBuilder; +import org.elasticsearch.search.aggregations.metrics.geocentroid.GeoCentroidAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.max.Max; -import org.elasticsearch.search.aggregations.metrics.max.MaxAggregatorBuilder; +import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.min.Min; -import org.elasticsearch.search.aggregations.metrics.min.MinAggregatorBuilder; +import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanks; -import 
org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanksAggregatorBuilder; +import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanksAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.percentiles.Percentiles; -import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesAggregatorBuilder; +import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.scripted.ScriptedMetric; -import org.elasticsearch.search.aggregations.metrics.scripted.ScriptedMetricAggregatorBuilder; +import org.elasticsearch.search.aggregations.metrics.scripted.ScriptedMetricAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.stats.Stats; -import org.elasticsearch.search.aggregations.metrics.stats.StatsAggregatorBuilder; +import org.elasticsearch.search.aggregations.metrics.stats.StatsAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats; -import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStatsAggregatorBuilder; +import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStatsAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.sum.Sum; -import org.elasticsearch.search.aggregations.metrics.sum.SumAggregatorBuilder; +import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.tophits.TopHits; -import org.elasticsearch.search.aggregations.metrics.tophits.TopHitsAggregatorBuilder; +import org.elasticsearch.search.aggregations.metrics.tophits.TopHitsAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCount; -import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCountAggregatorBuilder; +import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCountAggregationBuilder; /** * Utility class to create aggregations. @@ -93,234 +93,234 @@ public class AggregationBuilders { /** * Create a new {@link ValueCount} aggregation with the given name. */ - public static ValueCountAggregatorBuilder count(String name) { - return new ValueCountAggregatorBuilder(name, null); + public static ValueCountAggregationBuilder count(String name) { + return new ValueCountAggregationBuilder(name, null); } /** * Create a new {@link Avg} aggregation with the given name. */ - public static AvgAggregatorBuilder avg(String name) { - return new AvgAggregatorBuilder(name); + public static AvgAggregationBuilder avg(String name) { + return new AvgAggregationBuilder(name); } /** * Create a new {@link Max} aggregation with the given name. */ - public static MaxAggregatorBuilder max(String name) { - return new MaxAggregatorBuilder(name); + public static MaxAggregationBuilder max(String name) { + return new MaxAggregationBuilder(name); } /** * Create a new {@link Min} aggregation with the given name. */ - public static MinAggregatorBuilder min(String name) { - return new MinAggregatorBuilder(name); + public static MinAggregationBuilder min(String name) { + return new MinAggregationBuilder(name); } /** * Create a new {@link Sum} aggregation with the given name. */ - public static SumAggregatorBuilder sum(String name) { - return new SumAggregatorBuilder(name); + public static SumAggregationBuilder sum(String name) { + return new SumAggregationBuilder(name); } /** * Create a new {@link Stats} aggregation with the given name. 
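// A minimal call-site sketch of the renamed metric factories, assuming a
// hypothetical numeric "price" field; field(String) is assumed from the
// ValuesSourceAggregationBuilder base class renamed elsewhere in this change.
AvgAggregationBuilder avgPrice = AggregationBuilders.avg("avg_price").field("price");
SumAggregationBuilder totalPrice = AggregationBuilders.sum("total_price").field("price");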
*/ - public static StatsAggregatorBuilder stats(String name) { - return new StatsAggregatorBuilder(name); + public static StatsAggregationBuilder stats(String name) { + return new StatsAggregationBuilder(name); } /** * Create a new {@link ExtendedStats} aggregation with the given name. */ - public static ExtendedStatsAggregatorBuilder extendedStats(String name) { - return new ExtendedStatsAggregatorBuilder(name); + public static ExtendedStatsAggregationBuilder extendedStats(String name) { + return new ExtendedStatsAggregationBuilder(name); } /** * Create a new {@link Filter} aggregation with the given name. */ - public static FilterAggregatorBuilder filter(String name, QueryBuilder filter) { - return new FilterAggregatorBuilder(name, filter); + public static FilterAggregationBuilder filter(String name, QueryBuilder filter) { + return new FilterAggregationBuilder(name, filter); } /** * Create a new {@link Filters} aggregation with the given name. */ - public static FiltersAggregatorBuilder filters(String name, KeyedFilter... filters) { - return new FiltersAggregatorBuilder(name, filters); + public static FiltersAggregationBuilder filters(String name, KeyedFilter... filters) { + return new FiltersAggregationBuilder(name, filters); } /** * Create a new {@link Filters} aggregation with the given name. */ - public static FiltersAggregatorBuilder filters(String name, QueryBuilder... filters) { - return new FiltersAggregatorBuilder(name, filters); + public static FiltersAggregationBuilder filters(String name, QueryBuilder... filters) { + return new FiltersAggregationBuilder(name, filters); } /** * Create a new {@link Sampler} aggregation with the given name. */ - public static SamplerAggregatorBuilder sampler(String name) { - return new SamplerAggregatorBuilder(name); + public static SamplerAggregationBuilder sampler(String name) { + return new SamplerAggregationBuilder(name); } /** * Create a new {@link Sampler} aggregation with the given name. */ - public static DiversifiedAggregatorBuilder diversifiedSampler(String name) { - return new DiversifiedAggregatorBuilder(name); + public static DiversifiedAggregationBuilder diversifiedSampler(String name) { + return new DiversifiedAggregationBuilder(name); } /** * Create a new {@link Global} aggregation with the given name. */ - public static GlobalAggregatorBuilder global(String name) { - return new GlobalAggregatorBuilder(name); + public static GlobalAggregationBuilder global(String name) { + return new GlobalAggregationBuilder(name); } /** * Create a new {@link Missing} aggregation with the given name. */ - public static MissingAggregatorBuilder missing(String name) { - return new MissingAggregatorBuilder(name, null); + public static MissingAggregationBuilder missing(String name) { + return new MissingAggregationBuilder(name, null); } /** * Create a new {@link Nested} aggregation with the given name. */ - public static NestedAggregatorBuilder nested(String name, String path) { - return new NestedAggregatorBuilder(name, path); + public static NestedAggregationBuilder nested(String name, String path) { + return new NestedAggregationBuilder(name, path); } /** * Create a new {@link ReverseNested} aggregation with the given name. */ - public static ReverseNestedAggregatorBuilder reverseNested(String name) { - return new ReverseNestedAggregatorBuilder(name); + public static ReverseNestedAggregationBuilder reverseNested(String name) { + return new ReverseNestedAggregationBuilder(name); } /** * Create a new {@link Children} aggregation with the given name. 
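// A sketch of the bucket-side factories, assuming hypothetical "status" values
// and a nested "comments" mapping; the KeyedFilter(String, QueryBuilder)
// constructor is assumed from the FiltersAggregator.KeyedFilter import above.
FiltersAggregationBuilder byStatus = AggregationBuilders.filters("by_status",
        new FiltersAggregator.KeyedFilter("errors", QueryBuilders.termQuery("status", "error")),
        new FiltersAggregator.KeyedFilter("ok", QueryBuilders.termQuery("status", "ok")));
NestedAggregationBuilder comments = AggregationBuilders.nested("comments", "comments");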
*/ - public static ChildrenAggregatorBuilder children(String name, String childType) { - return new ChildrenAggregatorBuilder(name, childType); + public static ChildrenAggregationBuilder children(String name, String childType) { + return new ChildrenAggregationBuilder(name, childType); } /** * Create a new {@link GeoDistance} aggregation with the given name. */ - public static GeoDistanceAggregatorBuilder geoDistance(String name, GeoPoint origin) { - return new GeoDistanceAggregatorBuilder(name, origin); + public static GeoDistanceAggregationBuilder geoDistance(String name, GeoPoint origin) { + return new GeoDistanceAggregationBuilder(name, origin); } /** * Create a new {@link Histogram} aggregation with the given name. */ - public static HistogramAggregatorBuilder histogram(String name) { - return new HistogramAggregatorBuilder(name); + public static HistogramAggregationBuilder histogram(String name) { + return new HistogramAggregationBuilder(name); } /** * Create a new {@link GeoHashGrid} aggregation with the given name. */ - public static GeoGridAggregatorBuilder geohashGrid(String name) { - return new GeoGridAggregatorBuilder(name); + public static GeoGridAggregationBuilder geohashGrid(String name) { + return new GeoGridAggregationBuilder(name); } /** * Create a new {@link SignificantTerms} aggregation with the given name. */ - public static SignificantTermsAggregatorBuilder significantTerms(String name) { - return new SignificantTermsAggregatorBuilder(name, null); + public static SignificantTermsAggregationBuilder significantTerms(String name) { + return new SignificantTermsAggregationBuilder(name, null); } /** - * Create a new {@link DateHistogramAggregatorBuilder} aggregation with the given + * Create a new {@link DateHistogramAggregationBuilder} aggregation with the given * name. */ - public static DateHistogramAggregatorBuilder dateHistogram(String name) { - return new DateHistogramAggregatorBuilder(name); + public static DateHistogramAggregationBuilder dateHistogram(String name) { + return new DateHistogramAggregationBuilder(name); } /** * Create a new {@link Range} aggregation with the given name. */ - public static RangeAggregatorBuilder range(String name) { - return new RangeAggregatorBuilder(name); + public static RangeAggregationBuilder range(String name) { + return new RangeAggregationBuilder(name); } /** - * Create a new {@link DateRangeAggregatorBuilder} aggregation with the + * Create a new {@link DateRangeAggregationBuilder} aggregation with the * given name. */ - public static DateRangeAggregatorBuilder dateRange(String name) { - return new DateRangeAggregatorBuilder(name); + public static DateRangeAggregationBuilder dateRange(String name) { + return new DateRangeAggregationBuilder(name); } /** - * Create a new {@link IpRangeAggregatorBuilder} aggregation with the + * Create a new {@link IpRangeAggregationBuilder} aggregation with the * given name. */ - public static IpRangeAggregatorBuilder ipRange(String name) { - return new IpRangeAggregatorBuilder(name); + public static IpRangeAggregationBuilder ipRange(String name) { + return new IpRangeAggregationBuilder(name); } /** * Create a new {@link Terms} aggregation with the given name. */ - public static TermsAggregatorBuilder terms(String name) { - return new TermsAggregatorBuilder(name, null); + public static TermsAggregationBuilder terms(String name) { + return new TermsAggregationBuilder(name, null); } /** * Create a new {@link Percentiles} aggregation with the given name. 
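// A composition sketch: a date histogram with a range sub-aggregation wired
// through AggregationBuilder#subAggregation; "timestamp" and "price" are
// hypothetical fields, and interval configuration is omitted for brevity.
DateHistogramAggregationBuilder perDay = AggregationBuilders.dateHistogram("per_day").field("timestamp");
perDay.subAggregation(AggregationBuilders.range("price_ranges").field("price").addRange(0, 100));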
*/ - public static PercentilesAggregatorBuilder percentiles(String name) { - return new PercentilesAggregatorBuilder(name); + public static PercentilesAggregationBuilder percentiles(String name) { + return new PercentilesAggregationBuilder(name); } /** * Create a new {@link PercentileRanks} aggregation with the given name. */ - public static PercentileRanksAggregatorBuilder percentileRanks(String name) { - return new PercentileRanksAggregatorBuilder(name); + public static PercentileRanksAggregationBuilder percentileRanks(String name) { + return new PercentileRanksAggregationBuilder(name); } /** * Create a new {@link Cardinality} aggregation with the given name. */ - public static CardinalityAggregatorBuilder cardinality(String name) { - return new CardinalityAggregatorBuilder(name, null); + public static CardinalityAggregationBuilder cardinality(String name) { + return new CardinalityAggregationBuilder(name, null); } /** * Create a new {@link TopHits} aggregation with the given name. */ - public static TopHitsAggregatorBuilder topHits(String name) { - return new TopHitsAggregatorBuilder(name); + public static TopHitsAggregationBuilder topHits(String name) { + return new TopHitsAggregationBuilder(name); } /** * Create a new {@link GeoBounds} aggregation with the given name. */ - public static GeoBoundsAggregatorBuilder geoBounds(String name) { - return new GeoBoundsAggregatorBuilder(name); + public static GeoBoundsAggregationBuilder geoBounds(String name) { + return new GeoBoundsAggregationBuilder(name); } /** * Create a new {@link GeoCentroid} aggregation with the given name. */ - public static GeoCentroidAggregatorBuilder geoCentroid(String name) { - return new GeoCentroidAggregatorBuilder(name); + public static GeoCentroidAggregationBuilder geoCentroid(String name) { + return new GeoCentroidAggregationBuilder(name); } /** * Create a new {@link ScriptedMetric} aggregation with the given name. */ - public static ScriptedMetricAggregatorBuilder scriptedMetric(String name) { - return new ScriptedMetricAggregatorBuilder(name); + public static ScriptedMetricAggregationBuilder scriptedMetric(String name) { + return new ScriptedMetricAggregationBuilder(name); } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/Aggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/Aggregator.java index 426f148e38e..faceada6415 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/Aggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/Aggregator.java @@ -42,7 +42,7 @@ public abstract class Aggregator extends BucketCollector implements Releasable { /** * Parses the aggregation request and creates the appropriate aggregator factory for it. 
* - * @see AggregatorBuilder + * @see AggregationBuilder */ @FunctionalInterface public interface Parser { @@ -55,7 +55,7 @@ public abstract class Aggregator extends BucketCollector implements Releasable { * @return The resolved aggregator factory or {@code null} in case the aggregation should be skipped * @throws java.io.IOException When parsing fails */ - AggregatorBuilder parse(String aggregationName, QueryParseContext context) throws IOException; + AggregationBuilder parse(String aggregationName, QueryParseContext context) throws IOException; } /** diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java b/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java index f1236cd5cce..4e07ffcc4d8 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java @@ -124,7 +124,7 @@ public class AggregatorFactories { public static class Builder extends ToXContentToBytes implements Writeable { private final Set names = new HashSet<>(); - private final List> aggregatorBuilders = new ArrayList<>(); + private final List> aggregationBuilders = new ArrayList<>(); private final List> pipelineAggregatorBuilders = new ArrayList<>(); private boolean skipResolveOrder; @@ -140,7 +140,7 @@ public class AggregatorFactories { public Builder(StreamInput in) throws IOException { int factoriesSize = in.readVInt(); for (int i = 0; i < factoriesSize; i++) { - addAggregator(in.readNamedWriteable(AggregatorBuilder.class)); + addAggregator(in.readNamedWriteable(AggregationBuilder.class)); } int pipelineFactoriesSize = in.readVInt(); for (int i = 0; i < pipelineFactoriesSize; i++) { @@ -150,8 +150,8 @@ public class AggregatorFactories { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(this.aggregatorBuilders.size()); - for (AggregatorBuilder factory : aggregatorBuilders) { + out.writeVInt(this.aggregationBuilders.size()); + for (AggregationBuilder factory : aggregationBuilders) { out.writeNamedWriteable(factory); } out.writeVInt(this.pipelineAggregatorBuilders.size()); @@ -164,11 +164,11 @@ public class AggregatorFactories { throw new UnsupportedOperationException("This needs to be removed"); } - public Builder addAggregator(AggregatorBuilder factory) { + public Builder addAggregator(AggregationBuilder factory) { if (!names.add(factory.name)) { throw new IllegalArgumentException("Two sibling aggregations cannot have the same name: [" + factory.name + "]"); } - aggregatorBuilders.add(factory); + aggregationBuilders.add(factory); return this; } @@ -186,30 +186,30 @@ public class AggregatorFactories { } public AggregatorFactories build(AggregationContext context, AggregatorFactory parent) throws IOException { - if (aggregatorBuilders.isEmpty() && pipelineAggregatorBuilders.isEmpty()) { + if (aggregationBuilders.isEmpty() && pipelineAggregatorBuilders.isEmpty()) { return EMPTY; } List> orderedpipelineAggregators = null; if (skipResolveOrder) { orderedpipelineAggregators = new ArrayList<>(pipelineAggregatorBuilders); } else { - orderedpipelineAggregators = resolvePipelineAggregatorOrder(this.pipelineAggregatorBuilders, this.aggregatorBuilders); + orderedpipelineAggregators = resolvePipelineAggregatorOrder(this.pipelineAggregatorBuilders, this.aggregationBuilders); } - AggregatorFactory[] aggFactories = new AggregatorFactory[aggregatorBuilders.size()]; - for (int i = 0; i < aggregatorBuilders.size(); i++) { - 
aggFactories[i] = aggregatorBuilders.get(i).build(context, parent); + AggregatorFactory[] aggFactories = new AggregatorFactory[aggregationBuilders.size()]; + for (int i = 0; i < aggregationBuilders.size(); i++) { + aggFactories[i] = aggregationBuilders.get(i).build(context, parent); } return new AggregatorFactories(parent, aggFactories, orderedpipelineAggregators); } private List> resolvePipelineAggregatorOrder( - List> pipelineAggregatorBuilders, List> aggBuilders) { + List> pipelineAggregatorBuilders, List> aggBuilders) { Map> pipelineAggregatorBuildersMap = new HashMap<>(); for (PipelineAggregatorBuilder builder : pipelineAggregatorBuilders) { pipelineAggregatorBuildersMap.put(builder.getName(), builder); } - Map> aggBuildersMap = new HashMap<>(); - for (AggregatorBuilder aggBuilder : aggBuilders) { + Map> aggBuildersMap = new HashMap<>(); + for (AggregationBuilder aggBuilder : aggBuilders) { aggBuildersMap.put(aggBuilder.name, aggBuilder); } List> orderedPipelineAggregatorrs = new LinkedList<>(); @@ -223,7 +223,7 @@ public class AggregatorFactories { return orderedPipelineAggregatorrs; } - private void resolvePipelineAggregatorOrder(Map> aggBuildersMap, + private void resolvePipelineAggregatorOrder(Map> aggBuildersMap, Map> pipelineAggregatorBuildersMap, List> orderedPipelineAggregators, List> unmarkedBuilders, Set> temporarilyMarked, PipelineAggregatorBuilder builder) { @@ -238,7 +238,7 @@ public class AggregatorFactories { if (bucketsPath.equals("_count") || bucketsPath.equals("_key")) { continue; } else if (aggBuildersMap.containsKey(firstAggName)) { - AggregatorBuilder aggBuilder = aggBuildersMap.get(firstAggName); + AggregationBuilder aggBuilder = aggBuildersMap.get(firstAggName); for (int i = 1; i < bucketsPathElements.size(); i++) { PathElement pathElement = bucketsPathElements.get(i); String aggName = pathElement.name; @@ -247,9 +247,9 @@ public class AggregatorFactories { } else { // Check the non-pipeline sub-aggregator // factories - AggregatorBuilder[] subBuilders = aggBuilder.factoriesBuilder.getAggregatorFactories(); + AggregationBuilder[] subBuilders = aggBuilder.factoriesBuilder.getAggregatorFactories(); boolean foundSubBuilder = false; - for (AggregatorBuilder subBuilder : subBuilders) { + for (AggregationBuilder subBuilder : subBuilders) { if (aggName.equals(subBuilder.name)) { aggBuilder = subBuilder; foundSubBuilder = true; @@ -289,8 +289,8 @@ public class AggregatorFactories { } } - AggregatorBuilder[] getAggregatorFactories() { - return this.aggregatorBuilders.toArray(new AggregatorBuilder[this.aggregatorBuilders.size()]); + AggregationBuilder[] getAggregatorFactories() { + return this.aggregationBuilders.toArray(new AggregationBuilder[this.aggregationBuilders.size()]); } List> getPipelineAggregatorFactories() { @@ -298,14 +298,14 @@ public class AggregatorFactories { } public int count() { - return aggregatorBuilders.size() + pipelineAggregatorBuilders.size(); + return aggregationBuilders.size() + pipelineAggregatorBuilders.size(); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - if (aggregatorBuilders != null) { - for (AggregatorBuilder subAgg : aggregatorBuilders) { + if (aggregationBuilders != null) { + for (AggregationBuilder subAgg : aggregationBuilders) { subAgg.toXContent(builder, params); } } @@ -320,7 +320,7 @@ public class AggregatorFactories { @Override public int hashCode() { - return Objects.hash(aggregatorBuilders, pipelineAggregatorBuilders); + return 
Objects.hash(aggregationBuilders, pipelineAggregatorBuilders); } @Override @@ -330,7 +330,7 @@ public class AggregatorFactories { if (getClass() != obj.getClass()) return false; Builder other = (Builder) obj; - if (!Objects.equals(aggregatorBuilders, other.aggregatorBuilders)) + if (!Objects.equals(aggregationBuilders, other.aggregationBuilders)) return false; if (!Objects.equals(pipelineAggregatorBuilders, other.pipelineAggregatorBuilders)) return false; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java b/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java index 2fd1f63d620..55345d6e5ec 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java @@ -104,7 +104,7 @@ public class AggregatorParsers { + token + "], expected a [" + XContentParser.Token.START_OBJECT + "]."); } - AggregatorBuilder aggFactory = null; + AggregationBuilder aggFactory = null; PipelineAggregatorBuilder pipelineAggregatorFactory = null; AggregatorFactories.Builder subFactories = null; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ChildrenAggregatorBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ChildrenAggregationBuilder.java similarity index 92% rename from core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ChildrenAggregatorBuilder.java rename to core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ChildrenAggregationBuilder.java index c5982d1000e..3749d2b2edd 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ChildrenAggregatorBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ChildrenAggregationBuilder.java @@ -36,7 +36,7 @@ import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.FieldContext; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource.Bytes.ParentChild; -import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorBuilder; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceType; @@ -44,7 +44,7 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceType; import java.io.IOException; import java.util.Objects; -public class ChildrenAggregatorBuilder extends ValuesSourceAggregatorBuilder { +public class ChildrenAggregationBuilder extends ValuesSourceAggregationBuilder { public static final String NAME = InternalChildren.TYPE.name(); public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME); @@ -59,7 +59,7 @@ public class ChildrenAggregatorBuilder extends ValuesSourceAggregatorBuilder { +public class FilterAggregationBuilder extends AggregationBuilder { public static final String NAME = InternalFilter.TYPE.name(); public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME); @@ -51,7 +50,7 @@ public class FilterAggregatorBuilder extends AggregatorBuilder { +public class FiltersAggregationBuilder extends AggregationBuilder { public static final String NAME = 
InternalFilters.TYPE.name(); public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME); @@ -62,11 +62,11 @@ public class FiltersAggregatorBuilder extends AggregatorBuilder filters) { + private FiltersAggregationBuilder(String name, List filters) { super(name, InternalFilters.TYPE); // internally we want to have a fixed order of filters, regardless of the order of the filters in the request this.filters = new ArrayList<>(filters); @@ -80,7 +80,7 @@ public class FiltersAggregatorBuilder extends AggregatorBuilder keyedFilters = new ArrayList<>(filters.length); for (int i = 0; i < filters.length; i++) { @@ -93,7 +93,7 @@ public class FiltersAggregatorBuilder extends AggregatorBuilder { +public class GeoGridAggregationBuilder extends ValuesSourceAggregationBuilder { public static final String NAME = InternalGeoHashGrid.TYPE.name(); public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME); @@ -53,14 +53,14 @@ public class GeoGridAggregatorBuilder extends ValuesSourceAggregatorBuilder pipelineAggregators, Map metaData) throws IOException { super(name, factories, aggregationContext, parent, pipelineAggregators, metaData); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java index ba4f84017c1..1b2c4c26372 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java @@ -25,7 +25,7 @@ import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.NonCollectingAggregator; -import org.elasticsearch.search.aggregations.bucket.geogrid.GeoGridAggregatorBuilder.CellIdSource; +import org.elasticsearch.search.aggregations.bucket.geogrid.GeoGridAggregationBuilder.CellIdSource; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParser.java index 2ecf4953e78..1ae31e09ba0 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParser.java @@ -45,10 +45,10 @@ public class GeoHashGridParser extends GeoPointValuesSourceParser { } @Override - protected GeoGridAggregatorBuilder createFactory( + protected GeoGridAggregationBuilder createFactory( String aggregationName, ValuesSourceType valuesSourceType, ValueType targetValueType, Map otherOptions) { - GeoGridAggregatorBuilder factory = new GeoGridAggregatorBuilder(aggregationName); + GeoGridAggregationBuilder factory = new GeoGridAggregationBuilder(aggregationName); Integer precision = (Integer) otherOptions.get(GeoHashGridParams.FIELD_PRECISION); if (precision != null) { factory.precision(precision); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregatorBuilder.java 
b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregationBuilder.java similarity index 84% rename from core/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregatorBuilder.java rename to core/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregationBuilder.java index 7a60dcdab93..0f7e0713598 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregatorBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregationBuilder.java @@ -24,25 +24,25 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.QueryParseContext; -import org.elasticsearch.search.aggregations.AggregatorBuilder; +import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.support.AggregationContext; import java.io.IOException; -public class GlobalAggregatorBuilder extends AggregatorBuilder { +public class GlobalAggregationBuilder extends AggregationBuilder { public static final String NAME = InternalGlobal.TYPE.name(); public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME); - public GlobalAggregatorBuilder(String name) { + public GlobalAggregationBuilder(String name) { super(name, InternalGlobal.TYPE); } /** * Read from a stream. */ - public GlobalAggregatorBuilder(StreamInput in) throws IOException { + public GlobalAggregationBuilder(StreamInput in) throws IOException { super(in, InternalGlobal.TYPE); } @@ -64,9 +64,9 @@ public class GlobalAggregatorBuilder extends AggregatorBuilder> - extends ValuesSourceAggregatorBuilder { + extends ValuesSourceAggregationBuilder { protected long interval; protected long offset = 0; @@ -200,4 +200,4 @@ public abstract class AbstractHistogramBuilder { +public class DateHistogramAggregationBuilder extends AbstractHistogramBuilder { public static final String NAME = InternalDateHistogram.TYPE.name(); public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME); private DateHistogramInterval dateHistogramInterval; - public DateHistogramAggregatorBuilder(String name) { + public DateHistogramAggregationBuilder(String name) { super(name, InternalDateHistogram.HISTOGRAM_FACTORY); } /** * Read from a stream. 
*/ - public DateHistogramAggregatorBuilder(StreamInput in) throws IOException { + public DateHistogramAggregationBuilder(StreamInput in) throws IOException { super(in, InternalDateHistogram.HISTOGRAM_FACTORY); dateHistogramInterval = in.readOptionalWriteable(DateHistogramInterval::new); } @@ -61,7 +61,7 @@ public class DateHistogramAggregatorBuilder extends AbstractHistogramBuilder otherOptions) { - DateHistogramAggregatorBuilder factory = new DateHistogramAggregatorBuilder(aggregationName); + protected DateHistogramAggregationBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType, + ValueType targetValueType, Map otherOptions) { + DateHistogramAggregationBuilder factory = new DateHistogramAggregationBuilder(aggregationName); Object interval = otherOptions.get(Rounding.Interval.INTERVAL_FIELD); if (interval == null) { throw new ParsingException(null, "Missing required field [interval] for histogram aggregation [" + aggregationName + "]"); @@ -89,6 +89,6 @@ public class DateHistogramParser extends HistogramParser { @Override protected long parseStringOffset(String offset) throws IOException { - return DateHistogramAggregatorBuilder.parseStringOffset(offset); + return DateHistogramAggregationBuilder.parseStringOffset(offset); } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java similarity index 90% rename from core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorBuilder.java rename to core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java index 49bbd7160cf..54d52466bbb 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java @@ -29,18 +29,18 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import java.io.IOException; -public class HistogramAggregatorBuilder extends AbstractHistogramBuilder { +public class HistogramAggregationBuilder extends AbstractHistogramBuilder { public static final String NAME = InternalHistogram.TYPE.name(); public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME); - public HistogramAggregatorBuilder(String name) { + public HistogramAggregationBuilder(String name) { super(name, InternalHistogram.HISTOGRAM_FACTORY); } /** * Read from a stream. 
*/ - public HistogramAggregatorBuilder(StreamInput in) throws IOException { + public HistogramAggregationBuilder(StreamInput in) throws IOException { super(in, InternalHistogram.HISTOGRAM_FACTORY); } @@ -55,4 +55,4 @@ public class HistogramAggregatorBuilder extends AbstractHistogramBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType, ValueType targetValueType, Map otherOptions) { - HistogramAggregatorBuilder factory = new HistogramAggregatorBuilder(aggregationName); + HistogramAggregationBuilder factory = new HistogramAggregationBuilder(aggregationName); Long interval = (Long) otherOptions.get(Rounding.Interval.INTERVAL_FIELD); if (interval == null) { throw new ParsingException(null, "Missing required field [interval] for histogram aggregation [" + aggregationName + "]"); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregatorBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregationBuilder.java similarity index 91% rename from core/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregatorBuilder.java rename to core/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregationBuilder.java index 34263980bf4..f10f7683841 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregatorBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregationBuilder.java @@ -28,25 +28,25 @@ import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; -import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorBuilder; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceType; import java.io.IOException; -public class MissingAggregatorBuilder extends ValuesSourceAggregatorBuilder { +public class MissingAggregationBuilder extends ValuesSourceAggregationBuilder { public static final String NAME = InternalMissing.TYPE.name(); public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME); - public MissingAggregatorBuilder(String name, ValueType targetValueType) { + public MissingAggregationBuilder(String name, ValueType targetValueType) { super(name, InternalMissing.TYPE, ValuesSourceType.ANY, targetValueType); } /** * Read from a stream. 
*/ - public MissingAggregatorBuilder(StreamInput in) throws IOException { + public MissingAggregationBuilder(StreamInput in) throws IOException { super(in, InternalMissing.TYPE, ValuesSourceType.ANY); } @@ -85,4 +85,4 @@ public class MissingAggregatorBuilder extends ValuesSourceAggregatorBuilder otherOptions) { - return new MissingAggregatorBuilder(aggregationName, targetValueType); + protected MissingAggregationBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType, + ValueType targetValueType, Map otherOptions) { + return new MissingAggregationBuilder(aggregationName, targetValueType); } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregationBuilder.java similarity index 89% rename from core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorBuilder.java rename to core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregationBuilder.java index f01a78e9211..33771910f16 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregationBuilder.java @@ -26,7 +26,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryParseContext; -import org.elasticsearch.search.aggregations.AggregatorBuilder; +import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.support.AggregationContext; @@ -34,7 +34,7 @@ import org.elasticsearch.search.aggregations.support.AggregationContext; import java.io.IOException; import java.util.Objects; -public class NestedAggregatorBuilder extends AggregatorBuilder { +public class NestedAggregationBuilder extends AggregationBuilder { public static final String NAME = InternalNested.TYPE.name(); public static final ParseField AGGREGATION_FIELD_NAME = new ParseField(NAME); @@ -47,7 +47,7 @@ public class NestedAggregatorBuilder extends AggregatorBuilder { +public class ReverseNestedAggregationBuilder extends AggregationBuilder { public static final String NAME = InternalReverseNested.TYPE.name(); public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME); private String path; - public ReverseNestedAggregatorBuilder(String name) { + public ReverseNestedAggregationBuilder(String name) { super(name, InternalReverseNested.TYPE); } /** * Read from a stream. 
*/ - public ReverseNestedAggregatorBuilder(StreamInput in) throws IOException { + public ReverseNestedAggregationBuilder(StreamInput in) throws IOException { super(in, InternalReverseNested.TYPE); path = in.readOptionalString(); } @@ -62,7 +62,7 @@ public class ReverseNestedAggregatorBuilder extends AggregatorBuilder, R extends Range> - extends ValuesSourceAggregatorBuilder { + extends ValuesSourceAggregationBuilder { protected final InternalRange.Factory rangeFactory; protected List ranges = new ArrayList<>(); @@ -103,4 +103,4 @@ public abstract class AbstractRangeBuilder { +public class RangeAggregationBuilder extends AbstractRangeBuilder { public static final String NAME = InternalRange.TYPE.name(); public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME); - public RangeAggregatorBuilder(String name) { + public RangeAggregationBuilder(String name) { super(name, InternalRange.FACTORY); } /** * Read from a stream. */ - public RangeAggregatorBuilder(StreamInput in) throws IOException { + public RangeAggregationBuilder(StreamInput in) throws IOException { super(in, InternalRange.FACTORY, Range::new); } @@ -55,7 +55,7 @@ public class RangeAggregatorBuilder extends AbstractRangeBuilderfrom and * to. */ - public RangeAggregatorBuilder addRange(double from, double to) { + public RangeAggregationBuilder addRange(double from, double to) { return addRange(null, from, to); } @@ -77,7 +77,7 @@ public class RangeAggregatorBuilder extends AbstractRangeBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType, ValueType targetValueType, Map otherOptions) { - RangeAggregatorBuilder factory = new RangeAggregatorBuilder(aggregationName); + RangeAggregationBuilder factory = new RangeAggregationBuilder(aggregationName); @SuppressWarnings("unchecked") List ranges = (List) otherOptions.get(RangeAggregator.RANGES_FIELD); for (Range range : ranges) { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeAggregatorBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeAggregationBuilder.java similarity index 81% rename from core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeAggregatorBuilder.java rename to core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeAggregationBuilder.java index 9c28461df6c..392744a4f1c 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeAggregatorBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeAggregationBuilder.java @@ -33,18 +33,18 @@ import org.joda.time.DateTime; import java.io.IOException; -public class DateRangeAggregatorBuilder extends AbstractRangeBuilder { +public class DateRangeAggregationBuilder extends AbstractRangeBuilder { public static final String NAME = InternalDateRange.TYPE.name(); public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME); - public DateRangeAggregatorBuilder(String name) { + public DateRangeAggregationBuilder(String name) { super(name, InternalDateRange.FACTORY); } /** * Read from a stream. */ - public DateRangeAggregatorBuilder(StreamInput in) throws IOException { + public DateRangeAggregationBuilder(StreamInput in) throws IOException { super(in, InternalDateRange.FACTORY, Range::new); } @@ -63,7 +63,7 @@ public class DateRangeAggregatorBuilder extends AbstractRangeBuilderfrom and to. 
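// The date-range variant mirrors the numeric addRange calls above; the
// "created_at" field and the date strings are hypothetical examples.
DateRangeAggregationBuilder recency = AggregationBuilders.dateRange("recency")
        .field("created_at")
        .addRange("2016-01-01", "2016-06-01");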
*/ - public DateRangeAggregatorBuilder addRange(String from, String to) { + public DateRangeAggregationBuilder addRange(String from, String to) { return addRange(null, from, to); } @@ -84,7 +84,7 @@ public class DateRangeAggregatorBuilder extends AbstractRangeBuilderfrom and to. */ - public DateRangeAggregatorBuilder addRange(double from, double to) { + public DateRangeAggregationBuilder addRange(double from, double to) { return addRange(null, from, to); } @@ -149,7 +149,7 @@ public class DateRangeAggregatorBuilder extends AbstractRangeBuilderfrom and to. */ - public DateRangeAggregatorBuilder addRange(DateTime from, DateTime to) { + public DateRangeAggregationBuilder addRange(DateTime from, DateTime to) { return addRange(null, from, to); } @@ -222,7 +222,7 @@ public class DateRangeAggregatorBuilder extends AbstractRangeBuilder otherOptions) { - DateRangeAggregatorBuilder factory = new DateRangeAggregatorBuilder(aggregationName); + protected DateRangeAggregationBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType, + ValueType targetValueType, Map otherOptions) { + DateRangeAggregationBuilder factory = new DateRangeAggregationBuilder(aggregationName); @SuppressWarnings("unchecked") List ranges = (List) otherOptions.get(RangeAggregator.RANGES_FIELD); for (Range range : ranges) { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceAggregatorBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceAggregationBuilder.java similarity index 85% rename from core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceAggregatorBuilder.java rename to core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceAggregationBuilder.java index a72b4fd322e..e82a769431a 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceAggregatorBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceAggregationBuilder.java @@ -33,7 +33,7 @@ import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator; import org.elasticsearch.search.aggregations.bucket.range.geodistance.GeoDistanceParser.Range; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; -import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorBuilder; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; @@ -42,7 +42,7 @@ import java.util.ArrayList; import java.util.List; import java.util.Objects; -public class GeoDistanceAggregatorBuilder extends ValuesSourceAggregatorBuilder { +public class GeoDistanceAggregationBuilder extends ValuesSourceAggregationBuilder { public static final String NAME = InternalGeoDistance.TYPE.name(); public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME); @@ -52,12 +52,12 @@ public class GeoDistanceAggregatorBuilder extends ValuesSourceAggregatorBuilder< private GeoDistance distanceType = GeoDistance.DEFAULT; private boolean keyed = false; - public GeoDistanceAggregatorBuilder(String name, GeoPoint origin) { + public GeoDistanceAggregationBuilder(String name, GeoPoint origin) { this(name, origin, 
InternalGeoDistance.FACTORY); } - private GeoDistanceAggregatorBuilder(String name, GeoPoint origin, - InternalRange.Factory rangeFactory) { + private GeoDistanceAggregationBuilder(String name, GeoPoint origin, + InternalRange.Factory rangeFactory) { super(name, rangeFactory.type(), rangeFactory.getValueSourceType(), rangeFactory.getValueType()); if (origin == null) { throw new IllegalArgumentException("[origin] must not be null: [" + name + "]"); @@ -68,7 +68,7 @@ public class GeoDistanceAggregatorBuilder extends ValuesSourceAggregatorBuilder< /** * Read from a stream. */ - public GeoDistanceAggregatorBuilder(StreamInput in) throws IOException { + public GeoDistanceAggregationBuilder(StreamInput in) throws IOException { super(in, InternalGeoDistance.FACTORY.type(), InternalGeoDistance.FACTORY.getValueSourceType(), InternalGeoDistance.FACTORY.getValueType()); origin = new GeoPoint(in.readDouble(), in.readDouble()); @@ -95,7 +95,7 @@ public class GeoDistanceAggregatorBuilder extends ValuesSourceAggregatorBuilder< unit.writeTo(out); } - public GeoDistanceAggregatorBuilder addRange(Range range) { + public GeoDistanceAggregationBuilder addRange(Range range) { if (range == null) { throw new IllegalArgumentException("[range] must not be null: [" + name + "]"); } @@ -113,7 +113,7 @@ public class GeoDistanceAggregatorBuilder extends ValuesSourceAggregatorBuilder< * @param to * the upper bound on the distances, exclusive */ - public GeoDistanceAggregatorBuilder addRange(String key, double from, double to) { + public GeoDistanceAggregationBuilder addRange(String key, double from, double to) { ranges.add(new Range(key, from, to)); return this; } @@ -123,7 +123,7 @@ public class GeoDistanceAggregatorBuilder extends ValuesSourceAggregatorBuilder< * automatically generated based on from and * to. */ - public GeoDistanceAggregatorBuilder addRange(double from, double to) { + public GeoDistanceAggregationBuilder addRange(double from, double to) { return addRange(null, from, to); } @@ -135,7 +135,7 @@ public class GeoDistanceAggregatorBuilder extends ValuesSourceAggregatorBuilder< * @param to * the upper bound on the distances, exclusive */ - public GeoDistanceAggregatorBuilder addUnboundedTo(String key, double to) { + public GeoDistanceAggregationBuilder addUnboundedTo(String key, double to) { ranges.add(new Range(key, null, to)); return this; } @@ -144,7 +144,7 @@ public class GeoDistanceAggregatorBuilder extends ValuesSourceAggregatorBuilder< * Same as {@link #addUnboundedTo(String, double)} but the key will be * computed automatically. */ - public GeoDistanceAggregatorBuilder addUnboundedTo(double to) { + public GeoDistanceAggregationBuilder addUnboundedTo(double to) { return addUnboundedTo(null, to); } @@ -156,7 +156,7 @@ public class GeoDistanceAggregatorBuilder extends ValuesSourceAggregatorBuilder< * @param from * the lower bound on the distances, inclusive */ - public GeoDistanceAggregatorBuilder addUnboundedFrom(String key, double from) { + public GeoDistanceAggregationBuilder addUnboundedFrom(String key, double from) { addRange(new Range(key, from, null)); return this; } @@ -165,7 +165,7 @@ public class GeoDistanceAggregatorBuilder extends ValuesSourceAggregatorBuilder< * Same as {@link #addUnboundedFrom(String, double)} but the key will be * computed automatically. 
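// A call-site sketch for the renamed geo_distance builder, chaining only the
// range helpers shown in this file; the origin coordinates and the "location"
// field are hypothetical.
GeoDistanceAggregationBuilder rings = AggregationBuilders.geoDistance("rings", new GeoPoint(52.37, 4.89));
rings.field("location").unit(DistanceUnit.KILOMETERS).addUnboundedTo(10).addRange(10, 50).addUnboundedFrom(50);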
*/ - public GeoDistanceAggregatorBuilder addUnboundedFrom(double from) { + public GeoDistanceAggregationBuilder addUnboundedFrom(double from) { return addUnboundedFrom(null, from); } @@ -178,7 +178,7 @@ public class GeoDistanceAggregatorBuilder extends ValuesSourceAggregatorBuilder< return NAME; } - public GeoDistanceAggregatorBuilder unit(DistanceUnit unit) { + public GeoDistanceAggregationBuilder unit(DistanceUnit unit) { if (unit == null) { throw new IllegalArgumentException("[unit] must not be null: [" + name + "]"); } @@ -190,7 +190,7 @@ public class GeoDistanceAggregatorBuilder extends ValuesSourceAggregatorBuilder< return unit; } - public GeoDistanceAggregatorBuilder distanceType(GeoDistance distanceType) { + public GeoDistanceAggregationBuilder distanceType(GeoDistance distanceType) { if (distanceType == null) { throw new IllegalArgumentException("[distanceType] must not be null: [" + name + "]"); } @@ -202,7 +202,7 @@ public class GeoDistanceAggregatorBuilder extends ValuesSourceAggregatorBuilder< return distanceType; } - public GeoDistanceAggregatorBuilder keyed(boolean keyed) { + public GeoDistanceAggregationBuilder keyed(boolean keyed) { this.keyed = keyed; return this; } @@ -236,7 +236,7 @@ public class GeoDistanceAggregatorBuilder extends ValuesSourceAggregatorBuilder< @Override protected boolean innerEquals(Object obj) { - GeoDistanceAggregatorBuilder other = (GeoDistanceAggregatorBuilder) obj; + GeoDistanceAggregationBuilder other = (GeoDistanceAggregationBuilder) obj; return Objects.equals(origin, other.origin) && Objects.equals(ranges, other.ranges) && Objects.equals(keyed, other.keyed) @@ -244,4 +244,4 @@ public class GeoDistanceAggregatorBuilder extends ValuesSourceAggregatorBuilder< && Objects.equals(unit, other.unit); } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceParser.java index ed6d6a67e2a..b98757aae5d 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceParser.java @@ -85,10 +85,10 @@ public class GeoDistanceParser extends GeoPointValuesSourceParser { } @Override - protected GeoDistanceAggregatorBuilder createFactory( + protected GeoDistanceAggregationBuilder createFactory( String aggregationName, ValuesSourceType valuesSourceType, ValueType targetValueType, Map otherOptions) { GeoPoint origin = (GeoPoint) otherOptions.get(ORIGIN_FIELD); - GeoDistanceAggregatorBuilder factory = new GeoDistanceAggregatorBuilder(aggregationName, origin); + GeoDistanceAggregationBuilder factory = new GeoDistanceAggregationBuilder(aggregationName, origin); @SuppressWarnings("unchecked") List ranges = (List) otherOptions.get(RangeAggregator.RANGES_FIELD); for (Range range : ranges) { @@ -171,4 +171,4 @@ public class GeoDistanceParser extends GeoPointValuesSourceParser { } return false; } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeAggregatorBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeAggregationBuilder.java similarity index 90% rename from core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeAggregatorBuilder.java rename to 
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeAggregatorBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeAggregationBuilder.java
similarity index 90%
rename from core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeAggregatorBuilder.java
rename to core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeAggregationBuilder.java
index c56a2952f8d..243db5f75e3 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeAggregatorBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeAggregationBuilder.java
@@ -44,14 +44,14 @@ import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator;
 import org.elasticsearch.search.aggregations.support.AggregationContext;
 import org.elasticsearch.search.aggregations.support.ValueType;
 import org.elasticsearch.search.aggregations.support.ValuesSource;
-import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorBuilder;
+import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
 import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
 import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
 import org.elasticsearch.search.aggregations.support.ValuesSourceType;

-public final class IpRangeAggregatorBuilder
-        extends ValuesSourceAggregatorBuilder {
+public final class IpRangeAggregationBuilder
+        extends ValuesSourceAggregationBuilder {
     private static final String NAME = "ip_range";
     public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME);
     private static final InternalAggregation.Type TYPE = new InternalAggregation.Type(NAME);
@@ -163,7 +163,7 @@ public final class IpRangeAggregatorBuilder
     private boolean keyed = false;
     private List ranges = new ArrayList<>();

-    public IpRangeAggregatorBuilder(String name) {
+    public IpRangeAggregationBuilder(String name) {
         super(name, TYPE, ValuesSourceType.BYTES, ValueType.IP);
     }
@@ -172,7 +172,7 @@ public final class IpRangeAggregatorBuilder
         return NAME;
     }

-    public IpRangeAggregatorBuilder keyed(boolean keyed) {
+    public IpRangeAggregationBuilder keyed(boolean keyed) {
         this.keyed = keyed;
         return this;
     }
@@ -187,7 +187,7 @@ public final class IpRangeAggregatorBuilder
     }

     /** Add a new {@link Range} to this aggregation. */
-    public IpRangeAggregatorBuilder addRange(Range range) {
+    public IpRangeAggregationBuilder addRange(Range range) {
         ranges.add(range);
         return this;
     }
@@ -202,7 +202,7 @@ public final class IpRangeAggregatorBuilder
      * @param to
      *            the upper bound on the distances, exclusive
      */
-    public IpRangeAggregatorBuilder addRange(String key, String from, String to) {
+    public IpRangeAggregationBuilder addRange(String key, String from, String to) {
         addRange(new Range(key, from, to));
         return this;
     }
@@ -210,7 +210,7 @@ public final class IpRangeAggregatorBuilder
     /**
      * Add a new range to this aggregation using the CIDR notation.
      */
-    public IpRangeAggregatorBuilder addMaskRange(String key, String mask) {
+    public IpRangeAggregationBuilder addMaskRange(String key, String mask) {
         return addRange(new Range(key, mask));
     }
@@ -218,7 +218,7 @@ public final class IpRangeAggregatorBuilder
      * Same as {@link #addMaskRange(String, String)} but uses the mask itself as
      * a key.
      */
-    public IpRangeAggregatorBuilder addMaskRange(String mask) {
+    public IpRangeAggregationBuilder addMaskRange(String mask) {
         return addRange(new Range(mask, mask));
     }
@@ -226,7 +226,7 @@ public final class IpRangeAggregatorBuilder
      * Same as {@link #addRange(String, String, String)} but the key will be
      * automatically generated.
      */
-    public IpRangeAggregatorBuilder addRange(String from, String to) {
+    public IpRangeAggregationBuilder addRange(String from, String to) {
         return addRange(null, from, to);
     }
@@ -234,7 +234,7 @@ public final class IpRangeAggregatorBuilder
      * Same as {@link #addRange(String, String, String)} but there will be no
      * lower bound.
      */
-    public IpRangeAggregatorBuilder addUnboundedTo(String key, String to) {
+    public IpRangeAggregationBuilder addUnboundedTo(String key, String to) {
         addRange(new Range(key, null, to));
         return this;
     }
@@ -243,7 +243,7 @@ public final class IpRangeAggregatorBuilder
      * Same as {@link #addUnboundedTo(String, String)} but the key will be
      * generated automatically.
      */
-    public IpRangeAggregatorBuilder addUnboundedTo(String to) {
+    public IpRangeAggregationBuilder addUnboundedTo(String to) {
         return addUnboundedTo(null, to);
     }
@@ -251,13 +251,13 @@ public final class IpRangeAggregatorBuilder
      * Same as {@link #addRange(String, String, String)} but there will be no
      * upper bound.
      */
-    public IpRangeAggregatorBuilder addUnboundedFrom(String key, String from) {
+    public IpRangeAggregationBuilder addUnboundedFrom(String key, String from) {
         addRange(new Range(key, from, null));
         return this;
     }

     @Override
-    public IpRangeAggregatorBuilder script(Script script) {
+    public IpRangeAggregationBuilder script(Script script) {
         throw new IllegalArgumentException("[ip_range] does not support scripts");
     }
@@ -265,11 +265,11 @@ public final class IpRangeAggregatorBuilder
      * Same as {@link #addUnboundedFrom(String, String)} but the key will be
      * generated automatically.
      */
-    public IpRangeAggregatorBuilder addUnboundedFrom(String from) {
+    public IpRangeAggregationBuilder addUnboundedFrom(String from) {
         return addUnboundedFrom(null, from);
     }

-    public IpRangeAggregatorBuilder(StreamInput in) throws IOException {
+    public IpRangeAggregationBuilder(StreamInput in) throws IOException {
         super(in, TYPE, ValuesSourceType.BYTES, ValueType.IP);
         final int numRanges = in.readVInt();
         for (int i = 0; i < numRanges; ++i) {
@@ -323,7 +323,7 @@ public final class IpRangeAggregatorBuilder
     @Override
     protected boolean innerEquals(Object obj) {
-        IpRangeAggregatorBuilder that = (IpRangeAggregatorBuilder) obj;
+        IpRangeAggregationBuilder that = (IpRangeAggregationBuilder) obj;
         return keyed == that.keyed
                 && ranges.equals(that.ranges);
     }
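// Illustrative sketch (editor's addition, not part of the patch): the renamed
// ip_range builder. addMaskRange/addRange/addUnboundedTo and keyed are taken
// from the hunks above; the "client_ip" field is hypothetical. Note that
// script(...) now rejects scripts outright for this aggregation.
IpRangeAggregationBuilder clients = new IpRangeAggregationBuilder("clients");
clients.field("client_ip")                          // hypothetical ip field
        .addMaskRange("10.0.0.0/8")                 // CIDR mask doubles as the bucket key
        .addRange("192.168.0.1", "192.168.0.255")   // [from, to)
        .addUnboundedTo("10.0.0.5")
        .keyed(true);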
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeParser.java
index 64ed77d42f3..8445fb2d459 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeParser.java
@@ -30,10 +30,10 @@ import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentParser.Token;
 import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.BytesValuesSourceParser;
 import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator;
-import org.elasticsearch.search.aggregations.bucket.range.ip.IpRangeAggregatorBuilder.Range;
+import org.elasticsearch.search.aggregations.bucket.range.ip.IpRangeAggregationBuilder.Range;
 import org.elasticsearch.search.aggregations.support.ValueType;
 import org.elasticsearch.search.aggregations.support.ValuesSource;
-import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorBuilder;
+import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
 import org.elasticsearch.search.aggregations.support.ValuesSourceType;

 /**
@@ -48,10 +48,10 @@ public class IpRangeParser extends BytesValuesSourceParser {
     }

     @Override
-    protected ValuesSourceAggregatorBuilder createFactory(
+    protected ValuesSourceAggregationBuilder createFactory(
             String aggregationName, ValuesSourceType valuesSourceType, ValueType targetValueType, Map otherOptions) {
-        IpRangeAggregatorBuilder range = new IpRangeAggregatorBuilder(aggregationName);
+        IpRangeAggregationBuilder range = new IpRangeAggregationBuilder(aggregationName);
         @SuppressWarnings("unchecked")
         Iterable ranges = (Iterable) otherOptions.get(RangeAggregator.RANGES_FIELD);
         if (otherOptions.containsKey(RangeAggregator.RANGES_FIELD)) {
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregatorBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregationBuilder.java
similarity index 88%
rename from core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregatorBuilder.java
rename to core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregationBuilder.java
index f0c923e6efc..804574eea10 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregatorBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregationBuilder.java
@@ -28,7 +28,7 @@ import org.elasticsearch.search.aggregations.AggregatorFactory;
 import org.elasticsearch.search.aggregations.InternalAggregation.Type;
 import org.elasticsearch.search.aggregations.support.AggregationContext;
 import org.elasticsearch.search.aggregations.support.ValuesSource;
-import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorBuilder;
+import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
 import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
 import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
 import org.elasticsearch.search.aggregations.support.ValuesSourceType;
@@ -36,25 +36,25 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceType;
 import java.io.IOException;
 import java.util.Objects;

-public class DiversifiedAggregatorBuilder extends ValuesSourceAggregatorBuilder {
+public class DiversifiedAggregationBuilder extends ValuesSourceAggregationBuilder {
     public static final String NAME = "diversified_sampler";
     public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME);
     public static final Type TYPE = new Type(NAME);
     public static final int MAX_DOCS_PER_VALUE_DEFAULT = 1;

-    private int shardSize = SamplerAggregatorBuilder.DEFAULT_SHARD_SAMPLE_SIZE;
+    private int shardSize = SamplerAggregationBuilder.DEFAULT_SHARD_SAMPLE_SIZE;
     private int maxDocsPerValue = MAX_DOCS_PER_VALUE_DEFAULT;
     private String executionHint = null;

-    public DiversifiedAggregatorBuilder(String name) {
+    public DiversifiedAggregationBuilder(String name) {
         super(name, TYPE, ValuesSourceType.ANY, null);
     }

    /**
     * Read from a stream.
     */
-    public DiversifiedAggregatorBuilder(StreamInput in) throws IOException {
+    public DiversifiedAggregationBuilder(StreamInput in) throws IOException {
         super(in, TYPE, ValuesSourceType.ANY, null);
         shardSize = in.readVInt();
         maxDocsPerValue = in.readVInt();
@@ -71,7 +71,7 @@ public class DiversifiedAggregatorBuilder extends ValuesSourceAggregatorBuilder<
     /**
      * Set the max num docs to be returned from each shard.
      */
-    public DiversifiedAggregatorBuilder shardSize(int shardSize) {
+    public DiversifiedAggregationBuilder shardSize(int shardSize) {
         if (shardSize < 0) {
             throw new IllegalArgumentException(
                     "[shardSize] must be greater than or equal to 0. Found [" + shardSize + "] in [" + name + "]");
@@ -90,7 +90,7 @@ public class DiversifiedAggregatorBuilder extends ValuesSourceAggregatorBuilder<
     /**
      * Set the max num docs to be returned per value.
      */
-    public DiversifiedAggregatorBuilder maxDocsPerValue(int maxDocsPerValue) {
+    public DiversifiedAggregationBuilder maxDocsPerValue(int maxDocsPerValue) {
         if (maxDocsPerValue < 0) {
             throw new IllegalArgumentException(
                     "[maxDocsPerValue] must be greater than or equal to 0. Found [" + maxDocsPerValue + "] in [" + name + "]");
@@ -109,7 +109,7 @@ public class DiversifiedAggregatorBuilder extends ValuesSourceAggregatorBuilder<
     /**
      * Set the execution hint.
      */
-    public DiversifiedAggregatorBuilder executionHint(String executionHint) {
+    public DiversifiedAggregationBuilder executionHint(String executionHint) {
         this.executionHint = executionHint;
         return this;
     }
@@ -145,7 +145,7 @@ public class DiversifiedAggregatorBuilder extends ValuesSourceAggregatorBuilder<
     @Override
     protected boolean innerEquals(Object obj) {
-        DiversifiedAggregatorBuilder other = (DiversifiedAggregatorBuilder) obj;
+        DiversifiedAggregationBuilder other = (DiversifiedAggregationBuilder) obj;
         return Objects.equals(shardSize, other.shardSize)
                 && Objects.equals(maxDocsPerValue, other.maxDocsPerValue)
                 && Objects.equals(executionHint, other.executionHint);
@@ -155,4 +155,4 @@ public class DiversifiedAggregatorBuilder extends ValuesSourceAggregatorBuilder<
     public String getWriteableName() {
         return NAME;
     }
-}
\ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedSamplerParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedSamplerParser.java
index cb87e53f2c0..f495071f6d2 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedSamplerParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedSamplerParser.java
@@ -38,9 +38,9 @@ public class DiversifiedSamplerParser extends AnyValuesSourceParser {
     }

     @Override
-    protected DiversifiedAggregatorBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType,
-            ValueType targetValueType, Map otherOptions) {
-        DiversifiedAggregatorBuilder factory = new DiversifiedAggregatorBuilder(aggregationName);
+    protected DiversifiedAggregationBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType,
+            ValueType targetValueType, Map otherOptions) {
+        DiversifiedAggregationBuilder factory = new DiversifiedAggregationBuilder(aggregationName);
         Integer shardSize = (Integer) otherOptions.get(SamplerAggregator.SHARD_SIZE_FIELD);
         if (shardSize != null) {
             factory.shardSize(shardSize);
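// Illustrative sketch (editor's addition, not part of the patch): the renamed
// diversified_sampler builder. shardSize defaults to
// SamplerAggregationBuilder.DEFAULT_SHARD_SAMPLE_SIZE and both setters reject
// negative values, as the hunks above show. The "author" field and the "map"
// execution hint are example choices.
DiversifiedAggregationBuilder sample = new DiversifiedAggregationBuilder("sample");
sample.field("author")          // hypothetical field to de-duplicate on
        .shardSize(200)         // max docs collected per shard
        .maxDocsPerValue(3)     // max docs per distinct author
        .executionHint("map");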
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregatorBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregationBuilder.java
similarity index 87%
rename from core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregatorBuilder.java
rename to core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregationBuilder.java
index 2cc3bb4c303..1220a2ddd42 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregatorBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregationBuilder.java
@@ -26,7 +26,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.query.QueryParseContext;
-import org.elasticsearch.search.aggregations.AggregatorBuilder;
+import org.elasticsearch.search.aggregations.AggregationBuilder;
 import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;
 import org.elasticsearch.search.aggregations.AggregatorFactory;
 import org.elasticsearch.search.aggregations.support.AggregationContext;
@@ -34,7 +34,7 @@ import org.elasticsearch.search.aggregations.support.AggregationContext;
 import java.io.IOException;
 import java.util.Objects;

-public class SamplerAggregatorBuilder extends AggregatorBuilder {
+public class SamplerAggregationBuilder extends AggregationBuilder {
     public static final String NAME = InternalSampler.TYPE.name();
     public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME);
@@ -42,14 +42,14 @@ public class SamplerAggregatorBuilder extends AggregatorBuilder {
+public class SignificantTermsAggregationBuilder extends ValuesSourceAggregationBuilder {
     public static final String NAME = SignificantStringTerms.TYPE.name();
     public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME);
@@ -62,14 +62,14 @@ public class SignificantTermsAggregatorBuilder extends ValuesSourceAggregatorBui
     private TermsAggregator.BucketCountThresholds bucketCountThresholds = new BucketCountThresholds(DEFAULT_BUCKET_COUNT_THRESHOLDS);
     private SignificanceHeuristic significanceHeuristic = DEFAULT_SIGNIFICANCE_HEURISTIC;

-    public SignificantTermsAggregatorBuilder(String name, ValueType valueType) {
+    public SignificantTermsAggregationBuilder(String name, ValueType valueType) {
         super(name, SignificantStringTerms.TYPE, ValuesSourceType.ANY, valueType);
     }

     /**
      * Read from a Stream.
      */
-    public SignificantTermsAggregatorBuilder(StreamInput in) throws IOException {
+    public SignificantTermsAggregationBuilder(StreamInput in) throws IOException {
         super(in, SignificantStringTerms.TYPE, ValuesSourceType.ANY);
         bucketCountThresholds = new BucketCountThresholds(in);
         executionHint = in.readOptionalString();
@@ -100,7 +100,7 @@ public class SignificantTermsAggregatorBuilder extends ValuesSourceAggregatorBui
         return bucketCountThresholds;
     }

-    public SignificantTermsAggregatorBuilder bucketCountThresholds(TermsAggregator.BucketCountThresholds bucketCountThresholds) {
+    public SignificantTermsAggregationBuilder bucketCountThresholds(TermsAggregator.BucketCountThresholds bucketCountThresholds) {
         if (bucketCountThresholds == null) {
             throw new IllegalArgumentException("[bucketCountThresholds] must not be null: [" + name + "]");
         }
@@ -112,7 +112,7 @@ public class SignificantTermsAggregatorBuilder extends ValuesSourceAggregatorBui
     /**
      * Sets the size - indicating how many term buckets should be returned
      * (defaults to 10)
      */
-    public SignificantTermsAggregatorBuilder size(int size) {
+    public SignificantTermsAggregationBuilder size(int size) {
         if (size < 0) {
             throw new IllegalArgumentException("[size] must be greater than or equal to 0. Found [" + size + "] in [" + name + "]");
         }
@@ -126,7 +126,7 @@ public class SignificantTermsAggregatorBuilder extends ValuesSourceAggregatorBui
      * search execution). The higher the shard size is, the more accurate the
      * results are.
      */
-    public SignificantTermsAggregatorBuilder shardSize(int shardSize) {
+    public SignificantTermsAggregationBuilder shardSize(int shardSize) {
         if (shardSize < 0) {
             throw new IllegalArgumentException(
                     "[shardSize] must be greater than or equal to 0. Found [" + shardSize + "] in [" + name + "]");
@@ -139,7 +139,7 @@ public class SignificantTermsAggregatorBuilder extends ValuesSourceAggregatorBui
     /**
      * Set the minimum document count terms should have in order to appear in
      * the response.
      */
-    public SignificantTermsAggregatorBuilder minDocCount(long minDocCount) {
+    public SignificantTermsAggregationBuilder minDocCount(long minDocCount) {
         if (minDocCount < 0) {
             throw new IllegalArgumentException(
                     "[minDocCount] must be greater than or equal to 0. Found [" + minDocCount + "] in [" + name + "]");
@@ -152,7 +152,7 @@ public class SignificantTermsAggregatorBuilder extends ValuesSourceAggregatorBui
     /**
      * Set the minimum document count terms should have on the shard in order to
      * appear in the response.
      */
-    public SignificantTermsAggregatorBuilder shardMinDocCount(long shardMinDocCount) {
+    public SignificantTermsAggregationBuilder shardMinDocCount(long shardMinDocCount) {
         if (shardMinDocCount < 0) {
             throw new IllegalArgumentException(
                     "[shardMinDocCount] must be greater than or equal to 0. Found [" + shardMinDocCount + "] in [" + name + "]");
@@ -164,7 +164,7 @@ public class SignificantTermsAggregatorBuilder extends ValuesSourceAggregatorBui
     /**
     * Expert: sets an execution hint to the aggregation.
     */
-    public SignificantTermsAggregatorBuilder executionHint(String executionHint) {
+    public SignificantTermsAggregationBuilder executionHint(String executionHint) {
         this.executionHint = executionHint;
         return this;
     }
@@ -176,7 +176,7 @@ public class SignificantTermsAggregatorBuilder extends ValuesSourceAggregatorBui
         return executionHint;
     }

-    public SignificantTermsAggregatorBuilder backgroundFilter(QueryBuilder backgroundFilter) {
+    public SignificantTermsAggregationBuilder backgroundFilter(QueryBuilder backgroundFilter) {
         if (backgroundFilter == null) {
             throw new IllegalArgumentException("[backgroundFilter] must not be null: [" + name + "]");
         }
@@ -191,7 +191,7 @@ public class SignificantTermsAggregatorBuilder extends ValuesSourceAggregatorBui
     /**
      * Set terms to include and exclude from the aggregation results
      */
-    public SignificantTermsAggregatorBuilder includeExclude(IncludeExclude includeExclude) {
+    public SignificantTermsAggregationBuilder includeExclude(IncludeExclude includeExclude) {
         this.includeExclude = includeExclude;
         return this;
     }
@@ -203,7 +203,7 @@ public class SignificantTermsAggregatorBuilder extends ValuesSourceAggregatorBui
         return includeExclude;
     }

-    public SignificantTermsAggregatorBuilder significanceHeuristic(SignificanceHeuristic significanceHeuristic) {
+    public SignificantTermsAggregationBuilder significanceHeuristic(SignificanceHeuristic significanceHeuristic) {
         if (significanceHeuristic == null) {
             throw new IllegalArgumentException("[significanceHeuristic] must not be null: [" + name + "]");
         }
@@ -226,7 +226,7 @@ public class SignificantTermsAggregatorBuilder extends ValuesSourceAggregatorBui
     protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
         bucketCountThresholds.toXContent(builder, params);
         if (executionHint != null) {
-            builder.field(TermsAggregatorBuilder.EXECUTION_HINT_FIELD_NAME.getPreferredName(), executionHint);
+            builder.field(TermsAggregationBuilder.EXECUTION_HINT_FIELD_NAME.getPreferredName(), executionHint);
         }
         if (filterBuilder != null) {
             builder.field(BACKGROUND_FILTER.getPreferredName(), filterBuilder);
@@ -245,7 +245,7 @@ public class SignificantTermsAggregatorBuilder extends ValuesSourceAggregatorBui
     @Override
     protected boolean innerEquals(Object obj) {
-        SignificantTermsAggregatorBuilder other = (SignificantTermsAggregatorBuilder) obj;
+        SignificantTermsAggregationBuilder other = (SignificantTermsAggregationBuilder) obj;
         return Objects.equals(bucketCountThresholds, other.bucketCountThresholds)
                 && Objects.equals(executionHint, other.executionHint)
                 && Objects.equals(filterBuilder, other.filterBuilder)
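// Illustrative sketch (editor's addition, not part of the patch): the renamed
// significant_terms builder. Setter names come from the hunks above; the field,
// the background filter and the ValueType are hypothetical example choices.
SignificantTermsAggregationBuilder sigTerms =
        new SignificantTermsAggregationBuilder("suspicious_terms", ValueType.STRING);
sigTerms.field("description")
        .size(10)
        .shardMinDocCount(2)
        .backgroundFilter(QueryBuilders.termQuery("category", "report"));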
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java
index 13126029b8e..ab30e1b2d4a 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java
@@ -178,7 +178,7 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac
         numberOfAggregatorsCreated++;

         BucketCountThresholds bucketCountThresholds = new BucketCountThresholds(this.bucketCountThresholds);
-        if (bucketCountThresholds.getShardSize() == SignificantTermsAggregatorBuilder.DEFAULT_BUCKET_COUNT_THRESHOLDS.getShardSize()) {
+        if (bucketCountThresholds.getShardSize() == SignificantTermsAggregationBuilder.DEFAULT_BUCKET_COUNT_THRESHOLDS.getShardSize()) {
             // The user has not made a shardSize selection.
             // Use default heuristic to avoid any wrong-ranking caused by
             // distributed counting
@@ -211,7 +211,14 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac
             }
         }
         assert execution != null;
-        return execution.create(name, factories, valuesSource, config.format(), bucketCountThresholds, includeExclude, context, parent,
+
+        DocValueFormat format = config.format();
+        if ((includeExclude != null) && (includeExclude.isRegexBased()) && format != DocValueFormat.RAW) {
+            throw new AggregationExecutionException("Aggregation [" + name + "] cannot support regular expression style include/exclude "
+                    + "settings as they can only be applied to string fields. Use an array of values for include/exclude clauses");
+        }
+
+        return execution.create(name, factories, valuesSource, format, bucketCountThresholds, includeExclude, context, parent,
                 significanceHeuristic, this, pipelineAggregators, metaData);
     }
@@ -227,7 +234,7 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac
             }
             IncludeExclude.LongFilter longFilter = null;
             if (includeExclude != null) {
-                longFilter = includeExclude.convertToLongFilter();
+                longFilter = includeExclude.convertToLongFilter(config.format());
             }
             return new SignificantLongTermsAggregator(name, factories, (ValuesSource.Numeric) valuesSource, config.format(),
                     bucketCountThresholds, context, parent, significanceHeuristic, this, longFilter, pipelineAggregators,
@@ -248,7 +255,7 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac
                 AggregationContext aggregationContext, Aggregator parent, SignificanceHeuristic significanceHeuristic,
                 SignificantTermsAggregatorFactory termsAggregatorFactory, List pipelineAggregators,
                 Map metaData) throws IOException {
-            final IncludeExclude.StringFilter filter = includeExclude == null ? null : includeExclude.convertToStringFilter();
+            final IncludeExclude.StringFilter filter = includeExclude == null ? null : includeExclude.convertToStringFilter(format);
             return new SignificantStringTermsAggregator(name, factories, valuesSource, format, bucketCountThresholds, filter,
                     aggregationContext, parent, significanceHeuristic, termsAggregatorFactory, pipelineAggregators, metaData);
         }
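// Illustrative sketch (editor's addition, not part of the patch): what the new
// guard above rejects. Regex-based include/exclude only applies to plain string
// values (DocValueFormat.RAW); on a formatted field such as an ip field the
// factory now fails fast instead of comparing raw bytes. The field name and the
// regex constructor of IncludeExclude are assumptions for the example.
SignificantTermsAggregationBuilder byAddress =
        new SignificantTermsAggregationBuilder("by_address", ValueType.IP);
byAddress.field("client_ip")
        .includeExclude(new IncludeExclude("10\\..*", null));   // regex include
// -> AggregationExecutionException at execution time; an array of exact values,
//    e.g. new IncludeExclude(new String[] { "10.0.0.1" }, null), still works.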
@@ -262,7 +269,7 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac
                 AggregationContext aggregationContext, Aggregator parent, SignificanceHeuristic significanceHeuristic,
                 SignificantTermsAggregatorFactory termsAggregatorFactory, List pipelineAggregators,
                 Map metaData) throws IOException {
-            final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter();
+            final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter(format);
             return new GlobalOrdinalsSignificantTermsAggregator(name, factories,
                     (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, format, bucketCountThresholds, filter,
                     aggregationContext, parent, significanceHeuristic, termsAggregatorFactory, pipelineAggregators, metaData);
@@ -277,7 +284,7 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac
                 AggregationContext aggregationContext, Aggregator parent, SignificanceHeuristic significanceHeuristic,
                 SignificantTermsAggregatorFactory termsAggregatorFactory, List pipelineAggregators,
                 Map metaData) throws IOException {
-            final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter();
+            final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter(format);
             return new GlobalOrdinalsSignificantTermsAggregator.WithHash(name, factories,
                     (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, format, bucketCountThresholds, filter, aggregationContext,
                     parent, significanceHeuristic, termsAggregatorFactory, pipelineAggregators, metaData);
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsParser.java
index 60805bea692..33db8f97335 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsParser.java
@@ -53,10 +53,11 @@ public class SignificantTermsParser extends AbstractTermsParser {
     }

     @Override
-    protected SignificantTermsAggregatorBuilder doCreateFactory(String aggregationName, ValuesSourceType valuesSourceType,
-            ValueType targetValueType, BucketCountThresholds bucketCountThresholds, SubAggCollectionMode collectMode, String executionHint,
-            IncludeExclude incExc, Map otherOptions) {
-        SignificantTermsAggregatorBuilder factory = new SignificantTermsAggregatorBuilder(aggregationName, targetValueType);
+    protected SignificantTermsAggregationBuilder doCreateFactory(String aggregationName, ValuesSourceType valuesSourceType,
+                                                                 ValueType targetValueType, BucketCountThresholds bucketCountThresholds,
+                                                                 SubAggCollectionMode collectMode, String executionHint,
+                                                                 IncludeExclude incExc, Map otherOptions) {
+        SignificantTermsAggregationBuilder factory = new SignificantTermsAggregationBuilder(aggregationName, targetValueType);
         if (bucketCountThresholds != null) {
             factory.bucketCountThresholds(bucketCountThresholds);
         }
@@ -66,11 +67,12 @@ public class SignificantTermsParser extends AbstractTermsParser {
         if (incExc != null) {
             factory.includeExclude(incExc);
         }
-        QueryBuilder backgroundFilter = (QueryBuilder) otherOptions.get(SignificantTermsAggregatorBuilder.BACKGROUND_FILTER);
+        QueryBuilder backgroundFilter = (QueryBuilder) otherOptions.get(SignificantTermsAggregationBuilder.BACKGROUND_FILTER);
         if (backgroundFilter != null) {
             factory.backgroundFilter(backgroundFilter);
         }
-        SignificanceHeuristic significanceHeuristic = (SignificanceHeuristic) otherOptions.get(SignificantTermsAggregatorBuilder.HEURISTIC);
+        SignificanceHeuristic significanceHeuristic =
+                (SignificanceHeuristic) otherOptions.get(SignificantTermsAggregationBuilder.HEURISTIC);
         if (significanceHeuristic != null) {
             factory.significanceHeuristic(significanceHeuristic);
         }
@@ -85,12 +87,12 @@ public class SignificantTermsParser extends AbstractTermsParser {
                     .lookupReturningNullIfNotFound(currentFieldName, parseFieldMatcher);
             if (significanceHeuristicParser != null) {
                 SignificanceHeuristic significanceHeuristic = significanceHeuristicParser.parse(parser, parseFieldMatcher);
-                otherOptions.put(SignificantTermsAggregatorBuilder.HEURISTIC, significanceHeuristic);
+                otherOptions.put(SignificantTermsAggregationBuilder.HEURISTIC, significanceHeuristic);
                 return true;
-            } else if (parseFieldMatcher.match(currentFieldName, SignificantTermsAggregatorBuilder.BACKGROUND_FILTER)) {
+            } else if (parseFieldMatcher.match(currentFieldName, SignificantTermsAggregationBuilder.BACKGROUND_FILTER)) {
                 QueryParseContext queryParseContext = new QueryParseContext(queriesRegistry, parser, parseFieldMatcher);
                 QueryBuilder filter = queryParseContext.parseInnerQueryBuilder();
-                otherOptions.put(SignificantTermsAggregatorBuilder.BACKGROUND_FILTER, filter);
+                otherOptions.put(SignificantTermsAggregationBuilder.BACKGROUND_FILTER, filter);
                 return true;
             }
         }
@@ -99,6 +101,6 @@ public class SignificantTermsParser extends AbstractTermsParser {
     @Override
     protected BucketCountThresholds getDefaultBucketCountThresholds() {
-        return new TermsAggregator.BucketCountThresholds(SignificantTermsAggregatorBuilder.DEFAULT_BUCKET_COUNT_THRESHOLDS);
+        return new TermsAggregator.BucketCountThresholds(SignificantTermsAggregationBuilder.DEFAULT_BUCKET_COUNT_THRESHOLDS);
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/UnmappedSignificantTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/UnmappedSignificantTerms.java
index 5369e269058..b5781aa34be 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/UnmappedSignificantTerms.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/UnmappedSignificantTerms.java
@@ -60,7 +60,7 @@ public class UnmappedSignificantTerms extends InternalSignificantTerms
            pipelineAggregators, Map metaData) {
         //We pass zero for index/subset sizes because for the purpose of significant term analysis
         // we assume an unmapped index's size is irrelevant to the proceedings.
-        super(0, 0, name, DocValueFormat.RAW, requiredSize, minDocCount, SignificantTermsAggregatorBuilder.DEFAULT_SIGNIFICANCE_HEURISTIC,
+        super(0, 0, name, DocValueFormat.RAW, requiredSize, minDocCount, SignificantTermsAggregationBuilder.DEFAULT_SIGNIFICANCE_HEURISTIC,
                 BUCKETS, pipelineAggregators, metaData);
     }
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractTermsParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractTermsParser.java
index a15c7d28427..3f27c4f1c6f 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractTermsParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractTermsParser.java
@@ -29,7 +29,7 @@ import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude
 import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.AnyValuesSourceParser;
 import org.elasticsearch.search.aggregations.support.ValueType;
 import org.elasticsearch.search.aggregations.support.ValuesSource;
-import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorBuilder;
+import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
 import org.elasticsearch.search.aggregations.support.ValuesSourceType;

 import java.io.IOException;
@@ -50,8 +50,10 @@ public abstract class AbstractTermsParser extends AnyValuesSourceParser {
     }

     @Override
-    protected final ValuesSourceAggregatorBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType,
-            ValueType targetValueType, Map otherOptions) {
+    protected final ValuesSourceAggregationBuilder createFactory(String aggregationName,
+                                                                 ValuesSourceType valuesSourceType,
+                                                                 ValueType targetValueType,
+                                                                 Map otherOptions) {
         BucketCountThresholds bucketCountThresholds = getDefaultBucketCountThresholds();
         Integer requiredSize = (Integer) otherOptions.get(REQUIRED_SIZE_FIELD_NAME);
         if (requiredSize != null && requiredSize != -1) {
@@ -77,10 +79,14 @@ public abstract class AbstractTermsParser extends AnyValuesSourceParser {
                 otherOptions);
     }

-    protected abstract ValuesSourceAggregatorBuilder doCreateFactory(String aggregationName,
-            ValuesSourceType valuesSourceType,
-            ValueType targetValueType, BucketCountThresholds bucketCountThresholds, SubAggCollectionMode collectMode, String executionHint,
-            IncludeExclude incExc, Map otherOptions);
+    protected abstract ValuesSourceAggregationBuilder doCreateFactory(String aggregationName,
+                                                                      ValuesSourceType valuesSourceType,
+                                                                      ValueType targetValueType,
+                                                                      BucketCountThresholds bucketCountThresholds,
+                                                                      SubAggCollectionMode collectMode,
+                                                                      String executionHint,
+                                                                      IncludeExclude incExc,
+                                                                      Map otherOptions);

     @Override
     protected boolean token(String aggregationName, String currentFieldName, Token token, XContentParser parser,
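// Illustrative sketch (editor's addition, not part of the patch): usage of the
// renamed terms builder from the file below. The "genre" field is hypothetical,
// and order(List) is the variant shown in the following hunks.
TermsAggregationBuilder genres = new TermsAggregationBuilder("genres", ValueType.STRING);
genres.field("genre")
        .size(20)
        .order(Collections.singletonList(Terms.Order.count(false)))  // most frequent first
        .showTermDocCountError(true);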
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java
similarity index 89%
rename from core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorBuilder.java
rename to core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java
index 53887d8b20c..f4cb133c499 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java
@@ -30,7 +30,7 @@ import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude
 import org.elasticsearch.search.aggregations.support.AggregationContext;
 import org.elasticsearch.search.aggregations.support.ValueType;
 import org.elasticsearch.search.aggregations.support.ValuesSource;
-import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorBuilder;
+import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
 import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
 import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
 import org.elasticsearch.search.aggregations.support.ValuesSourceType;
@@ -38,7 +38,7 @@ import java.io.IOException;
 import java.util.List;
 import java.util.Objects;

-public class TermsAggregatorBuilder extends ValuesSourceAggregatorBuilder {
+public class TermsAggregationBuilder extends ValuesSourceAggregationBuilder {
     public static final String NAME = StringTerms.TYPE.name();
     public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME);
@@ -61,14 +61,14 @@ public class TermsAggregatorBuilder extends ValuesSourceAggregatorBuilder
 orders) {
+    public TermsAggregationBuilder order(List orders) {
         if (orders == null) {
             throw new IllegalArgumentException("[orders] must not be null: [" + name + "]");
         }
@@ -190,7 +190,7 @@ public class TermsAggregatorBuilder extends ValuesSourceAggregatorBuilder
                 pipelineAggregators, Map metaData) throws IOException {
-            final IncludeExclude.StringFilter filter = includeExclude == null ? null : includeExclude.convertToStringFilter();
+            final IncludeExclude.StringFilter filter = includeExclude == null ? null : includeExclude.convertToStringFilter(format);
             return new StringTermsAggregator(name, factories, valuesSource, order, format, bucketCountThresholds, filter,
                     aggregationContext, parent, subAggCollectMode, showTermDocCountError, pipelineAggregators, metaData);
         }
@@ -211,7 +216,7 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory
                 pipelineAggregators, Map metaData) throws IOException {
-            final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter();
+            final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter(format);
             return new GlobalOrdinalsStringTermsAggregator(name, factories, (ValuesSource.Bytes.WithOrdinals) valuesSource, order,
                     format, bucketCountThresholds, filter, aggregationContext, parent, subAggCollectMode, showTermDocCountError,
                     pipelineAggregators, metaData);
@@ -231,7 +236,7 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory
                 pipelineAggregators, Map metaData) throws IOException {
-            final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter();
+            final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter(format);
             return new GlobalOrdinalsStringTermsAggregator.WithHash(name, factories, (ValuesSource.Bytes.WithOrdinals) valuesSource,
                     order, format, bucketCountThresholds, filter, aggregationContext, parent, subAggCollectMode, showTermDocCountError,
                     pipelineAggregators, metaData);
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsParser.java
index 343ec5e2d78..2a67dbe2218 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsParser.java
@@ -41,12 +41,13 @@ import java.util.Map;
 */
 public class TermsParser extends AbstractTermsParser {
     @Override
-    protected TermsAggregatorBuilder doCreateFactory(String aggregationName, ValuesSourceType valuesSourceType,
-            ValueType targetValueType, BucketCountThresholds bucketCountThresholds, SubAggCollectionMode collectMode, String executionHint,
-            IncludeExclude incExc, Map otherOptions) {
-        TermsAggregatorBuilder factory = new TermsAggregatorBuilder(aggregationName, targetValueType);
+    protected TermsAggregationBuilder doCreateFactory(String aggregationName, ValuesSourceType valuesSourceType,
+                                                      ValueType targetValueType, BucketCountThresholds bucketCountThresholds,
+                                                      SubAggCollectionMode collectMode, String executionHint,
+                                                      IncludeExclude incExc, Map otherOptions) {
+        TermsAggregationBuilder factory = new TermsAggregationBuilder(aggregationName, targetValueType);
         @SuppressWarnings("unchecked")
-        List orderElements = (List) otherOptions.get(TermsAggregatorBuilder.ORDER_FIELD);
+        List orderElements = (List) otherOptions.get(TermsAggregationBuilder.ORDER_FIELD);
         if (orderElements != null) {
             List orders = new ArrayList<>(orderElements.size());
             for (OrderElement orderElement : orderElements) {
@@ -66,7 +67,7 @@ public class TermsParser extends AbstractTermsParser {
         if (incExc != null) {
             factory.includeExclude(incExc);
         }
-        Boolean showTermDocCountError = (Boolean) otherOptions.get(TermsAggregatorBuilder.SHOW_TERM_DOC_COUNT_ERROR);
+        Boolean showTermDocCountError = (Boolean) otherOptions.get(TermsAggregationBuilder.SHOW_TERM_DOC_COUNT_ERROR);
         if (showTermDocCountError != null) {
             factory.showTermDocCountError(showTermDocCountError);
         }
@@ -77,12 +78,12 @@ public class TermsParser extends AbstractTermsParser {
     public boolean parseSpecial(String aggregationName, XContentParser parser, ParseFieldMatcher parseFieldMatcher, Token token,
             String currentFieldName, Map otherOptions) throws IOException {
         if (token == XContentParser.Token.START_OBJECT) {
-            if (parseFieldMatcher.match(currentFieldName, TermsAggregatorBuilder.ORDER_FIELD)) {
-                otherOptions.put(TermsAggregatorBuilder.ORDER_FIELD, Collections.singletonList(parseOrderParam(aggregationName, parser)));
+            if (parseFieldMatcher.match(currentFieldName, TermsAggregationBuilder.ORDER_FIELD)) {
+                otherOptions.put(TermsAggregationBuilder.ORDER_FIELD, Collections.singletonList(parseOrderParam(aggregationName, parser)));
                 return true;
             }
         } else if (token == XContentParser.Token.START_ARRAY) {
-            if (parseFieldMatcher.match(currentFieldName, TermsAggregatorBuilder.ORDER_FIELD)) {
+            if (parseFieldMatcher.match(currentFieldName, TermsAggregationBuilder.ORDER_FIELD)) {
                 List orderElements = new ArrayList<>();
                 while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                     if (token == XContentParser.Token.START_OBJECT) {
@@ -93,12 +94,12 @@ public class TermsParser extends AbstractTermsParser {
                                 "Order elements must be of type object in [" + aggregationName + "] found token of type [" + token + "].");
                     }
                 }
-                otherOptions.put(TermsAggregatorBuilder.ORDER_FIELD, orderElements);
+                otherOptions.put(TermsAggregationBuilder.ORDER_FIELD, orderElements);
                 return true;
             }
         } else if (token == XContentParser.Token.VALUE_BOOLEAN) {
-            if (parseFieldMatcher.match(currentFieldName, TermsAggregatorBuilder.SHOW_TERM_DOC_COUNT_ERROR)) {
-                otherOptions.put(TermsAggregatorBuilder.SHOW_TERM_DOC_COUNT_ERROR, parser.booleanValue());
+            if (parseFieldMatcher.match(currentFieldName, TermsAggregationBuilder.SHOW_TERM_DOC_COUNT_ERROR)) {
+                otherOptions.put(TermsAggregationBuilder.SHOW_TERM_DOC_COUNT_ERROR, parser.booleanValue());
                 return true;
             }
         }
@@ -158,7 +159,7 @@ public class TermsParser extends AbstractTermsParser {
     @Override
     public TermsAggregator.BucketCountThresholds getDefaultBucketCountThresholds() {
-        return new TermsAggregator.BucketCountThresholds(TermsAggregatorBuilder.DEFAULT_BUCKET_COUNT_THRESHOLDS);
+        return new TermsAggregator.BucketCountThresholds(TermsAggregationBuilder.DEFAULT_BUCKET_COUNT_THRESHOLDS);
     }

     static Terms.Order resolveOrder(String key, boolean asc) {
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/support/IncludeExclude.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/support/IncludeExclude.java
index 101291d01e1..209700b86d9 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/support/IncludeExclude.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/support/IncludeExclude.java
@@ -43,6 +43,7 @@ import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.aggregations.support.ValuesSource;
 import org.elasticsearch.search.aggregations.support.ValuesSource.Bytes.WithOrdinals;
@@ -135,7 +136,8 @@ public class IncludeExclude implements Writeable, ToXContent {
     }

     public static abstract class OrdinalsFilter {
-        public abstract LongBitSet acceptedGlobalOrdinals(RandomAccessOrds globalOrdinals, ValuesSource.Bytes.WithOrdinals valueSource) throws IOException;
+        public abstract LongBitSet acceptedGlobalOrdinals(RandomAccessOrds globalOrdinals, ValuesSource.Bytes.WithOrdinals valueSource)
+                throws IOException;
     }
@@ -152,7 +154,8 @@ public class IncludeExclude implements Writeable, ToXContent {
         */
        @Override
-        public LongBitSet acceptedGlobalOrdinals(RandomAccessOrds globalOrdinals, ValuesSource.Bytes.WithOrdinals valueSource) throws IOException {
+        public LongBitSet acceptedGlobalOrdinals(RandomAccessOrds globalOrdinals, ValuesSource.Bytes.WithOrdinals valueSource)
+                throws IOException {
             LongBitSet acceptedGlobalOrdinals = new LongBitSet(globalOrdinals.getValueCount());
             TermsEnum globalTermsEnum;
             Terms globalTerms = new DocValuesTerms(globalOrdinals);
@@ -179,7 +182,7 @@ public class IncludeExclude implements Writeable, ToXContent {
         @Override
         public LongBitSet acceptedGlobalOrdinals(RandomAccessOrds globalOrdinals, WithOrdinals valueSource) throws IOException {
             LongBitSet acceptedGlobalOrdinals = new LongBitSet(globalOrdinals.getValueCount());
-            if(includeValues!=null){
+            if (includeValues != null) {
                 for (BytesRef term : includeValues) {
                     long ord = globalOrdinals.lookupTerm(term);
                     if (ord >= 0) {
@@ -534,33 +537,46 @@ public class IncludeExclude implements Writeable, ToXContent {
         return a;
     }

-    public StringFilter convertToStringFilter() {
+    public StringFilter convertToStringFilter(DocValueFormat format) {
         if (isRegexBased()) {
             return new AutomatonBackedStringFilter(toAutomaton());
         }
-        return new TermListBackedStringFilter(includeValues, excludeValues);
+        return new TermListBackedStringFilter(parseForDocValues(includeValues, format), parseForDocValues(excludeValues, format));
     }

-    public OrdinalsFilter convertToOrdinalsFilter() {
+    private static SortedSet parseForDocValues(SortedSet endUserFormattedValues, DocValueFormat format) {
+        SortedSet result = endUserFormattedValues;
+        if (endUserFormattedValues != null) {
+            if (format != DocValueFormat.RAW) {
+                result = new TreeSet<>();
+                for (BytesRef formattedVal : endUserFormattedValues) {
+                    result.add(format.parseBytesRef(formattedVal.utf8ToString()));
+                }
+            }
+        }
+        return result;
+    }
+
+    public OrdinalsFilter convertToOrdinalsFilter(DocValueFormat format) {
         if (isRegexBased()) {
             return new AutomatonBackedOrdinalsFilter(toAutomaton());
         }
-        return new TermListBackedOrdinalsFilter(includeValues, excludeValues);
+        return new TermListBackedOrdinalsFilter(parseForDocValues(includeValues, format), parseForDocValues(excludeValues, format));
     }

-    public LongFilter convertToLongFilter() {
+    public LongFilter convertToLongFilter(DocValueFormat format) {
         int numValids = includeValues == null ? 0 : includeValues.size();
         int numInvalids = excludeValues == null ? 0 : excludeValues.size();
         LongFilter result = new LongFilter(numValids, numInvalids);
         if (includeValues != null) {
             for (BytesRef val : includeValues) {
-                result.addAccept(Long.parseLong(val.utf8ToString()));
+                result.addAccept(format.parseLong(val.utf8ToString(), false, null));
             }
         }
         if (excludeValues != null) {
             for (BytesRef val : excludeValues) {
-                result.addReject(Long.parseLong(val.utf8ToString()));
+                result.addReject(format.parseLong(val.utf8ToString(), false, null));
             }
         }
         return result;
@@ -572,13 +588,13 @@ public class IncludeExclude implements Writeable, ToXContent {
         LongFilter result = new LongFilter(numValids, numInvalids);
         if (includeValues != null) {
             for (BytesRef val : includeValues) {
-                double dval=Double.parseDouble(val.utf8ToString());
+                double dval = Double.parseDouble(val.utf8ToString());
                 result.addAccept(NumericUtils.doubleToSortableLong(dval));
             }
         }
         if (excludeValues != null) {
             for (BytesRef val : excludeValues) {
-                double dval=Double.parseDouble(val.utf8ToString());
+                double dval = Double.parseDouble(val.utf8ToString());
                 result.addReject(NumericUtils.doubleToSortableLong(dval));
             }
         }
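// Illustrative sketch (editor's addition, not part of the patch): what the new
// DocValueFormat parameter buys. Exact include/exclude values typed by the user
// are now parsed through the field's doc-value format before matching, so they
// are compared in the index's binary representation rather than as raw UTF-8.
// The String[] constructor is an assumption for the example.
IncludeExclude incExc = new IncludeExclude(new String[] { "192.168.0.1" }, null);
// parseForDocValues(...) runs "192.168.0.1" through DocValueFormat.IP.parseBytesRef(...):
IncludeExclude.StringFilter filter = incExc.convertToStringFilter(DocValueFormat.IP);
// Long-valued fields take the same route via format.parseLong(...) in
// convertToLongFilter(format) instead of the old Long.parseLong(...).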
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregatorBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregationBuilder.java
similarity index 91%
rename from core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregatorBuilder.java
rename to core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregationBuilder.java
index da3733d13a9..ce098177a0b 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregatorBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregationBuilder.java
@@ -29,24 +29,24 @@ import org.elasticsearch.search.aggregations.support.AggregationContext;
 import org.elasticsearch.search.aggregations.support.ValueType;
 import org.elasticsearch.search.aggregations.support.ValuesSource;
 import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric;
-import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorBuilder;
+import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
 import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
 import org.elasticsearch.search.aggregations.support.ValuesSourceType;

 import java.io.IOException;

-public class AvgAggregatorBuilder extends ValuesSourceAggregatorBuilder.LeafOnly {
+public class AvgAggregationBuilder extends ValuesSourceAggregationBuilder.LeafOnly {
     public static final String NAME = InternalAvg.TYPE.name();
     public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME);

-    public AvgAggregatorBuilder(String name) {
+    public AvgAggregationBuilder(String name) {
         super(name, InternalAvg.TYPE, ValuesSourceType.NUMERIC, ValueType.NUMERIC);
     }

     /**
      * Read from a stream.
      */
-    public AvgAggregatorBuilder(StreamInput in) throws IOException {
+    public AvgAggregationBuilder(StreamInput in) throws IOException {
         super(in, InternalAvg.TYPE, ValuesSourceType.NUMERIC, ValueType.NUMERIC);
     }
@@ -80,4 +80,4 @@ public class AvgAggregatorBuilder extends ValuesSourceAggregatorBuilder.LeafOnly
     public String getWriteableName() {
         return NAME;
     }
-}
\ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgParser.java
index edb3d8f6620..b4f9261b1eb 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgParser.java
@@ -44,8 +44,8 @@ public class AvgParser extends NumericValuesSourceParser {
     }

     @Override
-    protected AvgAggregatorBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType,
-            ValueType targetValueType, Map otherOptions) {
-        return new AvgAggregatorBuilder(aggregationName);
+    protected AvgAggregationBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType,
+            ValueType targetValueType, Map otherOptions) {
+        return new AvgAggregationBuilder(aggregationName);
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregatorBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregationBuilder.java
similarity index 90%
rename from core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregatorBuilder.java
rename to core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregationBuilder.java
index c9465cfb94d..a7850c23475 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregatorBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregationBuilder.java
@@ -28,14 +28,16 @@ import org.elasticsearch.search.aggregations.AggregatorFactory;
 import org.elasticsearch.search.aggregations.support.AggregationContext;
 import org.elasticsearch.search.aggregations.support.ValueType;
 import org.elasticsearch.search.aggregations.support.ValuesSource;
-import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorBuilder;
+import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
 import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
 import org.elasticsearch.search.aggregations.support.ValuesSourceType;

 import java.io.IOException;
 import java.util.Objects;

-public final class CardinalityAggregatorBuilder extends ValuesSourceAggregatorBuilder.LeafOnly {
+public final class CardinalityAggregationBuilder
+        extends ValuesSourceAggregationBuilder.LeafOnly {
+
     public static final String NAME = InternalCardinality.TYPE.name();
     public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME);
@@ -43,14 +45,14 @@ public final class CardinalityAggregatorBuilder extends ValuesSourceAggregatorBu
     private Long precisionThreshold = null;

-    public CardinalityAggregatorBuilder(String name, ValueType targetValueType) {
+    public CardinalityAggregationBuilder(String name, ValueType targetValueType) {
         super(name, InternalCardinality.TYPE, ValuesSourceType.ANY, targetValueType);
     }

     /**
      * Read from a stream.
      */
-    public CardinalityAggregatorBuilder(StreamInput in) throws IOException {
+    public CardinalityAggregationBuilder(StreamInput in) throws IOException {
         super(in, InternalCardinality.TYPE, ValuesSourceType.ANY);
         if (in.readBoolean()) {
             precisionThreshold = in.readLong();
@@ -75,7 +77,7 @@ public final class CardinalityAggregatorBuilder extends ValuesSourceAggregatorBu
      * Set a precision threshold. Higher values improve accuracy but also
      * increase memory usage.
      */
-    public CardinalityAggregatorBuilder precisionThreshold(long precisionThreshold) {
+    public CardinalityAggregationBuilder precisionThreshold(long precisionThreshold) {
         if (precisionThreshold < 0) {
             throw new IllegalArgumentException(
                     "[precisionThreshold] must be greater than or equal to 0. Found [" + precisionThreshold + "] in [" + name + "]");
@@ -122,7 +124,7 @@ public final class CardinalityAggregatorBuilder extends ValuesSourceAggregatorBu
     @Override
     protected boolean innerEquals(Object obj) {
-        CardinalityAggregatorBuilder other = (CardinalityAggregatorBuilder) obj;
+        CardinalityAggregationBuilder other = (CardinalityAggregationBuilder) obj;
         return Objects.equals(precisionThreshold, other.precisionThreshold);
     }
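// Illustrative sketch (editor's addition, not part of the patch): the renamed
// cardinality builder. precisionThreshold trades memory for accuracy and
// rejects negative values, as the hunk above shows; the field is hypothetical.
CardinalityAggregationBuilder uniqueUsers =
        new CardinalityAggregationBuilder("unique_users", ValueType.STRING);
uniqueUsers.field("user_id")
        .precisionThreshold(3000);  // counts below this are near-exact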
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityParser.java
index 3272d90eede..3a2e6a2072a 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityParser.java
@@ -40,10 +40,10 @@
     }

     @Override
-    protected CardinalityAggregatorBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType,
-            ValueType targetValueType, Map otherOptions) {
-        CardinalityAggregatorBuilder factory = new CardinalityAggregatorBuilder(aggregationName, targetValueType);
-        Long precisionThreshold = (Long) otherOptions.get(CardinalityAggregatorBuilder.PRECISION_THRESHOLD_FIELD);
+    protected CardinalityAggregationBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType,
+            ValueType targetValueType, Map otherOptions) {
+        CardinalityAggregationBuilder factory = new CardinalityAggregationBuilder(aggregationName, targetValueType);
+        Long precisionThreshold = (Long) otherOptions.get(CardinalityAggregationBuilder.PRECISION_THRESHOLD_FIELD);
         if (precisionThreshold != null) {
             factory.precisionThreshold(precisionThreshold);
         }
@@ -54,8 +54,8 @@ public class CardinalityParser extends AnyValuesSourceParser {
     protected boolean token(String aggregationName, String currentFieldName, Token token, XContentParser parser,
             ParseFieldMatcher parseFieldMatcher, Map otherOptions) throws IOException {
         if (token.isValue()) {
-            if (parseFieldMatcher.match(currentFieldName, CardinalityAggregatorBuilder.PRECISION_THRESHOLD_FIELD)) {
-                otherOptions.put(CardinalityAggregatorBuilder.PRECISION_THRESHOLD_FIELD, parser.longValue());
+            if (parseFieldMatcher.match(currentFieldName, CardinalityAggregationBuilder.PRECISION_THRESHOLD_FIELD)) {
+                otherOptions.put(CardinalityAggregationBuilder.PRECISION_THRESHOLD_FIELD, parser.longValue());
                 return true;
             } else if (parseFieldMatcher.match(currentFieldName, REHASH)) {
                 // ignore
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregatorBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregationBuilder.java
similarity index 88%
rename from core/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregatorBuilder.java
rename to core/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregationBuilder.java
index ea4681ed686..eff020ec610 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregatorBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregationBuilder.java
@@ -28,27 +28,27 @@ import org.elasticsearch.search.aggregations.AggregatorFactory;
 import org.elasticsearch.search.aggregations.support.AggregationContext;
 import org.elasticsearch.search.aggregations.support.ValueType;
 import org.elasticsearch.search.aggregations.support.ValuesSource;
-import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorBuilder;
+import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
 import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
 import org.elasticsearch.search.aggregations.support.ValuesSourceType;

 import java.io.IOException;
 import java.util.Objects;

-public class GeoBoundsAggregatorBuilder extends ValuesSourceAggregatorBuilder {
+public class GeoBoundsAggregationBuilder extends ValuesSourceAggregationBuilder {
     public static final String NAME = InternalGeoBounds.TYPE.name();
     public static final ParseField AGGREGATION_NAME_FIED = new ParseField(NAME);

     private boolean wrapLongitude = true;

-    public GeoBoundsAggregatorBuilder(String name) {
+    public GeoBoundsAggregationBuilder(String name) {
         super(name, InternalGeoBounds.TYPE, ValuesSourceType.GEOPOINT, ValueType.GEOPOINT);
     }

     /**
      * Read from a stream.
*/ - public GeoBoundsAggregatorBuilder(StreamInput in) throws IOException { + public GeoBoundsAggregationBuilder(StreamInput in) throws IOException { super(in, InternalGeoBounds.TYPE, ValuesSourceType.GEOPOINT, ValueType.GEOPOINT); wrapLongitude = in.readBoolean(); } @@ -61,7 +61,7 @@ public class GeoBoundsAggregatorBuilder extends ValuesSourceAggregatorBuilder otherOptions) { - GeoBoundsAggregatorBuilder factory = new GeoBoundsAggregatorBuilder(aggregationName); + protected GeoBoundsAggregationBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType, + ValueType targetValueType, Map otherOptions) { + GeoBoundsAggregationBuilder factory = new GeoBoundsAggregationBuilder(aggregationName); Boolean wrapLongitude = (Boolean) otherOptions.get(GeoBoundsAggregator.WRAP_LONGITUDE_FIELD); if (wrapLongitude != null) { factory.wrapLongitude(wrapLongitude); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregatorBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregationBuilder.java similarity index 90% rename from core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregatorBuilder.java rename to core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregationBuilder.java index ea8e54cdba9..f9bf2e0a346 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregatorBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregationBuilder.java @@ -28,25 +28,25 @@ import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; -import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorBuilder; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceType; import java.io.IOException; -public class GeoCentroidAggregatorBuilder - extends ValuesSourceAggregatorBuilder.LeafOnly { +public class GeoCentroidAggregationBuilder + extends ValuesSourceAggregationBuilder.LeafOnly { public static final String NAME = InternalGeoCentroid.TYPE.name(); public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME); - public GeoCentroidAggregatorBuilder(String name) { + public GeoCentroidAggregationBuilder(String name) { super(name, InternalGeoCentroid.TYPE, ValuesSourceType.GEOPOINT, ValueType.GEOPOINT); } /** * Read from a stream. 
*/ - public GeoCentroidAggregatorBuilder(StreamInput in) throws IOException { + public GeoCentroidAggregationBuilder(StreamInput in) throws IOException { super(in, InternalGeoCentroid.TYPE, ValuesSourceType.GEOPOINT, ValueType.GEOPOINT); } @@ -80,4 +80,4 @@ public class GeoCentroidAggregatorBuilder public String getWriteableName() { return NAME; } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidParser.java index b056920b141..6c9e9ba67b0 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidParser.java @@ -46,8 +46,8 @@ public class GeoCentroidParser extends GeoPointValuesSourceParser { } @Override - protected GeoCentroidAggregatorBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType, - ValueType targetValueType, Map otherOptions) { - return new GeoCentroidAggregatorBuilder(aggregationName); + protected GeoCentroidAggregationBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType, + ValueType targetValueType, Map otherOptions) { + return new GeoCentroidAggregationBuilder(aggregationName); } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregatorBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregationBuilder.java similarity index 91% rename from core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregatorBuilder.java rename to core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregationBuilder.java index 7258e77aea5..9fa919fcf9e 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregatorBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregationBuilder.java @@ -29,24 +29,24 @@ import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; -import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorBuilder; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceType; import java.io.IOException; -public class MaxAggregatorBuilder extends ValuesSourceAggregatorBuilder.LeafOnly { +public class MaxAggregationBuilder extends ValuesSourceAggregationBuilder.LeafOnly { public static final String NAME = InternalMax.TYPE.name(); public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME); - public MaxAggregatorBuilder(String name) { + public MaxAggregationBuilder(String name) { super(name, InternalMax.TYPE, ValuesSourceType.NUMERIC, ValueType.NUMERIC); } /** * Read from a stream. 
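 * (Illustrative, not part of this patch: usage becomes
 *   new MaxAggregationBuilder("max_price").field("price");
 * with no behavioural change, only the class name differs.)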
*/ - public MaxAggregatorBuilder(StreamInput in) throws IOException { + public MaxAggregationBuilder(StreamInput in) throws IOException { super(in, InternalMax.TYPE, ValuesSourceType.NUMERIC, ValueType.NUMERIC); } @@ -80,4 +80,4 @@ public class MaxAggregatorBuilder extends ValuesSourceAggregatorBuilder.LeafOnly public String getWriteableName() { return NAME; } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxParser.java index 41c321acf33..d2ddd4daa08 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxParser.java @@ -44,8 +44,8 @@ public class MaxParser extends NumericValuesSourceParser { } @Override - protected MaxAggregatorBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType, - ValueType targetValueType, Map otherOptions) { - return new MaxAggregatorBuilder(aggregationName); + protected MaxAggregationBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType, + ValueType targetValueType, Map otherOptions) { + return new MaxAggregationBuilder(aggregationName); } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregatorBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregationBuilder.java similarity index 91% rename from core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregatorBuilder.java rename to core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregationBuilder.java index c51e97f0538..af4f204bddb 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregatorBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregationBuilder.java @@ -29,24 +29,24 @@ import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; -import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorBuilder; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceType; import java.io.IOException; -public class MinAggregatorBuilder extends ValuesSourceAggregatorBuilder.LeafOnly { +public class MinAggregationBuilder extends ValuesSourceAggregationBuilder.LeafOnly { public static final String NAME = InternalMin.TYPE.name(); public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME); - public MinAggregatorBuilder(String name) { + public MinAggregationBuilder(String name) { super(name, InternalMin.TYPE, ValuesSourceType.NUMERIC, ValueType.NUMERIC); } /** * Read from a stream. 
*/ - public MinAggregatorBuilder(StreamInput in) throws IOException { + public MinAggregationBuilder(StreamInput in) throws IOException { super(in, InternalMin.TYPE, ValuesSourceType.NUMERIC, ValueType.NUMERIC); } @@ -80,4 +80,4 @@ public class MinAggregatorBuilder extends ValuesSourceAggregatorBuilder.LeafOnly public String getWriteableName() { return NAME; } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinParser.java index 9f9eafc5035..194c08fc49b 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinParser.java @@ -45,8 +45,8 @@ public class MinParser extends NumericValuesSourceParser { } @Override - protected MinAggregatorBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType, - ValueType targetValueType, Map otherOptions) { - return new MinAggregatorBuilder(aggregationName); + protected MinAggregationBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType, + ValueType targetValueType, Map otherOptions) { + return new MinAggregationBuilder(aggregationName); } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractPercentilesParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractPercentilesParser.java index f29615a593f..ec145754a04 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractPercentilesParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractPercentilesParser.java @@ -27,7 +27,7 @@ import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.NumericValuesSourceParser; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; -import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorBuilder; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValuesSourceType; import java.io.IOException; @@ -115,8 +115,8 @@ public abstract class AbstractPercentilesParser extends NumericValuesSourceParse } @Override - protected ValuesSourceAggregatorBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType, - ValueType targetValueType, Map otherOptions) { + protected ValuesSourceAggregationBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType, + ValueType targetValueType, Map otherOptions) { PercentilesMethod method = (PercentilesMethod) otherOptions.getOrDefault(METHOD_FIELD, PercentilesMethod.TDIGEST); double[] cdfValues = (double[]) otherOptions.get(keysField()); @@ -126,10 +126,10 @@ public abstract class AbstractPercentilesParser extends NumericValuesSourceParse return buildFactory(aggregationName, cdfValues, method, compression, numberOfSignificantValueDigits, keyed); } - protected abstract ValuesSourceAggregatorBuilder buildFactory(String aggregationName, double[] cdfValues, - PercentilesMethod method, - Double compression, - Integer numberOfSignificantValueDigits, Boolean keyed); + protected abstract ValuesSourceAggregationBuilder buildFactory(String aggregationName, double[] 
cdfValues, + PercentilesMethod method, + Double compression, + Integer numberOfSignificantValueDigits, Boolean keyed); protected abstract ParseField keysField(); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksAggregatorBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksAggregationBuilder.java similarity index 91% rename from core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksAggregatorBuilder.java rename to core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksAggregationBuilder.java index ea03451cf67..d36dcdecb7b 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksAggregatorBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksAggregationBuilder.java @@ -32,7 +32,7 @@ import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; -import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorBuilder.LeafOnly; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder.LeafOnly; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceType; @@ -41,7 +41,7 @@ import java.io.IOException; import java.util.Arrays; import java.util.Objects; -public class PercentileRanksAggregatorBuilder extends LeafOnly { +public class PercentileRanksAggregationBuilder extends LeafOnly { public static final String NAME = InternalTDigestPercentileRanks.TYPE.name(); public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME); @@ -51,14 +51,14 @@ public class PercentileRanksAggregatorBuilder extends LeafOnly 5) { throw new IllegalArgumentException("[numberOfSignificantValueDigits] must be between 0 and 5: [" + name + "]"); } @@ -135,7 +135,7 @@ public class PercentileRanksAggregatorBuilder extends LeafOnly buildFactory(String aggregationName, double[] keys, PercentilesMethod method, - Double compression, Integer numberOfSignificantValueDigits, Boolean keyed) { - PercentileRanksAggregatorBuilder factory = new PercentileRanksAggregatorBuilder(aggregationName); + protected ValuesSourceAggregationBuilder buildFactory(String aggregationName, double[] keys, PercentilesMethod method, + Double compression, Integer numberOfSignificantValueDigits, + Boolean keyed) { + PercentileRanksAggregationBuilder factory = new PercentileRanksAggregationBuilder(aggregationName); if (keys != null) { factory.values(keys); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesAggregatorBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesAggregationBuilder.java similarity index 91% rename from core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesAggregatorBuilder.java rename to core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesAggregationBuilder.java index 45f3d37732f..d2d1e8d9f8a 100644 --- 
a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesAggregatorBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesAggregationBuilder.java @@ -32,7 +32,7 @@ import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; -import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorBuilder.LeafOnly; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder.LeafOnly; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceType; @@ -41,7 +41,7 @@ import java.io.IOException; import java.util.Arrays; import java.util.Objects; -public class PercentilesAggregatorBuilder extends LeafOnly { +public class PercentilesAggregationBuilder extends LeafOnly { public static final String NAME = InternalTDigestPercentiles.TYPE.name(); public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME); @@ -51,14 +51,14 @@ public class PercentilesAggregatorBuilder extends LeafOnly 5) { throw new IllegalArgumentException("[numberOfSignificantValueDigits] must be between 0 and 5: [" + name + "]"); } @@ -135,7 +135,7 @@ public class PercentilesAggregatorBuilder extends LeafOnly buildFactory(String aggregationName, double[] keys, PercentilesMethod method, - Double compression, Integer numberOfSignificantValueDigits, Boolean keyed) { - PercentilesAggregatorBuilder factory = new PercentilesAggregatorBuilder(aggregationName); + protected ValuesSourceAggregationBuilder buildFactory(String aggregationName, double[] keys, PercentilesMethod method, + Double compression, Integer numberOfSignificantValueDigits, + Boolean keyed) { + PercentilesAggregationBuilder factory = new PercentilesAggregationBuilder(aggregationName); if (keys != null) { factory.percentiles(keys); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java similarity index 93% rename from core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorBuilder.java rename to core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java index 753052b4fe0..d5bdf2f5626 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java @@ -29,7 +29,7 @@ import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptParameterParser; import org.elasticsearch.script.ScriptParameterParser.ScriptParameterValue; -import org.elasticsearch.search.aggregations.AggregatorBuilder; +import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.support.AggregationContext; 
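For orientation, a client-side sketch of the renamed scripted-metric builder (the script bodies and the searchSourceBuilder variable are invented for illustration; the fluent API itself is untouched by this patch):

    Script initScript = new Script("params._agg = [:]");      // hypothetical inline script
    Script mapScript = new Script("params._agg.count = 1");   // hypothetical inline script
    ScriptedMetricAggregationBuilder metric = new ScriptedMetricAggregationBuilder("my_metric")
            .initScript(initScript)
            .mapScript(mapScript);
    searchSourceBuilder.aggregation(metric);                   // aggregation(...) now takes an AggregationBuilder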
@@ -40,7 +40,7 @@ import java.util.Map; import java.util.Objects; import java.util.Set; -public class ScriptedMetricAggregatorBuilder extends AggregatorBuilder { +public class ScriptedMetricAggregationBuilder extends AggregationBuilder { public static final String NAME = InternalScriptedMetric.TYPE.name(); public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME); @@ -58,14 +58,14 @@ public class ScriptedMetricAggregatorBuilder extends AggregatorBuilder params; - public ScriptedMetricAggregatorBuilder(String name) { + public ScriptedMetricAggregationBuilder(String name) { super(name, InternalScriptedMetric.TYPE); } /** * Read from a stream. */ - public ScriptedMetricAggregatorBuilder(StreamInput in) throws IOException { + public ScriptedMetricAggregationBuilder(StreamInput in) throws IOException { super(in, InternalScriptedMetric.TYPE); initScript = in.readOptionalWriteable(Script::new); mapScript = in.readOptionalWriteable(Script::new); @@ -92,7 +92,7 @@ public class ScriptedMetricAggregatorBuilder extends AggregatorBuilderinit script. */ - public ScriptedMetricAggregatorBuilder initScript(Script initScript) { + public ScriptedMetricAggregationBuilder initScript(Script initScript) { if (initScript == null) { throw new IllegalArgumentException("[initScript] must not be null: [" + name + "]"); } @@ -110,7 +110,7 @@ public class ScriptedMetricAggregatorBuilder extends AggregatorBuildermap script. */ - public ScriptedMetricAggregatorBuilder mapScript(Script mapScript) { + public ScriptedMetricAggregationBuilder mapScript(Script mapScript) { if (mapScript == null) { throw new IllegalArgumentException("[mapScript] must not be null: [" + name + "]"); } @@ -128,7 +128,7 @@ public class ScriptedMetricAggregatorBuilder extends AggregatorBuildercombine script. */ - public ScriptedMetricAggregatorBuilder combineScript(Script combineScript) { + public ScriptedMetricAggregationBuilder combineScript(Script combineScript) { if (combineScript == null) { throw new IllegalArgumentException("[combineScript] must not be null: [" + name + "]"); } @@ -146,7 +146,7 @@ public class ScriptedMetricAggregatorBuilder extends AggregatorBuilderreduce script. */ - public ScriptedMetricAggregatorBuilder reduceScript(Script reduceScript) { + public ScriptedMetricAggregationBuilder reduceScript(Script reduceScript) { if (reduceScript == null) { throw new IllegalArgumentException("[reduceScript] must not be null: [" + name + "]"); } @@ -165,7 +165,7 @@ public class ScriptedMetricAggregatorBuilder extends AggregatorBuilderinit, * map and combine phases. */ - public ScriptedMetricAggregatorBuilder params(Map params) { + public ScriptedMetricAggregationBuilder params(Map params) { if (params == null) { throw new IllegalArgumentException("[params] must not be null: [" + name + "]"); } @@ -214,7 +214,7 @@ public class ScriptedMetricAggregatorBuilder extends AggregatorBuilder { +public class StatsAggregationBuilder extends ValuesSourceAggregationBuilder.LeafOnly { public static final String NAME = InternalStats.TYPE.name(); public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME); - public StatsAggregatorBuilder(String name) { + public StatsAggregationBuilder(String name) { super(name, InternalStats.TYPE, ValuesSourceType.NUMERIC, ValueType.NUMERIC); } /** * Read from a stream. 
*/ - public StatsAggregatorBuilder(StreamInput in) throws IOException { + public StatsAggregationBuilder(StreamInput in) throws IOException { super(in, InternalStats.TYPE, ValuesSourceType.NUMERIC, ValueType.NUMERIC); } @@ -80,4 +80,4 @@ public class StatsAggregatorBuilder extends ValuesSourceAggregatorBuilder.LeafOn public String getWriteableName() { return NAME; } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsParser.java index a57ba89b676..eacfc0068b4 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsParser.java @@ -44,8 +44,8 @@ public class StatsParser extends NumericValuesSourceParser { } @Override - protected StatsAggregatorBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType, - ValueType targetValueType, Map otherOptions) { - return new StatsAggregatorBuilder(aggregationName); + protected StatsAggregationBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType, + ValueType targetValueType, Map otherOptions) { + return new StatsAggregationBuilder(aggregationName); } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregatorBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregationBuilder.java similarity index 88% rename from core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregatorBuilder.java rename to core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregationBuilder.java index e6f49d719d6..a2b961f1fc3 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregatorBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregationBuilder.java @@ -29,28 +29,28 @@ import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; -import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorBuilder; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceType; import java.io.IOException; import java.util.Objects; -public class ExtendedStatsAggregatorBuilder - extends ValuesSourceAggregatorBuilder.LeafOnly { +public class ExtendedStatsAggregationBuilder + extends ValuesSourceAggregationBuilder.LeafOnly { public static final String NAME = InternalExtendedStats.TYPE.name(); public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME); private double sigma = 2.0; - public ExtendedStatsAggregatorBuilder(String name) { + public ExtendedStatsAggregationBuilder(String name) { super(name, InternalExtendedStats.TYPE, ValuesSourceType.NUMERIC, ValueType.NUMERIC); } /** * Read from a stream. 
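 * (Illustrative, not part of this patch: e.g.
 *   new ExtendedStatsAggregationBuilder("price_stats").field("price").sigma(3.0);
 * the sigma validation below is unaffected by the rename.)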
*/ - public ExtendedStatsAggregatorBuilder(StreamInput in) throws IOException { + public ExtendedStatsAggregationBuilder(StreamInput in) throws IOException { super(in, InternalExtendedStats.TYPE, ValuesSourceType.NUMERIC, ValueType.NUMERIC); sigma = in.readDouble(); } @@ -60,7 +60,7 @@ public class ExtendedStatsAggregatorBuilder out.writeDouble(sigma); } - public ExtendedStatsAggregatorBuilder sigma(double sigma) { + public ExtendedStatsAggregationBuilder sigma(double sigma) { if (sigma < 0.0) { throw new IllegalArgumentException("[sigma] must be greater than or equal to 0. Found [" + sigma + "] in [" + name + "]"); } @@ -91,7 +91,7 @@ public class ExtendedStatsAggregatorBuilder @Override protected boolean innerEquals(Object obj) { - ExtendedStatsAggregatorBuilder other = (ExtendedStatsAggregatorBuilder) obj; + ExtendedStatsAggregationBuilder other = (ExtendedStatsAggregationBuilder) obj; return Objects.equals(sigma, other.sigma); } @@ -99,4 +99,4 @@ public class ExtendedStatsAggregatorBuilder public String getWriteableName() { return NAME; } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsParser.java index 76e6beac2da..c650847360f 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsParser.java @@ -50,9 +50,9 @@ public class ExtendedStatsParser extends NumericValuesSourceParser { } @Override - protected ExtendedStatsAggregatorBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType, - ValueType targetValueType, Map otherOptions) { - ExtendedStatsAggregatorBuilder factory = new ExtendedStatsAggregatorBuilder(aggregationName); + protected ExtendedStatsAggregationBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType, + ValueType targetValueType, Map otherOptions) { + ExtendedStatsAggregationBuilder factory = new ExtendedStatsAggregationBuilder(aggregationName); Double sigma = (Double) otherOptions.get(ExtendedStatsAggregator.SIGMA_FIELD); if (sigma != null) { factory.sigma(sigma); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregatorBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregationBuilder.java similarity index 91% rename from core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregatorBuilder.java rename to core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregationBuilder.java index 30246452330..25dd1a3f214 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregatorBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregationBuilder.java @@ -29,24 +29,24 @@ import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; -import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorBuilder; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import 
org.elasticsearch.search.aggregations.support.ValuesSourceType; import java.io.IOException; -public class SumAggregatorBuilder extends ValuesSourceAggregatorBuilder.LeafOnly { +public class SumAggregationBuilder extends ValuesSourceAggregationBuilder.LeafOnly { public static final String NAME = InternalSum.TYPE.name(); public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME); - public SumAggregatorBuilder(String name) { + public SumAggregationBuilder(String name) { super(name, InternalSum.TYPE, ValuesSourceType.NUMERIC, ValueType.NUMERIC); } /** * Read from a stream. */ - public SumAggregatorBuilder(StreamInput in) throws IOException { + public SumAggregationBuilder(StreamInput in) throws IOException { super(in, InternalSum.TYPE, ValuesSourceType.NUMERIC, ValueType.NUMERIC); } @@ -80,4 +80,4 @@ public class SumAggregatorBuilder extends ValuesSourceAggregatorBuilder.LeafOnly public String getWriteableName() { return NAME; } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumParser.java index 7971adba4eb..6edc6cc8905 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumParser.java @@ -44,8 +44,8 @@ public class SumParser extends NumericValuesSourceParser { } @Override - protected SumAggregatorBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType, - ValueType targetValueType, Map otherOptions) { - return new SumAggregatorBuilder(aggregationName); + protected SumAggregationBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType, + ValueType targetValueType, Map otherOptions) { + return new SumAggregationBuilder(aggregationName); } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregatorBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java similarity index 92% rename from core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregatorBuilder.java rename to core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java index 8f15437cc54..7195482f147 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregatorBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java @@ -30,7 +30,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.AggregationInitializationException; -import org.elasticsearch.search.aggregations.AggregatorBuilder; +import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.support.AggregationContext; @@ -51,7 +51,7 @@ import java.util.List; import java.util.Objects; import java.util.Set; -public class TopHitsAggregatorBuilder extends AggregatorBuilder { +public class TopHitsAggregationBuilder extends AggregationBuilder { public static final String NAME = InternalTopHits.TYPE.name(); public static final ParseField 
AGGREGATION_NAME_FIELD = new ParseField(NAME); @@ -67,14 +67,14 @@ public class TopHitsAggregatorBuilder extends AggregatorBuilder scriptFields; private FetchSourceContext fetchSourceContext; - public TopHitsAggregatorBuilder(String name) { + public TopHitsAggregationBuilder(String name) { super(name, InternalTopHits.TYPE); } /** * Read from a stream. */ - public TopHitsAggregatorBuilder(StreamInput in) throws IOException { + public TopHitsAggregationBuilder(StreamInput in) throws IOException { super(in, InternalTopHits.TYPE); explain = in.readBoolean(); fetchSourceContext = in.readOptionalStreamable(FetchSourceContext::new); @@ -159,7 +159,7 @@ public class TopHitsAggregatorBuilder extends AggregatorBuilder0. */ - public TopHitsAggregatorBuilder from(int from) { + public TopHitsAggregationBuilder from(int from) { if (from < 0) { throw new IllegalArgumentException("[from] must be greater than or equal to 0. Found [" + from + "] in [" + name + "]"); } @@ -177,7 +177,7 @@ public class TopHitsAggregatorBuilder extends AggregatorBuilder10. */ - public TopHitsAggregatorBuilder size(int size) { + public TopHitsAggregationBuilder size(int size) { if (size < 0) { throw new IllegalArgumentException("[size] must be greater than or equal to 0. Found [" + size + "] in [" + name + "]"); } @@ -200,7 +200,7 @@ public class TopHitsAggregatorBuilder extends AggregatorBuilder sort) { + public TopHitsAggregationBuilder sort(SortBuilder sort) { if (sort == null) { throw new IllegalArgumentException("[sort] must not be null: [" + name + "]"); } @@ -248,7 +248,7 @@ public class TopHitsAggregatorBuilder extends AggregatorBuilder> sorts) { + public TopHitsAggregationBuilder sorts(List> sorts) { if (sorts == null) { throw new IllegalArgumentException("[sorts] must not be null: [" + name + "]"); } @@ -271,7 +271,7 @@ public class TopHitsAggregatorBuilder extends AggregatorBuilder fields) { + public TopHitsAggregationBuilder fields(List fields) { if (fields == null) { throw new IllegalArgumentException("[fields] must not be null: [" + name + "]"); } @@ -385,7 +385,7 @@ public class TopHitsAggregatorBuilder extends AggregatorBuilder fieldDataFields) { + public TopHitsAggregationBuilder fieldDataFields(List fieldDataFields) { if (fieldDataFields == null) { throw new IllegalArgumentException("[fieldDataFields] must not be null: [" + name + "]"); } @@ -442,7 +442,7 @@ public class TopHitsAggregatorBuilder extends AggregatorBuilder scriptFields) { + public TopHitsAggregationBuilder scriptFields(List scriptFields) { if (scriptFields == null) { throw new IllegalArgumentException("[scriptFields] must not be null: [" + name + "]"); } @@ -497,7 +497,7 @@ public class TopHitsAggregatorBuilder extends AggregatorBuilderfalse. */ - public TopHitsAggregatorBuilder trackScores(boolean trackScores) { + public TopHitsAggregationBuilder trackScores(boolean trackScores) { this.trackScores = trackScores; return this; } @@ -544,7 +544,7 @@ public class TopHitsAggregatorBuilder extends AggregatorBuilder { +public class ValueCountAggregationBuilder extends ValuesSourceAggregationBuilder.LeafOnly { public static final String NAME = InternalValueCount.TYPE.name(); public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME); - public ValueCountAggregatorBuilder(String name, ValueType targetValueType) { + public ValueCountAggregationBuilder(String name, ValueType targetValueType) { super(name, InternalValueCount.TYPE, ValuesSourceType.ANY, targetValueType); } /** * Read from a stream. 
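 * (Illustrative, not part of this patch: the two-argument constructor above is
 * typically invoked as
 *   new ValueCountAggregationBuilder("n_values", ValueType.NUMERIC).field("price");
 * nothing else about the class changes.)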
*/ - public ValueCountAggregatorBuilder(StreamInput in) throws IOException { + public ValueCountAggregationBuilder(StreamInput in) throws IOException { super(in, InternalValueCount.TYPE, ValuesSourceType.ANY); } @@ -84,4 +84,4 @@ public class ValueCountAggregatorBuilder extends ValuesSourceAggregatorBuilder.L public String getWriteableName() { return NAME; } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountParser.java index 2f0e7e6f263..fe8a34f242b 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountParser.java @@ -24,7 +24,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.AnyValuesSourceParser; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; -import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorBuilder; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValuesSourceType; import java.io.IOException; @@ -46,8 +46,8 @@ public class ValueCountParser extends AnyValuesSourceParser { } @Override - protected ValuesSourceAggregatorBuilder createFactory( + protected ValuesSourceAggregationBuilder createFactory( String aggregationName, ValuesSourceType valuesSourceType, ValueType targetValueType, Map otherOptions) { - return new ValueCountAggregatorBuilder(aggregationName, targetValueType); + return new ValueCountAggregationBuilder(aggregationName, targetValueType); } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/support/AbstractValuesSourceParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/support/AbstractValuesSourceParser.java index 030f5143af3..51d2ea2e8c9 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/support/AbstractValuesSourceParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/support/AbstractValuesSourceParser.java @@ -84,7 +84,7 @@ public abstract class AbstractValuesSourceParser } @Override - public final ValuesSourceAggregatorBuilder parse(String aggregationName, QueryParseContext context) + public final ValuesSourceAggregationBuilder parse(String aggregationName, QueryParseContext context) throws IOException { XContentParser parser = context.parser(); @@ -147,7 +147,7 @@ public abstract class AbstractValuesSourceParser } } - ValuesSourceAggregatorBuilder factory = createFactory(aggregationName, this.valuesSourceType, this.targetValueType, + ValuesSourceAggregationBuilder factory = createFactory(aggregationName, this.valuesSourceType, this.targetValueType, otherOptions); if (field != null) { factory.field(field); @@ -171,7 +171,7 @@ public abstract class AbstractValuesSourceParser } /** - * Creates a {@link ValuesSourceAggregatorBuilder} from the information + * Creates a {@link ValuesSourceAggregationBuilder} from the information * gathered by the subclass. Options parsed in * {@link AbstractValuesSourceParser} itself will be added to the factory * after it has been returned by this method. 
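Concrete parsers override createFactory with the renamed return type; MinParser, MaxParser and StatsParser above all follow the same shape. A minimal hypothetical override (FooAggregationBuilder stands in for any of the renamed metric builders):

    @Override
    protected FooAggregationBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType,
            ValueType targetValueType, Map<ParseField, Object> otherOptions) {
        // After this patch, the factory handed back to AbstractValuesSourceParser is an *AggregationBuilder.
        return new FooAggregationBuilder(aggregationName);
    }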
@@ -189,8 +189,8 @@ public abstract class AbstractValuesSourceParser * method * @return the created factory */ - protected abstract ValuesSourceAggregatorBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType, - ValueType targetValueType, Map otherOptions); + protected abstract ValuesSourceAggregationBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType, + ValueType targetValueType, Map otherOptions); /** * Allows subclasses of {@link AbstractValuesSourceParser} to parse extra diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregatorBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java similarity index 95% rename from core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregatorBuilder.java rename to core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java index c92faa09613..78d2a2da10f 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregatorBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java @@ -31,7 +31,7 @@ import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.SearchScript; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.AggregationInitializationException; -import org.elasticsearch.search.aggregations.AggregatorBuilder; +import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -46,11 +46,11 @@ import java.util.Objects; /** * */ -public abstract class ValuesSourceAggregatorBuilder> - extends AggregatorBuilder { +public abstract class ValuesSourceAggregationBuilder> + extends AggregationBuilder { - public static abstract class LeafOnly> - extends ValuesSourceAggregatorBuilder { + public static abstract class LeafOnly> + extends ValuesSourceAggregationBuilder { protected LeafOnly(String name, Type type, ValuesSourceType valuesSourceType, ValueType targetValueType) { super(name, type, valuesSourceType, targetValueType); @@ -87,7 +87,7 @@ public abstract class ValuesSourceAggregatorBuilder config; - protected ValuesSourceAggregatorBuilder(String name, Type type, ValuesSourceType valuesSourceType, ValueType targetValueType) { + protected ValuesSourceAggregationBuilder(String name, Type type, ValuesSourceType valuesSourceType, ValueType targetValueType) { super(name, type); if (valuesSourceType == null) { throw new IllegalArgumentException("[valuesSourceType] must not be null: [" + name + "]"); @@ -99,7 +99,7 @@ public abstract class ValuesSourceAggregatorBuilder other = (ValuesSourceAggregatorBuilder) obj; + ValuesSourceAggregationBuilder other = (ValuesSourceAggregationBuilder) obj; if (!Objects.equals(field, other.field)) return false; if (!Objects.equals(format, other.format)) diff --git a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 61f4acb81aa..dc45a99fa87 100644 --- a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -41,7 +41,7 @@ import 
org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.script.Script; -import org.elasticsearch.search.aggregations.AggregatorBuilder; +import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorParsers; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilder; @@ -600,7 +600,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ /** * Add an aggregation to perform as part of the search. */ - public SearchSourceBuilder aggregation(AggregatorBuilder aggregation) { + public SearchSourceBuilder aggregation(AggregationBuilder aggregation) { if (aggregations == null) { aggregations = AggregatorFactories.builder(); } @@ -1033,6 +1033,9 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ suggestBuilder = SuggestBuilder.fromXContent(context, suggesters); } else if (context.getParseFieldMatcher().match(currentFieldName, SORT_FIELD)) { sorts = new ArrayList<>(SortBuilder.fromXContent(context)); + } else if (context.getParseFieldMatcher().match(currentFieldName, RESCORE_FIELD)) { + rescoreBuilders = new ArrayList<>(); + rescoreBuilders.add(RescoreBuilder.parseFromXContent(context)); } else if (context.getParseFieldMatcher().match(currentFieldName, EXT_FIELD)) { XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser); ext = xContentBuilder.bytes(); diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataMappingServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataMappingServiceTests.java new file mode 100644 index 00000000000..c6ce30e2a52 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataMappingServiceTests.java @@ -0,0 +1,66 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.mapper.DocumentMapper; +import org.elasticsearch.test.ESSingleNodeTestCase; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +public class MetaDataMappingServiceTests extends ESSingleNodeTestCase { + + // Tests _parent meta field logic, because part of the validation is in MetaDataMappingService + public void testAddChildTypePointingToAlreadyExistingType() throws Exception { + createIndex("test", Settings.EMPTY, "type", "field", "type=keyword"); + + // Shouldn't be able the add the _parent field pointing to an already existing type, which isn't a parent type + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> client().admin() + .indices() + .preparePutMapping("test") + .setType("child") + .setSource("_parent", "type=type") + .get()); + assertThat(e.getMessage(), + equalTo("can't add a _parent field that points to an already existing type, that isn't already a parent")); + } + + // Tests _parent meta field logic, because part of the validation is in MetaDataMappingService + public void testAddExtraChildTypePointingToAlreadyParentExistingType() throws Exception { + IndexService indexService = createIndex("test", client().admin().indices().prepareCreate("test") + .addMapping("parent") + .addMapping("child1", "_parent", "type=parent") + ); + + // adding the extra child type that points to an already existing parent type is allowed: + client().admin() + .indices() + .preparePutMapping("test") + .setType("child2") + .setSource("_parent", "type=parent") + .get(); + + DocumentMapper documentMapper = indexService.mapperService().documentMapper("child2"); + assertThat(documentMapper.parentFieldMapper().type(), equalTo("parent")); + assertThat(documentMapper.parentFieldMapper().active(), is(true)); + } + +} diff --git a/core/src/test/java/org/elasticsearch/common/blobstore/FsBlobStoreContainerTests.java b/core/src/test/java/org/elasticsearch/common/blobstore/FsBlobStoreContainerTests.java index 63c04b1c5e1..b08b81db11a 100644 --- a/core/src/test/java/org/elasticsearch/common/blobstore/FsBlobStoreContainerTests.java +++ b/core/src/test/java/org/elasticsearch/common/blobstore/FsBlobStoreContainerTests.java @@ -23,7 +23,7 @@ import org.elasticsearch.common.blobstore.fs.FsBlobStore; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.test.ESBlobStoreContainerTestCase; +import org.elasticsearch.repositories.ESBlobStoreContainerTestCase; import java.io.IOException; import java.nio.file.Path; diff --git a/core/src/test/java/org/elasticsearch/common/blobstore/FsBlobStoreTests.java b/core/src/test/java/org/elasticsearch/common/blobstore/FsBlobStoreTests.java index f6f53549ce4..7d4ac1acc07 100644 --- a/core/src/test/java/org/elasticsearch/common/blobstore/FsBlobStoreTests.java +++ b/core/src/test/java/org/elasticsearch/common/blobstore/FsBlobStoreTests.java @@ -23,7 +23,7 @@ import org.elasticsearch.common.blobstore.fs.FsBlobStore; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.test.ESBlobStoreTestCase; +import org.elasticsearch.repositories.ESBlobStoreTestCase; import java.io.IOException; import 
java.nio.file.Path; diff --git a/core/src/test/java/org/elasticsearch/common/rounding/DateTimeUnitTests.java b/core/src/test/java/org/elasticsearch/common/rounding/DateTimeUnitTests.java new file mode 100644 index 00000000000..79ef6929645 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/rounding/DateTimeUnitTests.java @@ -0,0 +1,75 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.common.rounding; + +import org.elasticsearch.test.ESTestCase; + +import static org.elasticsearch.common.rounding.DateTimeUnit.WEEK_OF_WEEKYEAR; +import static org.elasticsearch.common.rounding.DateTimeUnit.YEAR_OF_CENTURY; +import static org.elasticsearch.common.rounding.DateTimeUnit.QUARTER; +import static org.elasticsearch.common.rounding.DateTimeUnit.MONTH_OF_YEAR; +import static org.elasticsearch.common.rounding.DateTimeUnit.DAY_OF_MONTH; +import static org.elasticsearch.common.rounding.DateTimeUnit.HOUR_OF_DAY; +import static org.elasticsearch.common.rounding.DateTimeUnit.MINUTES_OF_HOUR; +import static org.elasticsearch.common.rounding.DateTimeUnit.SECOND_OF_MINUTE; + +public class DateTimeUnitTests extends ESTestCase { + + /** + * test that we don't accidentally change enum ids + */ + public void testEnumIds() { + assertEquals(1, WEEK_OF_WEEKYEAR.id()); + assertEquals(WEEK_OF_WEEKYEAR, DateTimeUnit.resolve((byte) 1)); + + assertEquals(2, YEAR_OF_CENTURY.id()); + assertEquals(YEAR_OF_CENTURY, DateTimeUnit.resolve((byte) 2)); + + assertEquals(3, QUARTER.id()); + assertEquals(QUARTER, DateTimeUnit.resolve((byte) 3)); + + assertEquals(4, MONTH_OF_YEAR.id()); + assertEquals(MONTH_OF_YEAR, DateTimeUnit.resolve((byte) 4)); + + assertEquals(5, DAY_OF_MONTH.id()); + assertEquals(DAY_OF_MONTH, DateTimeUnit.resolve((byte) 5)); + + assertEquals(6, HOUR_OF_DAY.id()); + assertEquals(HOUR_OF_DAY, DateTimeUnit.resolve((byte) 6)); + + assertEquals(7, MINUTES_OF_HOUR.id()); + assertEquals(MINUTES_OF_HOUR, DateTimeUnit.resolve((byte) 7)); + + assertEquals(8, SECOND_OF_MINUTE.id()); + assertEquals(SECOND_OF_MINUTE, DateTimeUnit.resolve((byte) 8)); + } + + public void testIsDayOrLonger() { + for (DateTimeUnit unit : DateTimeUnit.values()) { + if (DateTimeUnit.isDayOrLonger(unit)) { + assertTrue(unit == DAY_OF_MONTH || + unit == MONTH_OF_YEAR || + unit == QUARTER || + unit == YEAR_OF_CENTURY || + unit == WEEK_OF_WEEKYEAR); + } + } + } + +} diff --git a/core/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java b/core/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java index 2c4d78adbd0..08a4ba11342 100644 --- a/core/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java +++ b/core/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java @@ -25,6 +25,7 @@ import 
org.joda.time.DateTime; import org.joda.time.DateTimeZone; import org.joda.time.format.ISODateTimeFormat; +import java.util.ArrayList; import java.util.concurrent.TimeUnit; import static org.hamcrest.Matchers.equalTo; @@ -147,21 +148,37 @@ public class TimeZoneRoundingTests extends ESTestCase { Rounding tzRounding; // testing savings to non savings switch tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.forID("UTC")).build(); - assertThat(tzRounding.round(time("2014-10-26T01:01:01", DateTimeZone.forID("CET"))), - equalTo(time("2014-10-26T01:00:00", DateTimeZone.forID("CET")))); + assertThat(tzRounding.round(time("2014-10-26T01:01:01", DateTimeZone.forOffsetHours(2))), // CEST = UTC+2 + equalTo(time("2014-10-26T01:00:00", DateTimeZone.forOffsetHours(2)))); + assertThat(tzRounding.nextRoundingValue(time("2014-10-26T01:00:00", DateTimeZone.forOffsetHours(2))), + equalTo(time("2014-10-26T02:00:00", DateTimeZone.forOffsetHours(2)))); + assertThat(tzRounding.nextRoundingValue(time("2014-10-26T02:00:00", DateTimeZone.forOffsetHours(2))), + equalTo(time("2014-10-26T03:00:00", DateTimeZone.forOffsetHours(2)))); tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.forID("CET")).build(); - assertThat(tzRounding.round(time("2014-10-26T01:01:01", DateTimeZone.forID("CET"))), - equalTo(time("2014-10-26T01:00:00", DateTimeZone.forID("CET")))); + assertThat(tzRounding.round(time("2014-10-26T01:01:01", DateTimeZone.forOffsetHours(2))), // CEST = UTC+2 + equalTo(time("2014-10-26T01:00:00", DateTimeZone.forOffsetHours(2)))); + assertThat(tzRounding.nextRoundingValue(time("2014-10-26T01:00:00", DateTimeZone.forOffsetHours(2))), + equalTo(time("2014-10-26T02:00:00", DateTimeZone.forOffsetHours(2)))); + assertThat(tzRounding.nextRoundingValue(time("2014-10-26T02:00:00", DateTimeZone.forOffsetHours(2))), + equalTo(time("2014-10-26T03:00:00", DateTimeZone.forOffsetHours(2)))); // testing non savings to savings switch tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.forID("UTC")).build(); - assertThat(tzRounding.round(time("2014-03-30T01:01:01", DateTimeZone.forID("CET"))), - equalTo(time("2014-03-30T01:00:00", DateTimeZone.forID("CET")))); + assertThat(tzRounding.round(time("2014-03-30T01:01:01", DateTimeZone.forOffsetHours(1))), // CET = UTC+1 + equalTo(time("2014-03-30T01:00:00", DateTimeZone.forOffsetHours(1)))); + assertThat(tzRounding.nextRoundingValue(time("2014-03-30T01:00:00", DateTimeZone.forOffsetHours(1))), + equalTo(time("2014-03-30T02:00:00", DateTimeZone.forOffsetHours(1)))); + assertThat(tzRounding.nextRoundingValue(time("2014-03-30T02:00:00", DateTimeZone.forOffsetHours(1))), + equalTo(time("2014-03-30T03:00:00", DateTimeZone.forOffsetHours(1)))); tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.forID("CET")).build(); - assertThat(tzRounding.round(time("2014-03-30T01:01:01", DateTimeZone.forID("CET"))), - equalTo(time("2014-03-30T01:00:00", DateTimeZone.forID("CET")))); + assertThat(tzRounding.round(time("2014-03-30T01:01:01", DateTimeZone.forOffsetHours(1))), // CET = UTC+1 + equalTo(time("2014-03-30T01:00:00", DateTimeZone.forOffsetHours(1)))); + assertThat(tzRounding.nextRoundingValue(time("2014-03-30T01:00:00", DateTimeZone.forOffsetHours(1))), + equalTo(time("2014-03-30T02:00:00", DateTimeZone.forOffsetHours(1)))); + assertThat(tzRounding.nextRoundingValue(time("2014-03-30T02:00:00", DateTimeZone.forOffsetHours(1))), + 
equalTo(time("2014-03-30T03:00:00", DateTimeZone.forOffsetHours(1)))); // testing non savings to savings switch (America/Chicago) tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.forID("UTC")).build(); @@ -210,6 +227,31 @@ public class TimeZoneRoundingTests extends ESTestCase { } } + /** + * Test that nextRoundingValue() for hour rounding (and smaller) is equally spaced (see #18326) + * Start at a random date in a random time zone, then find the next zone offset transition (if any). + * From there, check that when we advance by using rounding#nextRoundingValue(), we always advance by the same + * amount of milliseconds. + */ + public void testSubHourNextRoundingEquallySpaced() { + String timeZone = randomFrom(new ArrayList<>(DateTimeZone.getAvailableIDs())); + DateTimeUnit unit = randomFrom(new DateTimeUnit[] { DateTimeUnit.HOUR_OF_DAY, DateTimeUnit.MINUTES_OF_HOUR, + DateTimeUnit.SECOND_OF_MINUTE }); + DateTimeZone tz = DateTimeZone.forID(timeZone); + TimeZoneRounding rounding = new TimeZoneRounding.TimeUnitRounding(unit, tz); + // move the random date to transition for timezones that have offset change due to dst transition + long nextTransition = tz.nextTransition(Math.abs(randomLong() % ((long) 10e11))); + final long millisPerUnit = unit.field().getDurationField().getUnitMillis(); + // start ten units before transition + long roundedDate = rounding.round(nextTransition - (10 * millisPerUnit)); + while (roundedDate < nextTransition + 10 * millisPerUnit) { + long delta = rounding.nextRoundingValue(roundedDate) - roundedDate; + assertEquals("Difference between rounded values not equally spaced for [" + unit.name() + "], [" + timeZone + "] at " + + new DateTime(roundedDate), millisPerUnit, delta); + roundedDate = rounding.nextRoundingValue(roundedDate); + } + } + /** * randomized test on TimeIntervalRounding with random interval and time zone offsets */ diff --git a/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java index 8b2dbdb4f07..78319297a98 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java @@ -430,7 +430,7 @@ public class SettingTests extends ESTestCase { integerSetting.get(Settings.builder().put("foo.bar", 11).build()); fail(); } catch (IllegalArgumentException ex) { - assertEquals("Failed to parse value [11] for setting [foo.bar] must be =< 10", ex.getMessage()); + assertEquals("Failed to parse value [11] for setting [foo.bar] must be <= 10", ex.getMessage()); } try { diff --git a/core/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java b/core/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java index 977d5fa7b09..5e992fc947c 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java @@ -208,4 +208,13 @@ public class SettingsModuleTests extends ModuleTestCase { assertThat(e.getMessage(), containsString("Cannot register setting [foo.bar] twice")); } } + + public void testOldMaxClauseCountSetting() { + Settings settings = Settings.builder().put("index.query.bool.max_clause_count", 1024).build(); + SettingsModule module = new SettingsModule(settings); + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, + () -> assertInstanceBinding(module, Settings.class, (s) -> s == 
settings)); + assertEquals("unknown setting [index.query.bool.max_clause_count] did you mean [indices.query.bool.max_clause_count]?", + ex.getMessage()); + } } diff --git a/core/src/test/java/org/elasticsearch/index/analysis/AnalysisServiceTests.java b/core/src/test/java/org/elasticsearch/index/analysis/AnalysisServiceTests.java index b667c256019..b72996bd1a1 100644 --- a/core/src/test/java/org/elasticsearch/index/analysis/AnalysisServiceTests.java +++ b/core/src/test/java/org/elasticsearch/index/analysis/AnalysisServiceTests.java @@ -41,6 +41,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; public class AnalysisServiceTests extends ESTestCase { @@ -183,4 +184,19 @@ public class AnalysisServiceTests extends ESTestCase { assertSame(analysisService.analyzer(preBuiltAnalyzers.name()), otherAnalysisSergice.analyzer(preBuiltAnalyzers.name())); } } + + public void testNoTypeOrTokenizerErrorMessage() throws IOException { + Version version = VersionUtils.randomVersion(random()); + Settings settings = Settings + .builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, version) + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .putArray("index.analysis.analyzer.test_analyzer.filter", new String[] {"lowercase", "stop", "shingle"}) + .putArray("index.analysis.analyzer.test_analyzer.char_filter", new String[] {"html_strip"}) + .build(); + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new AnalysisRegistry(null, new Environment(settings)).build(idxSettings)); + assertThat(e.getMessage(), equalTo("analyzer [test_analyzer] must specify either an analyzer type, or a tokenizer")); + } } diff --git a/core/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java index 6cec3ae8c52..87f8f3981d3 100644 --- a/core/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java @@ -89,7 +89,7 @@ public class MatchQueryBuilderTests extends AbstractQueryTestCase resetting breaker settings"); + // clear all caches, we could be very close (or even above) the limit and then we will not be able to reset the breaker settings + client().admin().indices().prepareClearCache().setFieldDataCache(true).setQueryCache(true).setRequestCache(true).get(); + Settings resetSettings = Settings.builder() .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getDefaultRaw(null)) @@ -214,7 +217,6 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase { * Test that a breaker correctly redistributes to a different breaker, in * this case, the fielddata breaker borrows space from the request breaker */ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/18325") public void testParentChecking() throws Exception { if (noopBreakerUsed()) { logger.info("--> noop breakers used, skipping test"); @@ -274,9 +276,6 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase { cause.toString(), startsWith("CircuitBreakingException[[parent] Data too large")); assertThat("Exception: [" + cause.toString() + "] should contain a CircuitBreakingException", cause.toString(), 
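
On the CircuitBreakerServiceIT change just above: the reset helper now clears the fielddata, query and request caches before restoring the default breaker limits, since memory already accounted to a nearly-tripped breaker would otherwise fail the follow-up requests. A toy model of that ordering (hypothetical names, not the Elasticsearch breaker implementation):

// Toy breaker: tracked bytes against a limit. If tracked memory sits close
// to the limit, any further tracked work trips it, so caches are cleared first.
public class BreakerSketch {
    long used = 950;
    long limit = 1000;

    void addEstimate(long bytes) {
        if (used + bytes > limit) {
            throw new IllegalStateException("Data too large: would be [" + (used + bytes) + "], limit [" + limit + "]");
        }
        used += bytes;
    }

    void clearCaches() {
        used = 0; // freeing cache entries hands their bytes back to the breaker
    }

    public static void main(String[] args) {
        BreakerSketch breaker = new BreakerSketch();
        breaker.clearCaches();    // mirror of reset(): free tracked memory first
        breaker.addEstimate(100); // subsequent requests no longer trip the breaker
    }
}
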
endsWith(errMsg)); - } finally { - // reset before teardown as it requires properly set up breakers - reset(); } } diff --git a/core/src/test/java/org/elasticsearch/monitor/jvm/JvmGcMonitorServiceSettingsTests.java b/core/src/test/java/org/elasticsearch/monitor/jvm/JvmGcMonitorServiceSettingsTests.java index 2c248969b2c..97a2b065627 100644 --- a/core/src/test/java/org/elasticsearch/monitor/jvm/JvmGcMonitorServiceSettingsTests.java +++ b/core/src/test/java/org/elasticsearch/monitor/jvm/JvmGcMonitorServiceSettingsTests.java @@ -79,6 +79,46 @@ public class JvmGcMonitorServiceSettingsTests extends ESTestCase { }, true, null); } + public void testIllegalOverheadSettings() throws InterruptedException { + for (final String threshold : new String[] { "warn", "info", "debug" }) { + final Settings.Builder builder = Settings.builder(); + builder.put("monitor.jvm.gc.overhead." + threshold, randomIntBetween(Integer.MIN_VALUE, -1)); + execute(builder.build(), (command, interval) -> null, t -> { + assertThat(t, instanceOf(IllegalArgumentException.class)); + assertThat(t.getMessage(), containsString("setting [monitor.jvm.gc.overhead." + threshold + "] must be >= 0")); + }, true, null); + } + + for (final String threshold : new String[] { "warn", "info", "debug" }) { + final Settings.Builder builder = Settings.builder(); + builder.put("monitor.jvm.gc.overhead." + threshold, randomIntBetween(100 + 1, Integer.MAX_VALUE)); + execute(builder.build(), (command, interval) -> null, t -> { + assertThat(t, instanceOf(IllegalArgumentException.class)); + assertThat(t.getMessage(), containsString("setting [monitor.jvm.gc.overhead." + threshold + "] must be <= 100")); + }, true, null); + } + + final Settings.Builder infoWarnOutOfOrderBuilder = Settings.builder(); + final int info = randomIntBetween(2, 98); + infoWarnOutOfOrderBuilder.put("monitor.jvm.gc.overhead.info", info); + final int warn = randomIntBetween(1, info - 1); + infoWarnOutOfOrderBuilder.put("monitor.jvm.gc.overhead.warn", warn); + execute(infoWarnOutOfOrderBuilder.build(), (command, interval) -> null, t -> { + assertThat(t, instanceOf(IllegalArgumentException.class)); + assertThat(t.getMessage(), containsString("[monitor.jvm.gc.overhead.warn] must be greater than [monitor.jvm.gc.overhead.info] [" + info + "] but was [" + warn + "]")); + }, true, null); + + final Settings.Builder debugInfoOutOfOrderBuilder = Settings.builder(); + debugInfoOutOfOrderBuilder.put("monitor.jvm.gc.overhead.info", info); + final int debug = randomIntBetween(info + 1, 99); + debugInfoOutOfOrderBuilder.put("monitor.jvm.gc.overhead.debug", debug); + debugInfoOutOfOrderBuilder.put("monitor.jvm.gc.overhead.warn", randomIntBetween(debug + 1, 100)); // or the test will fail for the wrong reason + execute(debugInfoOutOfOrderBuilder.build(), (command, interval) -> null, t -> { + assertThat(t, instanceOf(IllegalArgumentException.class)); + assertThat(t.getMessage(), containsString("[monitor.jvm.gc.overhead.info] must be greater than [monitor.jvm.gc.overhead.debug] [" + debug + "] but was [" + info + "]")); + }, true, null); + } + private static void execute(Settings settings, BiFunction> scheduler, Runnable asserts) throws InterruptedException { execute(settings, scheduler, null, false, asserts); } diff --git a/core/src/test/java/org/elasticsearch/monitor/jvm/JvmGcMonitorServiceTests.java b/core/src/test/java/org/elasticsearch/monitor/jvm/JvmGcMonitorServiceTests.java index 2c17fca7c8c..ab5b1ac4750 100644 --- 
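
The new testIllegalOverheadSettings pins down three rules for the monitor.jvm.gc.overhead.* settings: each value is a percentage in [0, 100], and the ordering warn > info > debug must hold. A compact sketch of validation satisfying exactly those assertions (inferred from the test, not the production JvmGcMonitorService code):

public class OverheadThresholds {
    static void validate(int warn, int info, int debug) {
        for (int value : new int[] { warn, info, debug }) {
            if (value < 0) {
                throw new IllegalArgumentException("setting must be >= 0 but was [" + value + "]");
            }
            if (value > 100) {
                throw new IllegalArgumentException("setting must be <= 100 but was [" + value + "]");
            }
        }
        if (warn <= info) {
            throw new IllegalArgumentException("[warn] must be greater than [info] [" + info + "] but was [" + warn + "]");
        }
        if (info <= debug) {
            throw new IllegalArgumentException("[info] must be greater than [debug] [" + debug + "] but was [" + info + "]");
        }
    }

    public static void main(String[] args) {
        validate(10, 5, 2); // ordered thresholds pass silently
        try {
            validate(4, 5, 2); // warn below info
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}
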
a/core/src/test/java/org/elasticsearch/monitor/jvm/JvmGcMonitorServiceTests.java +++ b/core/src/test/java/org/elasticsearch/monitor/jvm/JvmGcMonitorServiceTests.java @@ -133,4 +133,43 @@ public class JvmGcMonitorServiceTests extends ESTestCase { verifyNoMoreInteractions(logger); } + public void testGcOverheadLogging() { + final JvmGcMonitorService.JvmMonitor.Threshold threshold = randomFrom(JvmGcMonitorService.JvmMonitor.Threshold.values()); + final int current = randomIntBetween(1, Integer.MAX_VALUE); + final long elapsed = randomIntBetween(current, Integer.MAX_VALUE); + final long seq = randomIntBetween(1, Integer.MAX_VALUE); + final ESLogger logger = mock(ESLogger.class); + when(logger.isWarnEnabled()).thenReturn(true); + when(logger.isInfoEnabled()).thenReturn(true); + when(logger.isDebugEnabled()).thenReturn(true); + JvmGcMonitorService.logGcOverhead(logger, threshold, current, elapsed, seq); + switch(threshold) { + case WARN: + verify(logger).isWarnEnabled(); + verify(logger).warn( + "[gc][{}] overhead, spent [{}] collecting in the last [{}]", + seq, + TimeValue.timeValueMillis(current), + TimeValue.timeValueMillis(elapsed)); + break; + case INFO: + verify(logger).isInfoEnabled(); + verify(logger).info( + "[gc][{}] overhead, spent [{}] collecting in the last [{}]", + seq, + TimeValue.timeValueMillis(current), + TimeValue.timeValueMillis(elapsed)); + break; + case DEBUG: + verify(logger).isDebugEnabled(); + verify(logger).debug( + "[gc][{}] overhead, spent [{}] collecting in the last [{}]", + seq, + TimeValue.timeValueMillis(current), + TimeValue.timeValueMillis(elapsed)); + break; + } + verifyNoMoreInteractions(logger); + } + } diff --git a/core/src/test/java/org/elasticsearch/monitor/jvm/JvmMonitorTests.java b/core/src/test/java/org/elasticsearch/monitor/jvm/JvmMonitorTests.java index 8d3ddeec84e..91862e9cd18 100644 --- a/core/src/test/java/org/elasticsearch/monitor/jvm/JvmMonitorTests.java +++ b/core/src/test/java/org/elasticsearch/monitor/jvm/JvmMonitorTests.java @@ -41,10 +41,12 @@ import static org.mockito.Mockito.when; public class JvmMonitorTests extends ESTestCase { + private static final JvmGcMonitorService.GcOverheadThreshold IGNORE = new JvmGcMonitorService.GcOverheadThreshold(0, 0, 0); + public void testMonitorFailure() { AtomicBoolean shouldFail = new AtomicBoolean(); AtomicBoolean invoked = new AtomicBoolean(); - JvmGcMonitorService.JvmMonitor monitor = new JvmGcMonitorService.JvmMonitor(Collections.emptyMap()) { + JvmGcMonitorService.JvmMonitor monitor = new JvmGcMonitorService.JvmMonitor(Collections.emptyMap(), IGNORE) { @Override void onMonitorFailure(Throwable t) { invoked.set(true); @@ -53,7 +55,7 @@ public class JvmMonitorTests extends ESTestCase { } @Override - synchronized void monitorLongGc() { + synchronized void monitorGc() { if (shouldFail.get()) { throw new RuntimeException("simulated"); } @@ -62,6 +64,10 @@ public class JvmMonitorTests extends ESTestCase { @Override void onSlowGc(final Threshold threshold, final long seq, final SlowGcEvent slowGcEvent) { } + + @Override + void onGcOverhead(Threshold threshold, long total, long elapsed, long seq) { + } }; monitor.run(); @@ -166,7 +172,7 @@ public class JvmMonitorTests extends ESTestCase { final AtomicInteger count = new AtomicInteger(); - JvmGcMonitorService.JvmMonitor monitor = new JvmGcMonitorService.JvmMonitor(gcThresholds) { + JvmGcMonitorService.JvmMonitor monitor = new JvmGcMonitorService.JvmMonitor(gcThresholds, IGNORE) { @Override void onMonitorFailure(Throwable t) { } @@ -198,6 +204,10 @@ public 
class JvmMonitorTests extends ESTestCase { } } + @Override + void onGcOverhead(Threshold threshold, long total, long elapsed, long seq) { + } + @Override long now() { return now.get(); @@ -213,7 +223,7 @@ public class JvmMonitorTests extends ESTestCase { now.set(start + TimeUnit.NANOSECONDS.convert(expectedElapsed, TimeUnit.MILLISECONDS)); jvmStats.set(monitorJvmStats); - monitor.monitorLongGc(); + monitor.monitorGc(); assertThat(count.get(), equalTo((youngGcThreshold ? 1 : 0) + (oldGcThreshold ? 1 : 0))); } @@ -235,14 +245,140 @@ public class JvmMonitorTests extends ESTestCase { private JvmStats jvmStats(JvmStats.GarbageCollector youngCollector, JvmStats.GarbageCollector oldCollector) { final JvmStats jvmStats = mock(JvmStats.class); - final JvmStats.GarbageCollectors initialGcs = mock(JvmStats.GarbageCollectors.class); - final JvmStats.GarbageCollector[] initialCollectors = new JvmStats.GarbageCollector[2]; - initialCollectors[0] = youngCollector; - initialCollectors[1] = oldCollector; - when(initialGcs.getCollectors()).thenReturn(initialCollectors); - when(jvmStats.getGc()).thenReturn(initialGcs); + final JvmStats.GarbageCollectors gcs = mock(JvmStats.GarbageCollectors.class); + final JvmStats.GarbageCollector[] collectors = new JvmStats.GarbageCollector[2]; + collectors[0] = youngCollector; + collectors[1] = oldCollector; + when(gcs.getCollectors()).thenReturn(collectors); + when(jvmStats.getGc()).thenReturn(gcs); when(jvmStats.getMem()).thenReturn(JvmStats.jvmStats().getMem()); return jvmStats; } + public void testMonitorGc() { + final int youngCollectionCount = randomIntBetween(1, 16); + final int youngCollectionIncrement = randomIntBetween(1, 16); + final int youngCollectionTime = randomIntBetween(1, 1 << 10); + final int youngCollectionTimeIncrement = randomIntBetween(1, 1 << 10); + final int oldCollectionCount = randomIntBetween(1, 16); + final int oldCollectionIncrement = randomIntBetween(1, 16); + final int oldCollectionTime = randomIntBetween(1, 1 << 10); + final int oldCollectionTimeIncrement = randomIntBetween(1, 1 << 10); + + final JvmStats.GarbageCollector lastYoungCollector = collector("young", youngCollectionCount, youngCollectionTime); + final JvmStats.GarbageCollector lastOldCollector = collector("old", oldCollectionCount, oldCollectionTime); + final JvmStats lastjvmStats = jvmStats(lastYoungCollector, lastOldCollector); + + final JvmStats.GarbageCollector currentYoungCollector = + collector("young", youngCollectionCount + youngCollectionIncrement, youngCollectionTime + youngCollectionTimeIncrement); + final JvmStats.GarbageCollector currentOldCollector = + collector("old", oldCollectionCount + oldCollectionIncrement, oldCollectionTime + oldCollectionTimeIncrement); + final JvmStats currentJvmStats = jvmStats(currentYoungCollector, currentOldCollector); + final long expectedElapsed = + randomIntBetween( + Math.max(youngCollectionTime + youngCollectionTimeIncrement, oldCollectionTime + oldCollectionTimeIncrement), + Integer.MAX_VALUE); + + final AtomicBoolean invoked = new AtomicBoolean(); + + final JvmGcMonitorService.JvmMonitor monitor = new JvmGcMonitorService.JvmMonitor(Collections.emptyMap(), IGNORE) { + + @Override + void onMonitorFailure(Throwable t) { + } + + @Override + void onSlowGc(Threshold threshold, long seq, SlowGcEvent slowGcEvent) { + } + + @Override + void onGcOverhead(Threshold threshold, long total, long elapsed, long seq) { + } + + @Override + void checkGcOverhead(long current, long elapsed, long seq) { + invoked.set(true); + assertThat(current, 
equalTo((long)(youngCollectionTimeIncrement + oldCollectionTimeIncrement))); + assertThat(elapsed, equalTo(expectedElapsed)); + } + + @Override + JvmStats jvmStats() { + return lastjvmStats; + } + }; + + monitor.monitorGcOverhead(currentJvmStats, expectedElapsed); + assertTrue(invoked.get()); + } + + private JvmStats.GarbageCollector collector(final String name, final int collectionCount, final int collectionTime) { + final JvmStats.GarbageCollector gc = mock(JvmStats.GarbageCollector.class); + when(gc.getName()).thenReturn(name); + when(gc.getCollectionCount()).thenReturn((long)collectionCount); + when(gc.getCollectionTime()).thenReturn(TimeValue.timeValueMillis(collectionTime)); + return gc; + } + + public void testCheckGcOverhead() { + final int debugThreshold = randomIntBetween(1, 98); + final int infoThreshold = randomIntBetween(debugThreshold + 1, 99); + final int warnThreshold = randomIntBetween(infoThreshold + 1, 100); + final JvmGcMonitorService.GcOverheadThreshold gcOverheadThreshold = + new JvmGcMonitorService.GcOverheadThreshold(warnThreshold, infoThreshold, debugThreshold); + + final JvmGcMonitorService.JvmMonitor.Threshold expectedThreshold; + int fraction = 0; + final long expectedCurrent; + final long expectedElapsed; + if (randomBoolean()) { + expectedThreshold = randomFrom(JvmGcMonitorService.JvmMonitor.Threshold.values()); + switch (expectedThreshold) { + case WARN: + fraction = randomIntBetween(warnThreshold, 100); + break; + case INFO: + fraction = randomIntBetween(infoThreshold, warnThreshold - 1); + break; + case DEBUG: + fraction = randomIntBetween(debugThreshold, infoThreshold - 1); + break; + } + } else { + expectedThreshold = null; + fraction = randomIntBetween(0, debugThreshold - 1); + } + + expectedElapsed = 100 * randomIntBetween(1, 1000); + expectedCurrent = fraction * expectedElapsed / 100; + + final AtomicBoolean invoked = new AtomicBoolean(); + final long expectedSeq = randomIntBetween(1, Integer.MAX_VALUE); + + final JvmGcMonitorService.JvmMonitor monitor = new JvmGcMonitorService.JvmMonitor(Collections.emptyMap(), gcOverheadThreshold) { + + @Override + void onMonitorFailure(final Throwable t) { + } + + @Override + void onSlowGc(Threshold threshold, long seq, SlowGcEvent slowGcEvent) { + } + + @Override + void onGcOverhead(final Threshold threshold, final long current, final long elapsed, final long seq) { + invoked.set(true); + assertThat(threshold, equalTo(expectedThreshold)); + assertThat(current, equalTo(expectedCurrent)); + assertThat(elapsed, equalTo(expectedElapsed)); + assertThat(seq, equalTo(expectedSeq)); + } + + }; + + monitor.checkGcOverhead(expectedCurrent, expectedElapsed, expectedSeq); + + assertThat(invoked.get(), equalTo(expectedThreshold != null)); + } + } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java b/core/src/test/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java index 6c221e4eb36..e986ab1288f 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java @@ -80,7 +80,7 @@ import static org.elasticsearch.cluster.service.ClusterServiceUtils.createCluste import static org.elasticsearch.cluster.service.ClusterServiceUtils.setState; import static org.hamcrest.Matchers.equalTo; -public abstract class BaseAggregationTestCase> extends ESTestCase { +public abstract class BaseAggregationTestCase> extends ESTestCase { protected static final 
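
In testCheckGcOverhead above, expectedElapsed is always a multiple of 100 and expectedCurrent = fraction * expectedElapsed / 100, so computing 100 * current / elapsed in integer arithmetic recovers the chosen fraction exactly and the randomized threshold comparison stays deterministic. The shape of the check being exercised (illustrative threshold values, not Elasticsearch defaults):

public class GcOverheadCheck {
    enum Threshold { WARN, INFO, DEBUG }

    static Threshold check(long current, long elapsed, int warn, int info, int debug) {
        long percent = 100 * current / elapsed; // GC time as a share of the elapsed window
        if (percent >= warn) return Threshold.WARN;
        if (percent >= info) return Threshold.INFO;
        if (percent >= debug) return Threshold.DEBUG;
        return null; // below every threshold: nothing is reported
    }

    public static void main(String[] args) {
        // 2,500 ms collecting in a 10,000 ms window = 25% overhead -> INFO for (30, 20, 10)
        System.out.println(check(2500, 10000, 30, 20, 10));
    }
}
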
String STRING_FIELD_NAME = "mapped_string"; protected static final String INT_FIELD_NAME = "mapped_int"; @@ -238,7 +238,7 @@ public abstract class BaseAggregationTestCase> assertSame(XContentParser.Token.FIELD_NAME, parser.nextToken()); assertEquals(testAgg.type.name(), parser.currentName()); assertSame(XContentParser.Token.START_OBJECT, parser.nextToken()); - AggregatorBuilder newAgg = aggParsers.parser(testAgg.getType(), ParseFieldMatcher.STRICT).parse(testAgg.name, parseContext); + AggregationBuilder newAgg = aggParsers.parser(testAgg.getType(), ParseFieldMatcher.STRICT).parse(testAgg.name, parseContext); assertSame(XContentParser.Token.END_OBJECT, parser.currentToken()); assertSame(XContentParser.Token.END_OBJECT, parser.nextToken()); assertSame(XContentParser.Token.END_OBJECT, parser.nextToken()); @@ -258,7 +258,7 @@ public abstract class BaseAggregationTestCase> try (BytesStreamOutput output = new BytesStreamOutput()) { output.writeNamedWriteable(testAgg); try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(output.bytes()), namedWriteableRegistry)) { - AggregatorBuilder deserialized = in.readNamedWriteable(AggregatorBuilder.class); + AggregationBuilder deserialized = in.readNamedWriteable(AggregationBuilder.class); assertEquals(testAgg, deserialized); assertEquals(testAgg.hashCode(), deserialized.hashCode()); assertNotSame(testAgg, deserialized); @@ -299,7 +299,7 @@ public abstract class BaseAggregationTestCase> agg.writeTo(output); try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(output.bytes()), namedWriteableRegistry)) { @SuppressWarnings("unchecked") - AB secondAgg = (AB) namedWriteableRegistry.getReader(AggregatorBuilder.class, agg.getWriteableName()).read(in); + AB secondAgg = (AB) namedWriteableRegistry.getReader(AggregationBuilder.class, agg.getWriteableName()).read(in); return secondAgg; } } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ChildrenTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ChildrenTests.java index 3f8684c36db..c7844f29d05 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ChildrenTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ChildrenTests.java @@ -20,15 +20,15 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; -import org.elasticsearch.search.aggregations.bucket.children.ChildrenAggregatorBuilder; +import org.elasticsearch.search.aggregations.bucket.children.ChildrenAggregationBuilder; -public class ChildrenTests extends BaseAggregationTestCase { +public class ChildrenTests extends BaseAggregationTestCase { @Override - protected ChildrenAggregatorBuilder createTestAggregatorBuilder() { + protected ChildrenAggregationBuilder createTestAggregatorBuilder() { String name = randomAsciiOfLengthBetween(3, 20); String childType = randomAsciiOfLengthBetween(5, 40); - ChildrenAggregatorBuilder factory = new ChildrenAggregatorBuilder(name, childType); + ChildrenAggregationBuilder factory = new ChildrenAggregationBuilder(name, childType); return factory; } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java index 76b5558df80..cdb722ff9dd 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java +++ 
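
The AggregatorBuilder-to-AggregationBuilder rename in BaseAggregationTestCase touches the serialization round trip every aggregation test inherits: write the randomized builder as a named writeable, read it back through the registry, and require an equal but distinct instance. The generic shape of that contract, using plain JDK object streams in place of StreamInput/StreamOutput and the NamedWriteableRegistry (names illustrative only):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;

public class RoundTripSketch {
    public static void main(String[] args) throws Exception {
        String original = "terms-agg-config"; // stands in for a randomized builder
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
            out.writeObject(original); // serialize
        }
        Object copy;
        try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            copy = in.readObject(); // deserialize into a fresh instance
        }
        // the same contract the test asserts: equals, same hashCode, not the same object
        System.out.println(original.equals(copy) && original.hashCode() == copy.hashCode() && original != copy);
    }
}
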
b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.joda.DateMathParser; import org.elasticsearch.common.joda.Joda; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.mapper.core.DateFieldMapper; +import org.elasticsearch.index.query.MatchNoneQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; @@ -1146,4 +1147,27 @@ public class DateHistogramIT extends ESIntegTestCase { Histogram histo = response.getAggregations().get("histo"); assertThat(histo.getBuckets().size(), greaterThan(0)); } + + /** + * When DST ends, local time turns back one hour, so between 2am and 4am wall time we should have four buckets: + * "2015-10-25T02:00:00.000+02:00", + * "2015-10-25T02:00:00.000+01:00", + * "2015-10-25T03:00:00.000+01:00", + * "2015-10-25T04:00:00.000+01:00". + */ + public void testDSTEndTransition() throws Exception { + SearchResponse response = client().prepareSearch("idx") + .setQuery(new MatchNoneQueryBuilder()) + .addAggregation(dateHistogram("histo").field("date").timeZone(DateTimeZone.forID("Europe/Oslo")) + .dateHistogramInterval(DateHistogramInterval.HOUR).minDocCount(0).extendedBounds( + new ExtendedBounds("2015-10-25T02:00:00.000+02:00", "2015-10-25T04:00:00.000+01:00"))) + .execute().actionGet(); + + Histogram histo = response.getAggregations().get("histo"); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(4)); + assertThat(((DateTime) buckets.get(1).getKey()).getMillis() - ((DateTime) buckets.get(0).getKey()).getMillis(), equalTo(3600000L)); + assertThat(((DateTime) buckets.get(2).getKey()).getMillis() - ((DateTime) buckets.get(1).getKey()).getMillis(), equalTo(3600000L)); + assertThat(((DateTime) buckets.get(3).getKey()).getMillis() - ((DateTime) buckets.get(2).getKey()).getMillis(), equalTo(3600000L)); + } } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java index 58641a43b68..74ea18cc1d1 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java @@ -20,16 +20,16 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; -import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregatorBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.ExtendedBounds; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Order; -public class DateHistogramTests extends BaseAggregationTestCase { +public class DateHistogramTests extends BaseAggregationTestCase { @Override - protected DateHistogramAggregatorBuilder createTestAggregatorBuilder() { - DateHistogramAggregatorBuilder factory = new DateHistogramAggregatorBuilder("foo"); + protected DateHistogramAggregationBuilder createTestAggregatorBuilder() { + DateHistogramAggregationBuilder factory = new DateHistogramAggregationBuilder("foo"); factory.field(INT_FIELD_NAME); if (randomBoolean()) { factory.interval(randomIntBetween(1, 100000)); diff --git 
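
The testDSTEndTransition javadoc above encodes the key arithmetic: on 2015-10-25 Europe/Oslo leaves CEST (+02:00), wall clocks jump from 03:00 back to 02:00, so the 02:00-04:00 wall-time span covers three hours of absolute time and yields four hourly bucket keys exactly 3,600,000 ms apart. The transition can be checked with plain java.time (illustrative; the aggregation itself uses Joda-Time):

import java.time.Duration;
import java.time.Instant;
import java.time.ZoneId;
import java.time.zone.ZoneOffsetTransition;

public class OsloDstEnd {
    public static void main(String[] args) {
        ZoneOffsetTransition transition = ZoneId.of("Europe/Oslo").getRules()
                .nextTransition(Instant.parse("2015-10-01T00:00:00Z"));
        System.out.println(transition.getInstant());        // 2015-10-25T01:00:00Z, start of the overlap hour
        System.out.println(Duration.ofHours(1).toMillis()); // 3600000, the asserted key spacing
    }
}
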
a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java index b1dc61a9b9e..a95b3cd4871 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java @@ -27,7 +27,7 @@ import org.elasticsearch.search.aggregations.bucket.DateScriptMocks.DateScriptsM import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.range.Range; import org.elasticsearch.search.aggregations.bucket.range.Range.Bucket; -import org.elasticsearch.search.aggregations.bucket.range.date.DateRangeAggregatorBuilder; +import org.elasticsearch.search.aggregations.bucket.range.date.DateRangeAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.sum.Sum; import org.elasticsearch.test.ESIntegTestCase; import org.hamcrest.Matchers; @@ -117,7 +117,7 @@ public class DateRangeIT extends ESIntegTestCase { public void testDateMath() throws Exception { Map params = new HashMap<>(); params.put("fieldname", "date"); - DateRangeAggregatorBuilder rangeBuilder = dateRange("range"); + DateRangeAggregationBuilder rangeBuilder = dateRange("range"); if (randomBoolean()) { rangeBuilder.field("date"); } else { diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeTests.java index 71b61c0e6e6..eba3790330b 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeTests.java @@ -21,17 +21,17 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator.Range; -import org.elasticsearch.search.aggregations.bucket.range.date.DateRangeAggregatorBuilder; +import org.elasticsearch.search.aggregations.bucket.range.date.DateRangeAggregationBuilder; import org.joda.time.DateTimeZone; -public class DateRangeTests extends BaseAggregationTestCase { +public class DateRangeTests extends BaseAggregationTestCase { private final static String[] timeZoneIds = DateTimeZone.getAvailableIDs().toArray(new String[DateTimeZone.getAvailableIDs().size()]); @Override - protected DateRangeAggregatorBuilder createTestAggregatorBuilder() { + protected DateRangeAggregationBuilder createTestAggregatorBuilder() { int numRanges = randomIntBetween(1, 10); - DateRangeAggregatorBuilder factory = new DateRangeAggregatorBuilder("foo"); + DateRangeAggregationBuilder factory = new DateRangeAggregationBuilder("foo"); for (int i = 0; i < numRanges; i++) { String key = null; if (randomBoolean()) { diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java index 6635a674b6a..96fbf17480c 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java @@ -22,12 +22,12 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import 
org.elasticsearch.index.query.TermQueryBuilder; -import org.elasticsearch.search.aggregations.bucket.sampler.DiversifiedAggregatorBuilder; +import org.elasticsearch.search.aggregations.bucket.sampler.DiversifiedAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.sampler.Sampler; import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregator; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket; -import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorBuilder; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.max.Max; import org.elasticsearch.test.ESIntegTestCase; @@ -128,7 +128,7 @@ public class DiversifiedSamplerIT extends ESIntegTestCase { public void testSimpleDiversity() throws Exception { int MAX_DOCS_PER_AUTHOR = 1; - DiversifiedAggregatorBuilder sampleAgg = new DiversifiedAggregatorBuilder("sample").shardSize(100); + DiversifiedAggregationBuilder sampleAgg = new DiversifiedAggregationBuilder("sample").shardSize(100); sampleAgg.field("author").maxDocsPerValue(MAX_DOCS_PER_AUTHOR).executionHint(randomExecutionHint()); sampleAgg.subAggregation(terms("authors").field("author")); SearchResponse response = client().prepareSearch("test") @@ -151,9 +151,9 @@ public class DiversifiedSamplerIT extends ESIntegTestCase { public void testNestedDiversity() throws Exception { // Test multiple samples gathered under buckets made by a parent agg int MAX_DOCS_PER_AUTHOR = 1; - TermsAggregatorBuilder rootTerms = terms("genres").field("genre"); + TermsAggregationBuilder rootTerms = terms("genres").field("genre"); - DiversifiedAggregatorBuilder sampleAgg = new DiversifiedAggregatorBuilder("sample").shardSize(100); + DiversifiedAggregationBuilder sampleAgg = new DiversifiedAggregationBuilder("sample").shardSize(100); sampleAgg.field("author").maxDocsPerValue(MAX_DOCS_PER_AUTHOR).executionHint(randomExecutionHint()); sampleAgg.subAggregation(terms("authors").field("author")); @@ -178,11 +178,11 @@ public class DiversifiedSamplerIT extends ESIntegTestCase { // Test samples nested under samples int MAX_DOCS_PER_AUTHOR = 1; int MAX_DOCS_PER_GENRE = 2; - DiversifiedAggregatorBuilder rootSample = new DiversifiedAggregatorBuilder("genreSample").shardSize(100) + DiversifiedAggregationBuilder rootSample = new DiversifiedAggregationBuilder("genreSample").shardSize(100) .field("genre") .maxDocsPerValue(MAX_DOCS_PER_GENRE); - DiversifiedAggregatorBuilder sampleAgg = new DiversifiedAggregatorBuilder("sample").shardSize(100); + DiversifiedAggregationBuilder sampleAgg = new DiversifiedAggregationBuilder("sample").shardSize(100); sampleAgg.field("author").maxDocsPerValue(MAX_DOCS_PER_AUTHOR).executionHint(randomExecutionHint()); sampleAgg.subAggregation(terms("authors").field("author")); sampleAgg.subAggregation(terms("genres").field("genre")); @@ -210,7 +210,7 @@ public class DiversifiedSamplerIT extends ESIntegTestCase { public void testPartiallyUnmappedDiversifyField() throws Exception { // One of the indexes is missing the "author" field used for // diversifying results - DiversifiedAggregatorBuilder sampleAgg = new DiversifiedAggregatorBuilder("sample").shardSize(100).field("author") + DiversifiedAggregationBuilder sampleAgg = new DiversifiedAggregationBuilder("sample").shardSize(100).field("author") .maxDocsPerValue(1); sampleAgg.subAggregation(terms("authors").field("author")); SearchResponse 
response = client().prepareSearch("idx_unmapped_author", "test").setSearchType(SearchType.QUERY_AND_FETCH) @@ -226,7 +226,7 @@ public class DiversifiedSamplerIT extends ESIntegTestCase { public void testWhollyUnmappedDiversifyField() throws Exception { //All of the indices are missing the "author" field used for diversifying results int MAX_DOCS_PER_AUTHOR = 1; - DiversifiedAggregatorBuilder sampleAgg = new DiversifiedAggregatorBuilder("sample").shardSize(100); + DiversifiedAggregationBuilder sampleAgg = new DiversifiedAggregationBuilder("sample").shardSize(100); sampleAgg.field("author").maxDocsPerValue(MAX_DOCS_PER_AUTHOR).executionHint(randomExecutionHint()); sampleAgg.subAggregation(terms("authors").field("author")); SearchResponse response = client().prepareSearch("idx_unmapped", "idx_unmapped_author").setSearchType(SearchType.QUERY_AND_FETCH) diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerTests.java index 512d7a8d69b..b68caad0ea7 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerTests.java @@ -21,14 +21,14 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; -import org.elasticsearch.search.aggregations.bucket.sampler.DiversifiedAggregatorBuilder; +import org.elasticsearch.search.aggregations.bucket.sampler.DiversifiedAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregator.ExecutionMode; -public class DiversifiedSamplerTests extends BaseAggregationTestCase { +public class DiversifiedSamplerTests extends BaseAggregationTestCase { @Override - protected final DiversifiedAggregatorBuilder createTestAggregatorBuilder() { - DiversifiedAggregatorBuilder factory = new DiversifiedAggregatorBuilder("foo"); + protected final DiversifiedAggregationBuilder createTestAggregatorBuilder() { + DiversifiedAggregationBuilder factory = new DiversifiedAggregationBuilder("foo"); String field = randomNumericField(); int randomFieldBranch = randomInt(3); switch (randomFieldBranch) { diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceRangeTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceRangeTests.java index 89dd3e3b137..65b80537c7a 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceRangeTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceRangeTests.java @@ -23,17 +23,17 @@ import org.elasticsearch.common.geo.GeoDistance; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; -import org.elasticsearch.search.aggregations.bucket.range.geodistance.GeoDistanceAggregatorBuilder; +import org.elasticsearch.search.aggregations.bucket.range.geodistance.GeoDistanceAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.range.geodistance.GeoDistanceParser.Range; import org.elasticsearch.test.geo.RandomShapeGenerator; -public class GeoDistanceRangeTests extends BaseAggregationTestCase { +public class GeoDistanceRangeTests extends BaseAggregationTestCase { @Override - protected GeoDistanceAggregatorBuilder 
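
The diversified-sampler tests above all rely on the same idea: a sample that keeps at most maxDocsPerValue hits per distinct value of the diversify field (here, at most one document per author). An illustrative reduction of that dedup rule (plain JDK, not the aggregator's actual implementation):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class DiversitySketch {
    public static void main(String[] args) {
        int maxDocsPerValue = 1;
        List<String> authorsOfHits = Arrays.asList("tolkien", "tolkien", "herbert", "asimov", "herbert");
        Map<String, Integer> perValueCount = new HashMap<>();
        List<String> sample = new ArrayList<>();
        for (String author : authorsOfHits) {
            int seen = perValueCount.merge(author, 1, Integer::sum);
            if (seen <= maxDocsPerValue) {
                sample.add(author); // keep the first hit per author, skip later duplicates
            }
        }
        System.out.println(sample); // [tolkien, herbert, asimov]
    }
}
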
createTestAggregatorBuilder() { + protected GeoDistanceAggregationBuilder createTestAggregatorBuilder() { int numRanges = randomIntBetween(1, 10); GeoPoint origin = RandomShapeGenerator.randomPoint(random()); - GeoDistanceAggregatorBuilder factory = new GeoDistanceAggregatorBuilder("foo", origin); + GeoDistanceAggregationBuilder factory = new GeoDistanceAggregationBuilder("foo", origin); for (int i = 0; i < numRanges; i++) { String key = null; if (randomBoolean()) { diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridTests.java index 34e3e266d6a..c3c8f6902b3 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridTests.java @@ -20,14 +20,14 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; -import org.elasticsearch.search.aggregations.bucket.geogrid.GeoGridAggregatorBuilder; +import org.elasticsearch.search.aggregations.bucket.geogrid.GeoGridAggregationBuilder; -public class GeoHashGridTests extends BaseAggregationTestCase { +public class GeoHashGridTests extends BaseAggregationTestCase { @Override - protected GeoGridAggregatorBuilder createTestAggregatorBuilder() { + protected GeoGridAggregationBuilder createTestAggregatorBuilder() { String name = randomAsciiOfLengthBetween(3, 20); - GeoGridAggregatorBuilder factory = new GeoGridAggregatorBuilder(name); + GeoGridAggregationBuilder factory = new GeoGridAggregationBuilder(name); if (randomBoolean()) { int precision = randomIntBetween(1, 12); factory.precision(precision); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GlobalTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GlobalTests.java index ca2b5c9d6c0..a874eff839b 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GlobalTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GlobalTests.java @@ -20,13 +20,13 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; -import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregatorBuilder; +import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder; -public class GlobalTests extends BaseAggregationTestCase { +public class GlobalTests extends BaseAggregationTestCase { @Override - protected GlobalAggregatorBuilder createTestAggregatorBuilder() { - return new GlobalAggregatorBuilder(randomAsciiOfLengthBetween(3, 20)); + protected GlobalAggregationBuilder createTestAggregatorBuilder() { + return new GlobalAggregationBuilder(randomAsciiOfLengthBetween(3, 20)); } } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramTests.java index 1cd930ecc31..ac0d6d0df8b 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramTests.java @@ -22,13 +22,13 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; import org.elasticsearch.search.aggregations.bucket.histogram.ExtendedBounds; import 
org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Order; -import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregatorBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; -public class HistogramTests extends BaseAggregationTestCase { +public class HistogramTests extends BaseAggregationTestCase { @Override - protected HistogramAggregatorBuilder createTestAggregatorBuilder() { - HistogramAggregatorBuilder factory = new HistogramAggregatorBuilder("foo"); + protected HistogramAggregationBuilder createTestAggregatorBuilder() { + HistogramAggregationBuilder factory = new HistogramAggregationBuilder("foo"); factory.field(INT_FIELD_NAME); factory.interval(randomIntBetween(1, 100000)); if (randomBoolean()) { diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/IpRangeIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/IpRangeIT.java index 468e8503b0a..3a2abda6aa7 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/IpRangeIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/IpRangeIT.java @@ -37,7 +37,6 @@ import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptModule; import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.search.aggregations.AggregationBuilders; -import org.elasticsearch.search.aggregations.AggregatorBuilder; import org.elasticsearch.search.aggregations.bucket.range.Range; import org.elasticsearch.test.ESIntegTestCase; diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/IpRangeTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/IpRangeTests.java index db31f576e0c..5d86571f08f 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/IpRangeTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/IpRangeTests.java @@ -24,9 +24,9 @@ import java.net.UnknownHostException; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; -import org.elasticsearch.search.aggregations.bucket.range.ip.IpRangeAggregatorBuilder; +import org.elasticsearch.search.aggregations.bucket.range.ip.IpRangeAggregationBuilder; -public class IpRangeTests extends BaseAggregationTestCase { +public class IpRangeTests extends BaseAggregationTestCase { private static String randomIp(boolean v4) { try { @@ -45,9 +45,9 @@ public class IpRangeTests extends BaseAggregationTestCase> builder(); + public abstract ValuesSourceAggregationBuilder.LeafOnly> builder(); public String sortKey() { return name; diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/RangeTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/RangeTests.java index b9c60dab1be..22b4eae8421 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/RangeTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/RangeTests.java @@ -21,14 +21,14 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator.Range; -import org.elasticsearch.search.aggregations.bucket.range.RangeAggregatorBuilder; +import org.elasticsearch.search.aggregations.bucket.range.RangeAggregationBuilder; -public class RangeTests extends BaseAggregationTestCase { +public class 
RangeTests extends BaseAggregationTestCase { @Override - protected RangeAggregatorBuilder createTestAggregatorBuilder() { + protected RangeAggregationBuilder createTestAggregatorBuilder() { int numRanges = randomIntBetween(1, 10); - RangeAggregatorBuilder factory = new RangeAggregatorBuilder("foo"); + RangeAggregationBuilder factory = new RangeAggregationBuilder("foo"); for (int i = 0; i < numRanges; i++) { String key = null; if (randomBoolean()) { diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java index f42d213bb57..980d792013a 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java @@ -24,7 +24,7 @@ import org.elasticsearch.action.search.SearchType; import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.search.aggregations.bucket.sampler.Sampler; import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregator; -import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregatorBuilder; +import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket; import org.elasticsearch.search.aggregations.metrics.max.Max; @@ -123,7 +123,7 @@ public class SamplerIT extends ESIntegTestCase { } public void testSimpleSampler() throws Exception { - SamplerAggregatorBuilder sampleAgg = sampler("sample").shardSize(100); + SamplerAggregationBuilder sampleAgg = sampler("sample").shardSize(100); sampleAgg.subAggregation(terms("authors").field("author")); SearchResponse response = client().prepareSearch("test").setSearchType(SearchType.QUERY_AND_FETCH) .setQuery(new TermQueryBuilder("genre", "fantasy")).setFrom(0).setSize(60).addAggregation(sampleAgg).execute().actionGet(); @@ -140,7 +140,7 @@ public class SamplerIT extends ESIntegTestCase { } public void testUnmappedChildAggNoDiversity() throws Exception { - SamplerAggregatorBuilder sampleAgg = sampler("sample").shardSize(100); + SamplerAggregationBuilder sampleAgg = sampler("sample").shardSize(100); sampleAgg.subAggregation(terms("authors").field("author")); SearchResponse response = client().prepareSearch("idx_unmapped") .setSearchType(SearchType.QUERY_AND_FETCH) @@ -157,7 +157,7 @@ public class SamplerIT extends ESIntegTestCase { } public void testPartiallyUnmappedChildAggNoDiversity() throws Exception { - SamplerAggregatorBuilder sampleAgg = sampler("sample").shardSize(100); + SamplerAggregationBuilder sampleAgg = sampler("sample").shardSize(100); sampleAgg.subAggregation(terms("authors").field("author")); SearchResponse response = client().prepareSearch("idx_unmapped", "test") .setSearchType(SearchType.QUERY_AND_FETCH) diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerTests.java index 8d792fd72ff..e4de490f6b2 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerTests.java @@ -20,13 +20,13 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; -import 
org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregatorBuilder; +import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregationBuilder; -public class SamplerTests extends BaseAggregationTestCase { +public class SamplerTests extends BaseAggregationTestCase { @Override - protected final SamplerAggregatorBuilder createTestAggregatorBuilder() { - SamplerAggregatorBuilder factory = new SamplerAggregatorBuilder("foo"); + protected final SamplerAggregationBuilder createTestAggregatorBuilder() { + SamplerAggregationBuilder factory = new SamplerAggregationBuilder("foo"); if (randomBoolean()) { factory.shardSize(randomIntBetween(1, 1000)); } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsTests.java index 373eb0e6e96..897125ee2fa 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsTests.java @@ -24,7 +24,7 @@ import org.apache.lucene.util.automaton.RegExp; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; -import org.elasticsearch.search.aggregations.bucket.significant.SignificantTermsAggregatorBuilder; +import org.elasticsearch.search.aggregations.bucket.significant.SignificantTermsAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.significant.heuristics.ChiSquare; import org.elasticsearch.search.aggregations.bucket.significant.heuristics.GND; import org.elasticsearch.search.aggregations.bucket.significant.heuristics.JLHScore; @@ -37,7 +37,7 @@ import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude import java.util.SortedSet; import java.util.TreeSet; -public class SignificantTermsTests extends BaseAggregationTestCase { +public class SignificantTermsTests extends BaseAggregationTestCase { private static final String[] executionHints; @@ -50,9 +50,9 @@ public class SignificantTermsTests extends BaseAggregationTestCase { +public class TermsTests extends BaseAggregationTestCase { private static final String[] executionHints; @@ -46,9 +46,9 @@ public class TermsTests extends BaseAggregationTestCase } @Override - protected TermsAggregatorBuilder createTestAggregatorBuilder() { + protected TermsAggregationBuilder createTestAggregatorBuilder() { String name = randomAsciiOfLengthBetween(3, 20); - TermsAggregatorBuilder factory = new TermsAggregatorBuilder(name, null); + TermsAggregationBuilder factory = new TermsAggregationBuilder(name, null); String field = randomAsciiOfLengthBetween(3, 20); int randomFieldBranch = randomInt(2); switch (randomFieldBranch) { diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java index dbe10e2ff9d..272aa70d48b 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java @@ -123,7 +123,7 @@ public class NestedAggregatorTests extends ESSingleNodeTestCase { AggregationContext context = new AggregationContext(searchContext); AggregatorFactories.Builder builder = AggregatorFactories.builder(); - 
NestedAggregatorBuilder factory = new NestedAggregatorBuilder("test", "nested_field"); + NestedAggregationBuilder factory = new NestedAggregationBuilder("test", "nested_field"); builder.addAggregator(factory); AggregatorFactories factories = builder.build(context, null); searchContext.aggregations(new SearchContextAggregations(factories)); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedTests.java index 6ea5b3791d8..29dde100a08 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedTests.java @@ -21,11 +21,11 @@ package org.elasticsearch.search.aggregations.bucket.nested; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; -public class NestedTests extends BaseAggregationTestCase { +public class NestedTests extends BaseAggregationTestCase { @Override - protected NestedAggregatorBuilder createTestAggregatorBuilder() { - return new NestedAggregatorBuilder(randomAsciiOfLengthBetween(1, 20), randomAsciiOfLengthBetween(3, 40)); + protected NestedAggregationBuilder createTestAggregatorBuilder() { + return new NestedAggregationBuilder(randomAsciiOfLengthBetween(1, 20), randomAsciiOfLengthBetween(3, 40)); } } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedTests.java index 1a45c550bc1..97dbf3718af 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedTests.java @@ -21,11 +21,11 @@ package org.elasticsearch.search.aggregations.bucket.nested; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; -public class ReverseNestedTests extends BaseAggregationTestCase { +public class ReverseNestedTests extends BaseAggregationTestCase { @Override - protected ReverseNestedAggregatorBuilder createTestAggregatorBuilder() { - ReverseNestedAggregatorBuilder factory = new ReverseNestedAggregatorBuilder(randomAsciiOfLengthBetween(1, 20)); + protected ReverseNestedAggregationBuilder createTestAggregatorBuilder() { + ReverseNestedAggregationBuilder factory = new ReverseNestedAggregationBuilder(randomAsciiOfLengthBetween(1, 20)); if (randomBoolean()) { factory.path(randomAsciiOfLengthBetween(3, 40)); } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java index df449aeeaf2..8dc015b30ed 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java @@ -257,7 +257,7 @@ public class SignificanceHeuristicTests extends ESTestCase { protected SignificanceHeuristic parseFromBuilder(ParseFieldRegistry significanceHeuristicParserRegistry, SearchContext searchContext, SignificanceHeuristic significanceHeuristic) throws IOException { - SignificantTermsAggregatorBuilder stBuilder = significantTerms("testagg"); + SignificantTermsAggregationBuilder stBuilder = significantTerms("testagg"); 
stBuilder.significanceHeuristic(significanceHeuristic).field("text").minDocCount(200); XContentBuilder stXContentBuilder = XContentFactory.jsonBuilder(); stBuilder.internalXContent(stXContentBuilder, null); @@ -271,7 +271,7 @@ public class SignificanceHeuristicTests extends ESTestCase { IndicesQueriesRegistry registry = new IndicesQueriesRegistry(); QueryParseContext parseContext = new QueryParseContext(registry, stParser, ParseFieldMatcher.STRICT); stParser.nextToken(); - SignificantTermsAggregatorBuilder aggregatorFactory = (SignificantTermsAggregatorBuilder) new SignificantTermsParser( + SignificantTermsAggregationBuilder aggregatorFactory = (SignificantTermsAggregationBuilder) new SignificantTermsParser( significanceHeuristicParserRegistry, registry).parse("testagg", parseContext); stParser.nextToken(); assertThat(aggregatorFactory.getBucketCountThresholds().getMinDocCount(), equalTo(200L)); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericMetricTestCase.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericMetricTestCase.java index 58d7fa70d62..f1ccf344a7c 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericMetricTestCase.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericMetricTestCase.java @@ -22,9 +22,9 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; import org.elasticsearch.search.aggregations.support.ValuesSource; -import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorBuilder; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; -public abstract class AbstractNumericMetricTestCase> +public abstract class AbstractNumericMetricTestCase> extends BaseAggregationTestCase { @Override diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgTests.java index 61e685169f6..df90dc4f7c3 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgTests.java @@ -19,13 +19,13 @@ package org.elasticsearch.search.aggregations.metrics; -import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregatorBuilder; +import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder; -public class AvgTests extends AbstractNumericMetricTestCase { +public class AvgTests extends AbstractNumericMetricTestCase { @Override - protected AvgAggregatorBuilder doCreateTestAggregatorFactory() { - return new AvgAggregatorBuilder("foo"); + protected AvgAggregationBuilder doCreateTestAggregatorFactory() { + return new AvgAggregationBuilder("foo"); } } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsTests.java index 4a7ca7e8b38..3f78cc17aa9 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsTests.java @@ -19,13 +19,13 @@ package org.elasticsearch.search.aggregations.metrics; -import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStatsAggregatorBuilder; +import 
org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStatsAggregationBuilder; -public class ExtendedStatsTests extends AbstractNumericMetricTestCase { +public class ExtendedStatsTests extends AbstractNumericMetricTestCase { @Override - protected ExtendedStatsAggregatorBuilder doCreateTestAggregatorFactory() { - ExtendedStatsAggregatorBuilder factory = new ExtendedStatsAggregatorBuilder("foo"); + protected ExtendedStatsAggregationBuilder doCreateTestAggregatorFactory() { + ExtendedStatsAggregationBuilder factory = new ExtendedStatsAggregationBuilder("foo"); if (randomBoolean()) { factory.sigma(randomDoubleBetween(0.0, 10.0, true)); } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/FilterTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/FilterTests.java index 8a6a4373691..1b563d531a8 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/FilterTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/FilterTests.java @@ -21,13 +21,13 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; -import org.elasticsearch.search.aggregations.bucket.filter.FilterAggregatorBuilder; +import org.elasticsearch.search.aggregations.bucket.filter.FilterAggregationBuilder; -public class FilterTests extends BaseAggregationTestCase { +public class FilterTests extends BaseAggregationTestCase { @Override - protected FilterAggregatorBuilder createTestAggregatorBuilder() { - FilterAggregatorBuilder factory = new FilterAggregatorBuilder(randomAsciiOfLengthBetween(1, 20), + protected FilterAggregationBuilder createTestAggregatorBuilder() { + FilterAggregationBuilder factory = new FilterAggregationBuilder(randomAsciiOfLengthBetween(1, 20), QueryBuilders.termQuery(randomAsciiOfLengthBetween(5, 20), randomAsciiOfLengthBetween(5, 20))); // NORELEASE make RandomQueryBuilder work outside of the // AbstractQueryTestCase diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/FiltersTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/FiltersTests.java index cd2dae53327..89fc38b7cd8 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/FiltersTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/FiltersTests.java @@ -24,15 +24,15 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; import org.elasticsearch.search.aggregations.bucket.filters.FiltersAggregator.KeyedFilter; -import org.elasticsearch.search.aggregations.bucket.filters.FiltersAggregatorBuilder; +import org.elasticsearch.search.aggregations.bucket.filters.FiltersAggregationBuilder; -public class FiltersTests extends BaseAggregationTestCase { +public class FiltersTests extends BaseAggregationTestCase { @Override - protected FiltersAggregatorBuilder createTestAggregatorBuilder() { + protected FiltersAggregationBuilder createTestAggregatorBuilder() { int size = randomIntBetween(1, 20); - FiltersAggregatorBuilder factory; + FiltersAggregationBuilder factory; if (randomBoolean()) { KeyedFilter[] filters = new KeyedFilter[size]; int i = 0; @@ -40,13 +40,13 @@ public class FiltersTests extends BaseAggregationTestCase { +public class GeoBoundsTests extends BaseAggregationTestCase { @Override - protected 
GeoBoundsAggregatorBuilder createTestAggregatorBuilder() { - GeoBoundsAggregatorBuilder factory = new GeoBoundsAggregatorBuilder(randomAsciiOfLengthBetween(1, 20)); + protected GeoBoundsAggregationBuilder createTestAggregatorBuilder() { + GeoBoundsAggregationBuilder factory = new GeoBoundsAggregationBuilder(randomAsciiOfLengthBetween(1, 20)); String field = randomAsciiOfLengthBetween(3, 20); factory.field(field); if (randomBoolean()) { diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidTests.java index c912c18a82e..1ea21a1ff1d 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidTests.java @@ -21,13 +21,13 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; -import org.elasticsearch.search.aggregations.metrics.geocentroid.GeoCentroidAggregatorBuilder; +import org.elasticsearch.search.aggregations.metrics.geocentroid.GeoCentroidAggregationBuilder; -public class GeoCentroidTests extends BaseAggregationTestCase { +public class GeoCentroidTests extends BaseAggregationTestCase { @Override - protected GeoCentroidAggregatorBuilder createTestAggregatorBuilder() { - GeoCentroidAggregatorBuilder factory = new GeoCentroidAggregatorBuilder(randomAsciiOfLengthBetween(1, 20)); + protected GeoCentroidAggregationBuilder createTestAggregatorBuilder() { + GeoCentroidAggregationBuilder factory = new GeoCentroidAggregationBuilder(randomAsciiOfLengthBetween(1, 20)); String field = randomNumericField(); int randomFieldBranch = randomInt(3); switch (randomFieldBranch) { diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxTests.java index a9fe4654c9d..6ffd824aa3c 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxTests.java @@ -19,13 +19,13 @@ package org.elasticsearch.search.aggregations.metrics; -import org.elasticsearch.search.aggregations.metrics.max.MaxAggregatorBuilder; +import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder; -public class MaxTests extends AbstractNumericMetricTestCase { +public class MaxTests extends AbstractNumericMetricTestCase { @Override - protected MaxAggregatorBuilder doCreateTestAggregatorFactory() { - return new MaxAggregatorBuilder("foo"); + protected MaxAggregationBuilder doCreateTestAggregatorFactory() { + return new MaxAggregationBuilder("foo"); } } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MinTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MinTests.java index 54512f579f3..eed4059ade7 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MinTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MinTests.java @@ -19,13 +19,13 @@ package org.elasticsearch.search.aggregations.metrics; -import org.elasticsearch.search.aggregations.metrics.min.MinAggregatorBuilder; +import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder; -public class MinTests extends AbstractNumericMetricTestCase { +public class MinTests extends 
AbstractNumericMetricTestCase { @Override - protected MinAggregatorBuilder doCreateTestAggregatorFactory() { - return new MinAggregatorBuilder("foo"); + protected MinAggregationBuilder doCreateTestAggregatorFactory() { + return new MinAggregationBuilder("foo"); } } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MissingTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MissingTests.java index 3f49da5eb6e..979747ade2e 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MissingTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MissingTests.java @@ -21,13 +21,13 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; -import org.elasticsearch.search.aggregations.bucket.missing.MissingAggregatorBuilder; +import org.elasticsearch.search.aggregations.bucket.missing.MissingAggregationBuilder; -public class MissingTests extends BaseAggregationTestCase { +public class MissingTests extends BaseAggregationTestCase { @Override - protected final MissingAggregatorBuilder createTestAggregatorBuilder() { - MissingAggregatorBuilder factory = new MissingAggregatorBuilder("foo", null); + protected final MissingAggregationBuilder createTestAggregatorBuilder() { + MissingAggregationBuilder factory = new MissingAggregationBuilder("foo", null); String field = randomNumericField(); int randomFieldBranch = randomInt(3); switch (randomFieldBranch) { diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentileRanksTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentileRanksTests.java index 4636e4ed174..1907733fbd8 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentileRanksTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentileRanksTests.java @@ -21,13 +21,13 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; -import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanksAggregatorBuilder; +import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanksAggregationBuilder; -public class PercentileRanksTests extends BaseAggregationTestCase { +public class PercentileRanksTests extends BaseAggregationTestCase { @Override - protected PercentileRanksAggregatorBuilder createTestAggregatorBuilder() { - PercentileRanksAggregatorBuilder factory = new PercentileRanksAggregatorBuilder(randomAsciiOfLengthBetween(1, 20)); + protected PercentileRanksAggregationBuilder createTestAggregatorBuilder() { + PercentileRanksAggregationBuilder factory = new PercentileRanksAggregationBuilder(randomAsciiOfLengthBetween(1, 20)); if (randomBoolean()) { factory.keyed(randomBoolean()); } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentilesTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentilesTests.java index 674197cffbf..b5539f8c1be 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentilesTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentilesTests.java @@ -21,13 +21,13 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.script.Script; import 
org.elasticsearch.search.aggregations.BaseAggregationTestCase; -import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesAggregatorBuilder; +import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesAggregationBuilder; -public class PercentilesTests extends BaseAggregationTestCase { +public class PercentilesTests extends BaseAggregationTestCase { @Override - protected PercentilesAggregatorBuilder createTestAggregatorBuilder() { - PercentilesAggregatorBuilder factory = new PercentilesAggregatorBuilder(randomAsciiOfLengthBetween(1, 20)); + protected PercentilesAggregationBuilder createTestAggregatorBuilder() { + PercentilesAggregationBuilder factory = new PercentilesAggregationBuilder(randomAsciiOfLengthBetween(1, 20)); if (randomBoolean()) { factory.keyed(randomBoolean()); } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricTests.java index a4e12b56d8e..e4f96fae762 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricTests.java @@ -22,16 +22,16 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; -import org.elasticsearch.search.aggregations.metrics.scripted.ScriptedMetricAggregatorBuilder; +import org.elasticsearch.search.aggregations.metrics.scripted.ScriptedMetricAggregationBuilder; import java.util.HashMap; import java.util.Map; -public class ScriptedMetricTests extends BaseAggregationTestCase { +public class ScriptedMetricTests extends BaseAggregationTestCase { @Override - protected ScriptedMetricAggregatorBuilder createTestAggregatorBuilder() { - ScriptedMetricAggregatorBuilder factory = new ScriptedMetricAggregatorBuilder(randomAsciiOfLengthBetween(1, 20)); + protected ScriptedMetricAggregationBuilder createTestAggregatorBuilder() { + ScriptedMetricAggregationBuilder factory = new ScriptedMetricAggregationBuilder(randomAsciiOfLengthBetween(1, 20)); if (randomBoolean()) { factory.initScript(randomScript("initScript")); } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsTests.java index 5db4e1e332b..76a8e9aa98a 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsTests.java @@ -19,13 +19,13 @@ package org.elasticsearch.search.aggregations.metrics; -import org.elasticsearch.search.aggregations.metrics.stats.StatsAggregatorBuilder; +import org.elasticsearch.search.aggregations.metrics.stats.StatsAggregationBuilder; -public class StatsTests extends AbstractNumericMetricTestCase { +public class StatsTests extends AbstractNumericMetricTestCase { @Override - protected StatsAggregatorBuilder doCreateTestAggregatorFactory() { - return new StatsAggregatorBuilder("foo"); + protected StatsAggregationBuilder doCreateTestAggregatorFactory() { + return new StatsAggregationBuilder("foo"); } } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumTests.java index a6d9f0bd270..edc6d4edef0 100644 
--- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumTests.java @@ -19,13 +19,13 @@ package org.elasticsearch.search.aggregations.metrics; -import org.elasticsearch.search.aggregations.metrics.sum.SumAggregatorBuilder; +import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder; -public class SumTests extends AbstractNumericMetricTestCase { +public class SumTests extends AbstractNumericMetricTestCase { @Override - protected SumAggregatorBuilder doCreateTestAggregatorFactory() { - return new SumAggregatorBuilder("foo"); + protected SumAggregationBuilder doCreateTestAggregatorFactory() { + return new SumAggregationBuilder("foo"); } } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java index 00bb04dde9f..c79ab04e492 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java @@ -26,7 +26,7 @@ import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.AggregationInitializationException; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; -import org.elasticsearch.search.aggregations.metrics.tophits.TopHitsAggregatorBuilder; +import org.elasticsearch.search.aggregations.metrics.tophits.TopHitsAggregationBuilder; import org.elasticsearch.search.fetch.source.FetchSourceContext; import org.elasticsearch.search.highlight.HighlightBuilderTests; import org.elasticsearch.search.sort.ScriptSortBuilder.ScriptSortType; @@ -38,11 +38,11 @@ import java.util.List; import static org.hamcrest.Matchers.containsString; -public class TopHitsTests extends BaseAggregationTestCase { +public class TopHitsTests extends BaseAggregationTestCase { @Override - protected final TopHitsAggregatorBuilder createTestAggregatorBuilder() { - TopHitsAggregatorBuilder factory = new TopHitsAggregatorBuilder("foo"); + protected final TopHitsAggregationBuilder createTestAggregatorBuilder() { + TopHitsAggregationBuilder factory = new TopHitsAggregationBuilder("foo"); if (randomBoolean()) { factory.from(randomIntBetween(0, 10000)); } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountTests.java index c9b601c4e8b..99d4d41839c 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountTests.java @@ -21,13 +21,13 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; -import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCountAggregatorBuilder; +import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCountAggregationBuilder; -public class ValueCountTests extends BaseAggregationTestCase { +public class ValueCountTests extends BaseAggregationTestCase { @Override - protected final ValueCountAggregatorBuilder createTestAggregatorBuilder() { - ValueCountAggregatorBuilder factory = new ValueCountAggregatorBuilder("foo", null); + protected final ValueCountAggregationBuilder 
createTestAggregatorBuilder() { + ValueCountAggregationBuilder factory = new ValueCountAggregationBuilder("foo", null); String field = randomNumericField(); int randomFieldBranch = randomInt(3); switch (randomFieldBranch) { diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityTests.java index a769a71b8e5..ab0377c6331 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityTests.java @@ -22,11 +22,11 @@ package org.elasticsearch.search.aggregations.metrics.cardinality; import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; -public class CardinalityTests extends BaseAggregationTestCase { +public class CardinalityTests extends BaseAggregationTestCase { @Override - protected final CardinalityAggregatorBuilder createTestAggregatorBuilder() { - CardinalityAggregatorBuilder factory = new CardinalityAggregatorBuilder("foo", null); + protected final CardinalityAggregationBuilder createTestAggregatorBuilder() { + CardinalityAggregationBuilder factory = new CardinalityAggregationBuilder("foo", null); String field = randomNumericField(); int randomFieldBranch = randomInt(3); switch (randomFieldBranch) { diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregationHelperTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregationHelperTests.java index 43b3b4d357c..ce9394692de 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregationHelperTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregationHelperTests.java @@ -20,11 +20,11 @@ package org.elasticsearch.search.aggregations.pipeline; -import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregatorBuilder; -import org.elasticsearch.search.aggregations.metrics.max.MaxAggregatorBuilder; -import org.elasticsearch.search.aggregations.metrics.min.MinAggregatorBuilder; -import org.elasticsearch.search.aggregations.metrics.sum.SumAggregatorBuilder; -import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorBuilder; +import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; @@ -109,27 +109,27 @@ public class PipelineAggregationHelperTests extends ESTestCase { * @param values Array of values to compute metric for * @param metric A metric builder which defines what kind of metric should be returned for the values */ - public static double calculateMetric(double[] values, ValuesSourceAggregatorBuilder metric) { + public static double calculateMetric(double[] values, ValuesSourceAggregationBuilder metric) { - if (metric instanceof MinAggregatorBuilder) { + if (metric instanceof MinAggregationBuilder) { double accumulator = Double.POSITIVE_INFINITY; for (double value : values) { accumulator = Math.min(accumulator, value); } 
return accumulator; - } else if (metric instanceof MaxAggregatorBuilder) { + } else if (metric instanceof MaxAggregationBuilder) { double accumulator = Double.NEGATIVE_INFINITY; for (double value : values) { accumulator = Math.max(accumulator, value); } return accumulator; - } else if (metric instanceof SumAggregatorBuilder) { + } else if (metric instanceof SumAggregationBuilder) { double accumulator = 0; for (double value : values) { accumulator += value; } return accumulator; - } else if (metric instanceof AvgAggregatorBuilder) { + } else if (metric instanceof AvgAggregationBuilder) { double accumulator = 0; for (double value : values) { accumulator += value; diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java index 94ac6fc10ab..c16d8e8062e 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java @@ -39,7 +39,7 @@ import org.elasticsearch.search.aggregations.pipeline.movavg.models.LinearModel; import org.elasticsearch.search.aggregations.pipeline.movavg.models.MovAvgModelBuilder; import org.elasticsearch.search.aggregations.pipeline.movavg.models.SimpleModel; import org.elasticsearch.search.aggregations.support.ValuesSource; -import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorBuilder; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.test.ESIntegTestCase; import org.hamcrest.Matchers; @@ -79,7 +79,7 @@ public class MovAvgIT extends ESIntegTestCase { static int period; static HoltWintersModel.SeasonalityType seasonalityType; static BucketHelpers.GapPolicy gapPolicy; - static ValuesSourceAggregatorBuilder> metric; + static ValuesSourceAggregationBuilder> metric; static List mockHisto; static Map> testValues; @@ -1289,8 +1289,8 @@ public class MovAvgIT extends ESIntegTestCase { } } - private ValuesSourceAggregatorBuilder> randomMetric(String name, - String field) { + private ValuesSourceAggregationBuilder> randomMetric(String name, + String field) { int rand = randomIntBetween(0,3); switch (rand) { diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffIT.java index 66961c2fcbc..1b263d1af09 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffIT.java @@ -29,7 +29,7 @@ import org.elasticsearch.search.aggregations.pipeline.BucketHelpers; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregationHelperTests; import org.elasticsearch.search.aggregations.pipeline.SimpleValue; import org.elasticsearch.search.aggregations.support.ValuesSource; -import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorBuilder; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.test.ESIntegTestCase; import org.hamcrest.Matchers; @@ -61,7 +61,7 @@ public class SerialDiffIT extends ESIntegTestCase { static int numBuckets; static int lag; static BucketHelpers.GapPolicy gapPolicy; - static ValuesSourceAggregatorBuilder> metric; + static ValuesSourceAggregationBuilder> metric; 
static List mockHisto; static Map> testValues; @@ -81,7 +81,7 @@ public class SerialDiffIT extends ESIntegTestCase { } } - private ValuesSourceAggregatorBuilder> randomMetric(String name, String field) { + private ValuesSourceAggregationBuilder> randomMetric(String name, String field) { int rand = randomIntBetween(0,3); switch (rand) { diff --git a/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java b/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java index 9f0113829ce..077d978a4ce 100644 --- a/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java @@ -72,6 +72,7 @@ import org.elasticsearch.search.aggregations.AggregatorParsers; import org.elasticsearch.search.fetch.source.FetchSourceContext; import org.elasticsearch.search.highlight.HighlightBuilderTests; import org.elasticsearch.search.rescore.QueryRescoreBuilderTests; +import org.elasticsearch.search.rescore.QueryRescorerBuilder; import org.elasticsearch.search.searchafter.SearchAfterBuilder; import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.ScoreSortBuilder; @@ -561,6 +562,57 @@ public class SearchSourceBuilderTests extends ESTestCase { } } + /** + * test that we can parse the `rescore` element either as single object or as array + */ + public void testParseRescore() throws IOException { + { + String restContent = "{\n" + + " \"query\" : {\n" + + " \"match\": { \"content\": { \"query\": \"foo bar\" }}\n" + + " },\n" + + " \"rescore\": {" + + " \"window_size\": 50,\n" + + " \"query\": {\n" + + " \"rescore_query\" : {\n" + + " \"match\": { \"content\": { \"query\": \"baz\" } }\n" + + " }\n" + + " }\n" + + " }\n" + + "}\n"; + try (XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) { + SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.fromXContent(createParseContext(parser), + aggParsers, suggesters); + assertEquals(1, searchSourceBuilder.rescores().size()); + assertEquals(new QueryRescorerBuilder(QueryBuilders.matchQuery("content", "baz")).windowSize(50), + searchSourceBuilder.rescores().get(0)); + } + } + + { + String restContent = "{\n" + + " \"query\" : {\n" + + " \"match\": { \"content\": { \"query\": \"foo bar\" }}\n" + + " },\n" + + " \"rescore\": [ {" + + " \"window_size\": 50,\n" + + " \"query\": {\n" + + " \"rescore_query\" : {\n" + + " \"match\": { \"content\": { \"query\": \"baz\" } }\n" + + " }\n" + + " }\n" + + " } ]\n" + + "}\n"; + try (XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) { + SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.fromXContent(createParseContext(parser), + aggParsers, suggesters); + assertEquals(1, searchSourceBuilder.rescores().size()); + assertEquals(new QueryRescorerBuilder(QueryBuilders.matchQuery("content", "baz")).windowSize(50), + searchSourceBuilder.rescores().get(0)); + } + } + } + public void testEmptyPostFilter() throws IOException { SearchSourceBuilder builder = new SearchSourceBuilder(); builder.postFilter(new EmptyQueryBuilder()); diff --git a/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java b/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java index 1a258abc9cc..90f5c65c066 100644 --- a/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java +++ 
b/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java @@ -1321,29 +1321,6 @@ public class ChildQuerySearchIT extends ESIntegTestCase { } } - public void testAddParentFieldAfterIndexingParentDocButBeforeIndexingChildDoc() throws Exception { - assertAcked(prepareCreate("test") - .setSettings(Settings.builder() - .put(indexSettings()) - .put("index.refresh_interval", -1))); - ensureGreen(); - - String parentId = "p1"; - client().prepareIndex("test", "parent", parentId).setSource("p_field", "1").get(); - refresh(); - - try { - assertAcked(client().admin() - .indices() - .preparePutMapping("test") - .setType("child") - .setSource("_parent", "type=parent")); - fail("Shouldn't be able the add the _parent field pointing to an already existing parent type"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), equalTo("can't add a _parent field that points to an already existing type")); - } - } - public void testParentChildCaching() throws Exception { assertAcked(prepareCreate("test") .setSettings( diff --git a/core/src/test/java/org/elasticsearch/snapshots/FsBlobStoreRepositoryIT.java b/core/src/test/java/org/elasticsearch/snapshots/FsBlobStoreRepositoryIT.java index fceedff8e70..84c3a03f2c8 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/FsBlobStoreRepositoryIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/FsBlobStoreRepositoryIT.java @@ -20,7 +20,7 @@ package org.elasticsearch.snapshots; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; -import org.elasticsearch.test.ESBlobStoreRepositoryIntegTestCase; +import org.elasticsearch.repositories.ESBlobStoreRepositoryIntegTestCase; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; diff --git a/core/src/test/resources/indices/bwc/index-2.3.3.zip b/core/src/test/resources/indices/bwc/index-2.3.3.zip new file mode 100644 index 00000000000..aced41714fd Binary files /dev/null and b/core/src/test/resources/indices/bwc/index-2.3.3.zip differ diff --git a/core/src/test/resources/indices/bwc/repo-2.3.3.zip b/core/src/test/resources/indices/bwc/repo-2.3.3.zip new file mode 100644 index 00000000000..b94020ee800 Binary files /dev/null and b/core/src/test/resources/indices/bwc/repo-2.3.3.zip differ diff --git a/dev-tools/smoke_test_rc.py b/dev-tools/smoke_test_rc.py index 244ab8caa25..39db9929a54 100644 --- a/dev-tools/smoke_test_rc.py +++ b/dev-tools/smoke_test_rc.py @@ -75,6 +75,7 @@ DEFAULT_PLUGINS = ["analysis-icu", "mapper-murmur3", "mapper-size", "repository-azure", + "repository-gcs", "repository-hdfs", "repository-s3", "store-smb"] diff --git a/distribution/src/main/resources/bin/elasticsearch-systemd-pre-exec b/distribution/src/main/resources/bin/elasticsearch-systemd-pre-exec index a51d639bf7d..5a5877598e6 100755 --- a/distribution/src/main/resources/bin/elasticsearch-systemd-pre-exec +++ b/distribution/src/main/resources/bin/elasticsearch-systemd-pre-exec @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash # CONF_FILE setting was removed if [ ! 
-z "$CONF_FILE" ]; then diff --git a/distribution/src/main/resources/bin/elasticsearch.in.sh b/distribution/src/main/resources/bin/elasticsearch.in.sh index 8f1b5566f90..58b26a2d6eb 100644 --- a/distribution/src/main/resources/bin/elasticsearch.in.sh +++ b/distribution/src/main/resources/bin/elasticsearch.in.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash # check in case a user was using this mechanism if [ "x$ES_CLASSPATH" != "x" ]; then diff --git a/docs/plugins/repository-gcs.asciidoc b/docs/plugins/repository-gcs.asciidoc new file mode 100644 index 00000000000..bed78b4cbbf --- /dev/null +++ b/docs/plugins/repository-gcs.asciidoc @@ -0,0 +1,216 @@ +[[repository-gcs]] +=== Google Cloud Storage Repository Plugin + +The GCS repository plugin adds support for using the https://cloud.google.com/storage/[Google Cloud Storage] +service as a repository for {ref}/modules-snapshots.html[Snapshot/Restore]. + +[[repository-gcs-install]] +[float] +==== Installation + +This plugin can be installed using the plugin manager: + +[source,sh] +---------------------------------------------------------------- +sudo bin/elasticsearch-plugin install repository-gcs +---------------------------------------------------------------- + +NOTE: The plugin requires new permission to be installed in order to work + +The plugin must be installed on every node in the cluster, and each node must +be restarted after installation. + +[[repository-gcs-remove]] +[float] +==== Removal + +The plugin can be removed with the following command: + +[source,sh] +---------------------------------------------------------------- +sudo bin/elasticsearch-plugin remove repository-gcs +---------------------------------------------------------------- + +The node must be stopped before removing the plugin. + +[[repository-gcs-usage]] +==== Getting started + +The plugin uses the https://cloud.google.com/storage/docs/json_api/[Google Cloud Storage JSON API] (v1) +to connect to the Storage service. If this is the first time you use Google Cloud Storage, you first +need to connect to the https://console.cloud.google.com/[Google Cloud Platform Console] and create a new +project. Once your project is created, you must enable the Cloud Storage Service for your project. + +[[repository-gcs-creating-bucket]] +===== Creating a Bucket + +Google Cloud Storage service uses the concept of https://cloud.google.com/storage/docs/key-terms[Bucket] +as a container for all the data. Buckets are usually created using the +https://console.cloud.google.com/[Google Cloud Platform Console]. The plugin will not automatically +create buckets. + +To create a new bucket: + +1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console] +2. Select your project +3. Got to the https://console.cloud.google.com/storage/browser[Storage Browser] +4. Click the "Create Bucket" button +5. Enter a the name of the new bucket +6. Select a storage class +7. Select a location +8. Click the "Create" button + +The bucket should now be created. + +[[repository-gcs-service-authentication]] +===== Service Authentication + +The plugin supports two authentication modes: + +* the built-in <>. This mode is +recommended if your elasticsearch node is running on a Compute Engine virtual machine. + +* the <> authentication mode. + +[[repository-gcs-using-compute-engine]] +===== Using Compute Engine +When running on Compute Engine, the plugin use the Google's built-in authentication mechanism to +authenticate on the Storage service. 
Compute Engine virtual machines are usually associated with a +default service account. This service account can be found in the VM instance details in the +https://console.cloud.google.com/compute/[Compute Engine console]. + +To indicate that a repository should use the built-in authentication, +the repository `service_account` setting must be set to `_default_`: + +[source,json] +---- +PUT _snapshot/my_gcs_repository_on_compute_engine +{ + "type": "gcs", + "settings": { + "bucket": "my_bucket", + "service_account": "_default_" + } +} +---- +// CONSOLE + +NOTE: The Compute Engine VM must be allowed to use the Storage service. This can be done only at VM +creation time, when "Storage" access can be configured with "Read/Write" permission. Check your +instance details in the section "Cloud API access scopes". + +[[repository-gcs-using-service-account]] +===== Using a Service Account +If your Elasticsearch node is not running on Compute Engine, or if you don't want to use Google's +built-in authentication mechanism, you can authenticate on the Storage service using a +https://cloud.google.com/iam/docs/overview#service_account[Service Account] file. + +To create a service account file: +1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console] +2. Select your project +3. Go to the https://console.cloud.google.com/permissions[Permission] tab +4. Select the https://console.cloud.google.com/permissions/serviceaccounts[Service Accounts] tab +5. Click on "Create service account" +6. Once created, select the new service account and download a JSON key file + +A service account file looks like this: + +[source,json] +---- +{ + "type": "service_account", + "project_id": "your-project-id", + "private_key_id": "...", + "private_key": "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n", + "client_email": "service-account-for-your-repository@your-project-id.iam.gserviceaccount.com", + "client_id": "...", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://accounts.google.com/o/oauth2/token", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "..." +} +---- + +This file must be copied into the `config` directory of the Elasticsearch installation and on +every node of the cluster. + +To indicate that a repository should use a service account file: + +[source,json] +---- +PUT _snapshot/my_gcs_repository +{ + "type": "gcs", + "settings": { + "bucket": "my_bucket", + "service_account": "service_account.json" + } +} +---- +// CONSOLE + + +[[repository-gcs-bucket-permission]] +===== Set Bucket Permission + +The service account used to access the bucket must have "Writer" access to the bucket: + +1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console] +2. Select your project +3. Go to the https://console.cloud.google.com/storage/browser[Storage Browser] +4. Select the bucket and "Edit bucket permission" +5. The service account must be configured as a "User" with "Writer" access + + +[[repository-gcs-repository]] +==== Create a Repository + +Once everything is installed and every node is started, you can create a new repository that +uses Google Cloud Storage to store snapshots: + +[source,json] +---- +PUT _snapshot/my_gcs_repository +{ + "type": "gcs", + "settings": { + "bucket": "my_bucket", + "service_account": "service_account.json" + } +} +---- +// CONSOLE + +The following settings are supported: + +`bucket`:: + + The name of the bucket to be used for snapshots.
(Mandatory) + +`service_account`:: + + The service account to use. It can be a relative path to a service account JSON file +or the value `_default_`, which indicates that the built-in Compute Engine service account +should be used. + +`base_path`:: + + Specifies the path within the bucket to the repository data. Defaults to + the root of the bucket. + +`chunk_size`:: + + Big files can be broken down into chunks during snapshotting if needed. + The chunk size can be specified in bytes or by using size value notation, + e.g. `1g`, `10m`, `5k`. Defaults to `100m`. + +`compress`:: + + When set to `true` metadata files are stored in compressed format. This + setting doesn't affect index files that are already compressed by default. + Defaults to `false`. + +`application_name`:: + + Name used by the plugin when it uses the Google Cloud JSON API. Setting + a custom name can be useful to authenticate your cluster when request + statistics are logged in the Google Cloud Platform. Defaults to `repository-gcs`. diff --git a/docs/plugins/repository.asciidoc b/docs/plugins/repository.asciidoc index 08557b9e03e..9a4e90bebd7 100644 --- a/docs/plugins/repository.asciidoc +++ b/docs/plugins/repository.asciidoc @@ -22,6 +22,10 @@ The Azure repository plugin adds support for using Azure as a repository. The Hadoop HDFS Repository plugin adds support for using HDFS as a repository. +<>:: + +The GCS repository plugin adds support for using the Google Cloud Storage service as a repository. + [float] === Community contributed repository plugins @@ -37,3 +41,4 @@ include::repository-s3.asciidoc[] include::repository-hdfs.asciidoc[] +include::repository-gcs.asciidoc[] diff --git a/docs/reference/docs.asciidoc b/docs/reference/docs.asciidoc index 465d2e60c77..f3b30e7f0c3 100644 --- a/docs/reference/docs.asciidoc +++ b/docs/reference/docs.asciidoc @@ -27,6 +27,8 @@ include::docs/get.asciidoc[] include::docs/delete.asciidoc[] +include::docs/delete-by-query.asciidoc[] + include::docs/update.asciidoc[] include::docs/update-by-query.asciidoc[] diff --git a/docs/reference/docs/delete-by-query.asciidoc b/docs/reference/docs/delete-by-query.asciidoc new file mode 100644 index 00000000000..1562d8c515d --- /dev/null +++ b/docs/reference/docs/delete-by-query.asciidoc @@ -0,0 +1,318 @@ +[[docs-delete-by-query]] +== Delete By Query API + +experimental[The delete-by-query API is new and should still be considered experimental. The API may change in ways that are not backwards compatible] + +The simplest usage of `_delete_by_query` just performs a deletion on every +document that matches a query. Here is the API: + +[source,js] +-------------------------------------------------- +POST twitter/_delete_by_query +{ + "query": { <1> + "match": { + "message": "some message" + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:big_twitter] + +<1> The query must be passed as a value to the `query` key, in the same +way as the <>. You can also use the `q` +parameter in the same way as the search API.
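For instance, the example above could be written with the `q` query-string parameter instead of a request body (a sketch, assuming the standard URI search syntax applies here just as the note above suggests it does for `_search`):

[source,js]
--------------------------------------------------
POST twitter/_delete_by_query?q=message:some+message
--------------------------------------------------
// CONSOLE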
+ +That will return something like this: + +[source,js] +-------------------------------------------------- +{ + "took" : 147, + "timed_out": false, + "deleted": 119, + "batches": 1, + "version_conflicts": 0, + "noops": 0, + "retries": { + "bulk": 0, + "search": 0 + }, + "throttled_millis": 0, + "requests_per_second": "unlimited", + "throttled_until_millis": 0, + "total": 119, + "failures" : [ ] +} +-------------------------------------------------- +// TESTRESPONSE[s/"took" : 147/"took" : "$body.took"/] + +`_delete_by_query` gets a snapshot of the index when it starts and deletes what +it finds using `internal` versioning. That means that you'll get a version +conflict if the document changes between the time when the snapshot was taken +and when the delete request is processed. When the versions match, the document +is deleted. + +During the `_delete_by_query` execution, multiple search requests are sequentially +executed in order to find all the matching documents to delete. Every time a batch +of documents is found, a corresponding bulk request is executed to delete all +these documents. If a search or bulk request is rejected, `_delete_by_query` + relies on a default policy to retry rejected requests (up to 10 times, with + exponential backoff). Reaching the maximum retries limit causes the `_delete_by_query` + to abort, and all failures are returned in the `failures` element of the response. + The deletions that have been performed still stick. In other words, the process + is not rolled back, only aborted. While the first failure causes the abort, all + failures that are returned by the failing bulk request are included in the `failures` + element, so it's possible for there to be quite a few. + +If you'd like to count version conflicts rather than cause them to abort then +set `conflicts=proceed` on the URL or `"conflicts": "proceed"` in the request body. + +Back to the API format, you can limit `_delete_by_query` to a single type. This +will only delete `tweet` documents from the `twitter` index: + +[source,js] +-------------------------------------------------- +POST twitter/tweet/_delete_by_query?conflicts=proceed +{ + "query": { + "match_all": {} + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] + +It's also possible to delete documents from multiple indexes and multiple +types at once, just like the search API: + +[source,js] +-------------------------------------------------- +POST twitter,blog/tweet,post/_delete_by_query +{ + "query": { + "match_all": {} + } +} +-------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT twitter\nPUT blog\nGET _cluster\/health?wait_for_status=yellow\n/] + +If you provide `routing` then the routing is copied to the scroll query, +limiting the process to the shards that match that routing value: + +[source,js] +-------------------------------------------------- +POST twitter/_delete_by_query?routing=1 +{ + "query": { + "range" : { + "age" : { + "gte" : 10 + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] + +By default `_delete_by_query` uses scroll batches of 1000.
You can change the +batch size with the `scroll_size` URL parameter: + +[source,js] +-------------------------------------------------- +POST twitter/_delete_by_query?scroll_size=5000 +{ + "query": { + "term": { + "user": "kimchy" + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] + + +[float] +=== URL Parameters + +In addition to the standard parameters like `pretty`, the Delete By Query API +also supports `refresh`, `wait_for_completion`, `consistency`, and `timeout`. + +Sending the `refresh` parameter will refresh all shards involved in the delete by query +once the request completes. This is different from the Delete API's `refresh` +parameter, which causes just the shard that received the delete request +to be refreshed. + +If the request contains `wait_for_completion=false` then Elasticsearch will +perform some preflight checks, launch the request, and then return a `task` +which can be used with <> to cancel +or get the status of the task. For now, once the request is finished the task +is gone and the only place to look for the ultimate result of the task is in +the Elasticsearch log file. This will be fixed soon. + +`consistency` controls how many copies of a shard must respond to each write +request. `timeout` controls how long each write request waits for unavailable +shards to become available. Both work exactly how they work in the +<>. + +`requests_per_second` can be set to any decimal number (`1.4`, `6`, `1000`, etc.) +and throttles the number of requests per second that the delete by query issues. +The throttling is done by waiting between bulk batches so that it can manipulate +the scroll timeout. The wait time is the difference between the time the batch is +allowed to take at the requested rate (`requests_in_the_batch / requests_per_second` +seconds) and the time it actually took to complete; see the worked example below. +Since the batch isn't broken into multiple bulk requests, large batch sizes will +cause Elasticsearch to create many requests and then wait for a while before +starting the next set. This is "bursty" instead of "smooth". The default is +`unlimited`, which is also the only non-number value that it accepts. + +[float] +=== Response body + +The JSON response looks like this: + +[source,js] +-------------------------------------------------- +{ + "took" : 639, + "deleted": 0, + "batches": 1, + "version_conflicts": 2, + "retries": 0, + "throttled_millis": 0, + "failures" : [ ] +} +-------------------------------------------------- + +`took`:: + +The number of milliseconds from start to end of the whole operation. + +`deleted`:: + +The number of documents that were successfully deleted. + +`batches`:: + +The number of scroll responses pulled back by the delete by query. + +`version_conflicts`:: + +The number of version conflicts that the delete by query hit. + +`retries`:: + +The number of retries that the delete by query did in response to a full queue. + +`throttled_millis`:: + +Number of milliseconds the request slept to conform to `requests_per_second`. + +`failures`:: + +Array of all indexing failures. If this is non-empty then the request aborted +because of those failures. See `conflicts` for how to prevent version conflicts +from aborting the operation.
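As a worked example of the `requests_per_second` arithmetic described above (the numbers here are purely illustrative, not defaults, and `batch_write_time` is just a label for how long the bulk request itself took): with `requests_per_second=500` and scroll batches of `1000` documents, each batch is allowed `1000 / 500 = 2` seconds, so if the bulk request completes in `.5` seconds the delete by query waits out the remaining `1.5` seconds before starting the next batch:

[source,txt]
--------------------------------------------------
target_time = requests_in_the_batch / requests_per_second
            = 1000 requests / 500 requests-per-second = 2 seconds
wait_time   = target_time - batch_write_time
            = 2 seconds - .5 seconds = 1.5 seconds
--------------------------------------------------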
+ + +[float] +[[docs-delete-by-query-task-api]] +=== Works with the Task API + +While Delete By Query is running you can fetch its status using the +<>: + +[source,js] +-------------------------------------------------- +GET _tasks?detailed=true&action=*/delete/byquery +-------------------------------------------------- +// CONSOLE + +The response looks like: + +[source,js] +-------------------------------------------------- +{ + "nodes" : { + "r1A2WoRbTwKZ516z6NEs5A" : { + "name" : "Tyrannus", + "transport_address" : "127.0.0.1:9300", + "host" : "127.0.0.1", + "ip" : "127.0.0.1:9300", + "attributes" : { + "testattr" : "test", + "portsfile" : "true" + }, + "tasks" : { + "r1A2WoRbTwKZ516z6NEs5A:36619" : { + "node" : "r1A2WoRbTwKZ516z6NEs5A", + "id" : 36619, + "type" : "transport", + "action" : "indices:data/write/delete/byquery", + "status" : { <1> + "total" : 6154, + "updated" : 0, + "created" : 0, + "deleted" : 3500, + "batches" : 36, + "version_conflicts" : 0, + "noops" : 0, + "retries": 0, + "throttled_millis": 0 + }, + "description" : "" + } + } + } + } +} +-------------------------------------------------- + +<1> This object contains the actual status. It is just like the response JSON +with the important addition of the `total` field. `total` is the total number +of operations that the delete by query expects to perform. You can estimate the +progress by adding the `updated`, `created`, and `deleted` fields. The request +will finish when their sum is equal to the `total` field. + + +[float] +[[docs-delete-by-query-cancel-task-api]] +=== Works with the Cancel Task API + +Any Delete By Query can be canceled using the <>: + +[source,js] +-------------------------------------------------- +POST _tasks/taskid:1/_cancel +-------------------------------------------------- +// CONSOLE + +The `task_id` can be found using the tasks API above. + +Cancellation should happen quickly but might take a few seconds. The task status +API above will continue to list the task until it wakes to cancel itself. + + +[float] +[[docs-delete-by-query-rethrottle]] +=== Rethrottling + +The value of `requests_per_second` can be changed on a running delete by query +using the `_rethrottle` API: + +[source,js] +-------------------------------------------------- +POST _delete_by_query/taskid:1/_rethrottle?requests_per_second=unlimited +-------------------------------------------------- +// CONSOLE + +The `task_id` can be found using the tasks API above. + +Just like when setting it on the `_delete_by_query` API, `requests_per_second` +can be either `unlimited` to disable throttling or any decimal number like `1.7` +or `12` to throttle to that level. Rethrottling that speeds up the query takes +effect immediately, but rethrottling that slows down the query will take effect +after completing the current batch. This prevents scroll timeouts. diff --git a/docs/reference/docs/update-by-query.asciidoc b/docs/reference/docs/update-by-query.asciidoc index e5a62ddf33a..ac4da4251be 100644 --- a/docs/reference/docs/update-by-query.asciidoc +++ b/docs/reference/docs/update-by-query.asciidoc @@ -61,7 +61,7 @@ and the time when it attempted to update the document. This is fine because that update will have picked up the online mapping update. Back to the API format, you can limit `_update_by_query` to a single type.
This -will only update `tweet`s from the `twitter` index: +will only update `tweet` documents from the `twitter` index: [source,js] -------------------------------------------------- @@ -119,7 +119,7 @@ Just as in <> you can set `ctx.op = "noop"` if your script decides that it doesn't have to make any changes. That will cause `_update_by_query` to omit that document from its updates. Setting `ctx.op` to anything else is an error. If you want to delete by a query you can use the -{plugins}/plugins-delete-by-query.html[Delete by Query plugin] instead. Setting any +<> instead. Setting any other field in `ctx` is an error. Note that we stopped specifying `conflicts=proceed`. In this case we want a diff --git a/docs/reference/migration/migrate_5_0/settings.asciidoc b/docs/reference/migration/migrate_5_0/settings.asciidoc index e916ec6b24c..85895d65b67 100644 --- a/docs/reference/migration/migrate_5_0/settings.asciidoc +++ b/docs/reference/migration/migrate_5_0/settings.asciidoc @@ -258,3 +258,9 @@ Previously script mode settings (e.g., "script.inline: true", Prior to 5.0 a third option could be specified for the `script.inline` and `script.stored` settings ("sandbox"). This has been removed; you can now only set `script.inline: true` or `script.stored: true`. + +==== Search settings + +The setting `index.query.bool.max_clause_count` has been removed. In order to +set the maximum number of boolean clauses, `indices.query.bool.max_clause_count` +should be used instead. diff --git a/docs/reference/modules/snapshots.asciidoc b/docs/reference/modules/snapshots.asciidoc index 6c5245ce646..0f87744d317 100644 --- a/docs/reference/modules/snapshots.asciidoc +++ b/docs/reference/modules/snapshots.asciidoc @@ -162,6 +162,7 @@ Other repository backends are available in these official plugins: * {plugins}/repository-s3.html[repository-s3] for S3 repository support * {plugins}/repository-hdfs.html[repository-hdfs] for HDFS repository support in Hadoop environments * {plugins}/repository-azure.html[repository-azure] for Azure storage repositories +* {plugins}/repository-gcs.html[repository-gcs] for Google Cloud Storage repositories [float] ===== Repository Verification diff --git a/docs/reference/query-dsl/parent-id-query.asciidoc b/docs/reference/query-dsl/parent-id-query.asciidoc index 713e19c26ce..a29073dafa9 100644 --- a/docs/reference/query-dsl/parent-id-query.asciidoc +++ b/docs/reference/query-dsl/parent-id-query.asciidoc @@ -28,6 +28,8 @@ PUT /my_index } } } + +GET /_cluster/health?wait_for_status=yellow ------------------------------------------ // CONSOLE // TESTSETUP @@ -73,7 +75,7 @@ This query has two required parameters: `id`:: The required parent id that selected documents must refer to. -`ignore_unmapped`:: When set to `true` this will ignore an unmapped `type` and will not match any +`ignore_unmapped`:: When set to `true` this will ignore an unmapped `type` and will not match any documents for this query. This can be useful when querying multiple indexes which might have different mappings. When set to `false` (the default value) the query will throw an exception if the `type` is not mapped. diff --git a/docs/reference/search/count.asciidoc b/docs/reference/search/count.asciidoc index 9be219f5e74..859455e89b7 100644 --- a/docs/reference/search/count.asciidoc +++ b/docs/reference/search/count.asciidoc @@ -10,15 +10,21 @@ body.
Here is an example: [source,js] -------------------------------------------------- -$ curl -XGET 'http://localhost:9200/twitter/tweet/_count?q=user:kimchy' +PUT /twitter/tweet/1?refresh +{ + "user": "kimchy" +} -$ curl -XGET 'http://localhost:9200/twitter/tweet/_count' -d ' +GET /twitter/tweet/_count?q=user:kimchy + +GET /twitter/tweet/_count { "query" : { "term" : { "user" : "kimchy" } } -}' +} -------------------------------------------------- +//CONSOLE NOTE: The query being sent in the body must be nested in a `query` key, same as the <> works @@ -37,6 +43,7 @@ tweets from the twitter index for a certain user. The result is: } } -------------------------------------------------- +// TESTRESPONSE The query is optional, and when not provided, it will use `match_all` to count all the docs. diff --git a/docs/reference/search/request/explain.asciidoc b/docs/reference/search/request/explain.asciidoc index 81dc110c263..9bcaecb4840 100644 --- a/docs/reference/search/request/explain.asciidoc +++ b/docs/reference/search/request/explain.asciidoc @@ -5,6 +5,7 @@ Enables explanation for each hit on how its score was computed. [source,js] -------------------------------------------------- +GET /_search { "explain": true, "query" : { @@ -12,3 +13,4 @@ Enables explanation for each hit on how its score was computed. } } -------------------------------------------------- +// CONSOLE diff --git a/docs/reference/search/request/fielddata-fields.asciidoc b/docs/reference/search/request/fielddata-fields.asciidoc index aaaa606980e..f3a3508b144 100644 --- a/docs/reference/search/request/fielddata-fields.asciidoc +++ b/docs/reference/search/request/fielddata-fields.asciidoc @@ -6,13 +6,15 @@ example: [source,js] -------------------------------------------------- +GET /_search { "query" : { - ... + "match_all": {} }, "fielddata_fields" : ["test1", "test2"] } -------------------------------------------------- +// CONSOLE Field data fields can work on fields that are not stored. diff --git a/docs/reference/search/request/fields.asciidoc b/docs/reference/search/request/fields.asciidoc index e929928d427..3483d470ee2 100644 --- a/docs/reference/search/request/fields.asciidoc +++ b/docs/reference/search/request/fields.asciidoc @@ -11,6 +11,7 @@ by a search hit. [source,js] -------------------------------------------------- +GET /_search { "fields" : ["user", "postDate"], "query" : { @@ -18,6 +19,7 @@ by a search hit. } } -------------------------------------------------- +// CONSOLE `*` can be used to load all stored fields from the document. @@ -26,6 +28,7 @@ returned, for example: [source,js] -------------------------------------------------- +GET /_search { "fields" : [], "query" : { @@ -33,6 +36,7 @@ returned, for example: } } -------------------------------------------------- +// CONSOLE For backwards compatibility, if the fields parameter specifies fields which are not stored (`store` mapping set to diff --git a/docs/reference/search/request/from-size.asciidoc b/docs/reference/search/request/from-size.asciidoc index 2e170dc2604..1c44a7ca8d2 100644 --- a/docs/reference/search/request/from-size.asciidoc +++ b/docs/reference/search/request/from-size.asciidoc @@ -12,6 +12,7 @@ defaults to `10`. [source,js] -------------------------------------------------- +GET /_search { "from" : 0, "size" : 10, "query" : { @@ -19,6 +20,8 @@ defaults to `10`. 
} } -------------------------------------------------- +// CONSOLE + Note that `from` + `size` can not be more than the `index.max_result_window` index setting which defaults to 10,000. See the <> or <> diff --git a/docs/reference/search/request/index-boost.asciidoc b/docs/reference/search/request/index-boost.asciidoc index 29d1da3885c..bf766ce8a8c 100644 --- a/docs/reference/search/request/index-boost.asciidoc +++ b/docs/reference/search/request/index-boost.asciidoc @@ -8,6 +8,7 @@ graph where each user has an index). [source,js] -------------------------------------------------- +GET /_search { "indices_boost" : { "index1" : 1.4, @@ -15,3 +16,4 @@ graph where each user has an index). } } -------------------------------------------------- +// CONSOLE diff --git a/docs/reference/search/request/min-score.asciidoc b/docs/reference/search/request/min-score.asciidoc index f5a212ebf8e..d9dbef99ddf 100644 --- a/docs/reference/search/request/min-score.asciidoc +++ b/docs/reference/search/request/min-score.asciidoc @@ -6,6 +6,7 @@ in `min_score`: [source,js] -------------------------------------------------- +GET /_search { "min_score": 0.5, "query" : { @@ -13,6 +14,7 @@ in `min_score`: } } -------------------------------------------------- +// CONSOLE Note, most times, this does not make much sense, but is provided for advanced use cases. diff --git a/docs/reference/search/request/named-queries-and-filters.asciidoc b/docs/reference/search/request/named-queries-and-filters.asciidoc index 96d7c1357a9..0fb60253938 100644 --- a/docs/reference/search/request/named-queries-and-filters.asciidoc +++ b/docs/reference/search/request/named-queries-and-filters.asciidoc @@ -5,21 +5,25 @@ Each filter and query can accept a `_name` in its top level definition. [source,js] -------------------------------------------------- +GET /_search { - "bool" : { - "should" : [ - {"match" : { "name.first" : {"query" : "shay", "_name" : "first"} }}, - {"match" : { "name.last" : {"query" : "banon", "_name" : "last"} }} - ], - "filter" : { - "terms" : { - "name.last" : ["banon", "kimchy"], - "_name" : "test" + "query": { + "bool" : { + "should" : [ + {"match" : { "name.first" : {"query" : "shay", "_name" : "first"} }}, + {"match" : { "name.last" : {"query" : "banon", "_name" : "last"} }} + ], + "filter" : { + "terms" : { + "name.last" : ["banon", "kimchy"], + "_name" : "test" + } } } } } -------------------------------------------------- +// CONSOLE The search response will include for each hit the `matched_queries` it matched on. The tagging of queries and filters only make sense for the `bool` query. diff --git a/docs/reference/search/request/post-filter.asciidoc b/docs/reference/search/request/post-filter.asciidoc index 7bd95400312..493b4261c82 100644 --- a/docs/reference/search/request/post-filter.asciidoc +++ b/docs/reference/search/request/post-filter.asciidoc @@ -5,14 +5,43 @@ The `post_filter` is applied to the search `hits` at the very end of a search request, after aggregations have already been calculated. 
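In Java-client terms the query, the aggregations, and the `post_filter` all sit on one `SearchRequestBuilder`; a minimal sketch of the pattern, borrowing the shirts example developed below (an illustration, not part of this change):

[source,java]
--------------------------------------------------
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.aggregations.AggregationBuilders;

public class PostFilterExample {
    /** Aggregates colors over all Gucci shirts, then narrows the hits to red ones. */
    public static SearchResponse redGucciShirts(Client client) {
        return client.prepareSearch("shirts")
                .setQuery(QueryBuilders.boolQuery()
                        .filter(QueryBuilders.termQuery("brand", "gucci")))
                .addAggregation(AggregationBuilders.terms("colors").field("color"))
                // runs against the hits only, after the aggregation is computed
                .setPostFilter(QueryBuilders.termQuery("color", "red"))
                .get();
    }
}
--------------------------------------------------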
Its purpose is best explained by example: -Imagine that you are selling shirts, and the user has specified two filters: +Imagine that you are selling shirts that have the following properties: + +[source,js] +------------------------------------------------- +PUT /shirts +{ + "mappings": { + "item": { + "properties": { + "brand": { "type": "keyword"}, + "color": { "type": "keyword"}, + "model": { "type": "keyword"} + } + } + } +} + +PUT /shirts/item/1?refresh +{ + "brand": "gucci", + "color": "red", + "model": "slim" +} +------------------------------------------------ +// CONSOLE +// TESTSETUP + + +Imagine a user has specified two filters: + `color:red` and `brand:gucci`. You only want to show them red shirts made by Gucci in the search results. Normally you would do this with a <>: [source,js] -------------------------------------------------- -curl -XGET localhost:9200/shirts/_search -d ' +GET /shirts/_search { "query": { "bool": { @@ -23,8 +52,8 @@ curl -XGET localhost:9200/shirts/_search -d ' } } } -' -------------------------------------------------- +// CONSOLE However, you would also like to use _faceted navigation_ to display a list of other options that the user could click on. Perhaps you have a `model` field @@ -36,7 +65,7 @@ This can be done with a [source,js] -------------------------------------------------- -curl -XGET localhost:9200/shirts/_search -d ' +GET /shirts/_search { "query": { "bool": { @@ -52,8 +81,8 @@ curl -XGET localhost:9200/shirts/_search -d ' } } } -' -------------------------------------------------- +// CONSOLE <1> Returns the most popular models of red shirts by Gucci. But perhaps you would also like to tell the user how many Gucci shirts are @@ -67,12 +96,12 @@ the `post_filter`: [source,js] -------------------------------------------------- -curl -XGET localhost:9200/shirts/_search -d ' +GET /shirts/_search { "query": { "bool": { "filter": { - { "term": { "brand": "gucci" }} <1> + "term": { "brand": "gucci" } <1> } } }, @@ -95,8 +124,8 @@ curl -XGET localhost:9200/shirts/_search -d ' "term": { "color": "red" } } } -' -------------------------------------------------- +// CONSOLE <1> The main query now finds all shirts by Gucci, regardless of color. <2> The `colors` agg returns popular colors for shirts by Gucci. <3> The `color_red` agg limits the `models` sub-aggregation diff --git a/docs/reference/search/request/preference.asciidoc b/docs/reference/search/request/preference.asciidoc index 0d07f29475e..3d6c6b40cb9 100644 --- a/docs/reference/search/request/preference.asciidoc +++ b/docs/reference/search/request/preference.asciidoc @@ -56,7 +56,7 @@ for the user: [source,js] ------------------------------------------------ -curl localhost:9200/_search?preference=xyzabc123 -d ' +GET /_search?preference=xyzabc123 { "query": { "match": { @@ -64,7 +64,6 @@ curl localhost:9200/_search?preference=xyzabc123 -d ' } } } -' ------------------------------------------------ - +// CONSOLE diff --git a/docs/reference/search/request/query.asciidoc b/docs/reference/search/request/query.asciidoc index e496320bd97..fa06d0d9bb4 100644 --- a/docs/reference/search/request/query.asciidoc +++ b/docs/reference/search/request/query.asciidoc @@ -6,9 +6,11 @@ query using the <>. 
[source,js] -------------------------------------------------- +GET /_search { "query" : { "term" : { "user" : "kimchy" } } } -------------------------------------------------- +// CONSOLE diff --git a/docs/reference/search/request/script-fields.asciidoc b/docs/reference/search/request/script-fields.asciidoc index 596aba31d82..6e054f02e1c 100644 --- a/docs/reference/search/request/script-fields.asciidoc +++ b/docs/reference/search/request/script-fields.asciidoc @@ -6,9 +6,10 @@ evaluation>> (based on different fields) for each hit, for example: [source,js] -------------------------------------------------- +GET /_search { "query" : { - ... + "match_all": {} }, "script_fields" : { "test1" : { @@ -25,6 +26,8 @@ evaluation>> (based on different fields) for each hit, for example: } } -------------------------------------------------- +// CONSOLE + Script fields can work on fields that are not stored (`my_field_name` in the above case), and allow to return custom values to be returned (the @@ -36,9 +39,10 @@ type). Here is an example: [source,js] -------------------------------------------------- +GET /_search { "query" : { - ... + "match_all": {} }, "script_fields" : { "test1" : { @@ -47,6 +51,7 @@ type). Here is an example: } } -------------------------------------------------- +// CONSOLE Note the `_source` keyword here to navigate the json-like model. diff --git a/docs/reference/search/request/source-filtering.asciidoc b/docs/reference/search/request/source-filtering.asciidoc index 8458d37806c..08625751eec 100644 --- a/docs/reference/search/request/source-filtering.asciidoc +++ b/docs/reference/search/request/source-filtering.asciidoc @@ -13,6 +13,7 @@ To disable `_source` retrieval set to `false`: [source,js] -------------------------------------------------- +GET /_search { "_source": false, "query" : { @@ -20,6 +21,7 @@ To disable `_source` retrieval set to `false`: } } -------------------------------------------------- +// CONSOLE The `_source` also accepts one or more wildcard patterns to control what parts of the `_source` should be returned: @@ -27,6 +29,7 @@ For example: [source,js] -------------------------------------------------- +GET /_search { "_source": "obj.*", "query" : { @@ -34,11 +37,13 @@ For example: } } -------------------------------------------------- +// CONSOLE Or [source,js] -------------------------------------------------- +GET /_search { "_source": [ "obj1.*", "obj2.*" ], "query" : { @@ -46,11 +51,13 @@ Or } } -------------------------------------------------- +// CONSOLE Finally, for complete control, you can specify both include and exclude patterns: [source,js] -------------------------------------------------- +GET /_search { "_source": { "include": [ "obj1.*", "obj2.*" ], @@ -61,3 +68,4 @@ Finally, for complete control, you can specify both include and exclude patterns } } -------------------------------------------------- +// CONSOLE diff --git a/docs/reference/search/request/version.asciidoc b/docs/reference/search/request/version.asciidoc index 3b2329a828a..57c6ce27feb 100644 --- a/docs/reference/search/request/version.asciidoc +++ b/docs/reference/search/request/version.asciidoc @@ -5,6 +5,7 @@ Returns a version for each search hit. [source,js] -------------------------------------------------- +GET /_search { "version": true, "query" : { @@ -12,3 +13,4 @@ Returns a version for each search hit. 
} } -------------------------------------------------- +// CONSOLE diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/EquivalenceTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/EquivalenceTests.java index 6f83746d4ce..deed43abf12 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/EquivalenceTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/EquivalenceTests.java @@ -36,7 +36,7 @@ import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.range.Range; import org.elasticsearch.search.aggregations.bucket.range.Range.Bucket; -import org.elasticsearch.search.aggregations.bucket.range.RangeAggregatorBuilder; +import org.elasticsearch.search.aggregations.bucket.range.RangeAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory; import org.elasticsearch.search.aggregations.metrics.sum.Sum; @@ -122,7 +122,7 @@ public class EquivalenceTests extends ESIntegTestCase { } } - RangeAggregatorBuilder query = range("range").field("values"); + RangeAggregationBuilder query = range("range").field("values"); for (int i = 0; i < ranges.length; ++i) { String key = Integer.toString(i); if (ranges[i][0] == Double.NEGATIVE_INFINITY) { diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/MinDocCountTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/MinDocCountTests.java index 1c17c1966e5..662d4d2f30c 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/MinDocCountTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/MinDocCountTests.java @@ -35,7 +35,7 @@ import org.elasticsearch.search.aggregations.bucket.AbstractTermsTestCase; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorBuilder; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; import org.elasticsearch.test.ESIntegTestCase; import org.joda.time.DateTime; @@ -113,17 +113,17 @@ public class MinDocCountTests extends AbstractTermsTestCase { private enum Script { NO { @Override - TermsAggregatorBuilder apply(TermsAggregatorBuilder builder, String field) { + TermsAggregationBuilder apply(TermsAggregationBuilder builder, String field) { return builder.field(field); } }, YES { @Override - TermsAggregatorBuilder apply(TermsAggregatorBuilder builder, String field) { + TermsAggregationBuilder apply(TermsAggregationBuilder builder, String field) { return builder.script(new org.elasticsearch.script.Script("doc['" + field + "'].values")); } }; - abstract TermsAggregatorBuilder apply(TermsAggregatorBuilder builder, String field); + abstract TermsAggregationBuilder apply(TermsAggregationBuilder builder, String field); } // check that terms2 is a subset of terms1 diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/TDigestPercentileRanksTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/TDigestPercentileRanksTests.java 
index 2e59b798297..7e76b3f03eb 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/TDigestPercentileRanksTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/TDigestPercentileRanksTests.java @@ -33,7 +33,7 @@ import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.AbstractNumericTestCase; import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanks; -import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanksAggregatorBuilder; +import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanksAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesMethod; import java.util.Arrays; @@ -88,7 +88,7 @@ public class TDigestPercentileRanksTests extends AbstractNumericTestCase { return percents; } - private static PercentileRanksAggregatorBuilder randomCompression(PercentileRanksAggregatorBuilder builder) { + private static PercentileRanksAggregationBuilder randomCompression(PercentileRanksAggregationBuilder builder) { if (randomBoolean()) { builder.compression(randomIntBetween(20, 120) + randomDouble()); } @@ -462,4 +462,4 @@ public class TDigestPercentileRanksTests extends AbstractNumericTestCase { } } -} \ No newline at end of file +} diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/TDigestPercentilesTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/TDigestPercentilesTests.java index 69d3c281ca8..712c9ebd951 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/TDigestPercentilesTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/TDigestPercentilesTests.java @@ -33,7 +33,7 @@ import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.AbstractNumericTestCase; import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; import org.elasticsearch.search.aggregations.metrics.percentiles.Percentiles; -import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesAggregatorBuilder; +import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesMethod; import java.util.Arrays; @@ -87,7 +87,7 @@ public class TDigestPercentilesTests extends AbstractNumericTestCase { return percentiles; } - private static PercentilesAggregatorBuilder randomCompression(PercentilesAggregatorBuilder builder) { + private static PercentilesAggregationBuilder randomCompression(PercentilesAggregationBuilder builder) { if (randomBoolean()) { builder.compression(randomIntBetween(20, 120) + randomDouble()); } @@ -446,4 +446,4 @@ public class TDigestPercentilesTests extends AbstractNumericTestCase { } } -} \ No newline at end of file +} diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java index f8561d17acd..ee2f5484737 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java @@ -75,7 +75,7 @@ import static 
org.elasticsearch.search.sort.SortBuilders.fieldSort; * Abstract base for scrolling across a search and executing bulk actions on all results. All package private methods are package private so * their tests can use them. Most methods run in the listener thread pool because they are meant to be fast and don't expect to block. */ -public abstract class AbstractAsyncBulkByScrollAction, Response> { +public abstract class AbstractAsyncBulkByScrollAction> { /** * The request for this action. Named mainRequest because we create lots of request variables all representing child * requests of this mainRequest. @@ -92,12 +92,13 @@ public abstract class AbstractAsyncBulkByScrollAction listener; + private final ActionListener listener; private final BackoffPolicy backoffPolicy; private final Retry bulkRetry; public AbstractAsyncBulkByScrollAction(BulkByScrollTask task, ESLogger logger, ParentTaskAssigningClient client, - ThreadPool threadPool, Request mainRequest, SearchRequest firstSearchRequest, ActionListener listener) { + ThreadPool threadPool, Request mainRequest, SearchRequest firstSearchRequest, + ActionListener listener) { this.task = task; this.logger = logger; this.client = client; @@ -111,8 +112,13 @@ public abstract class AbstractAsyncBulkByScrollAction docs); - protected abstract Response buildResponse(TimeValue took, List indexingFailures, List searchFailures, - boolean timedOut); + /** + * Build the response for reindex actions. + */ + protected BulkIndexByScrollResponse buildResponse(TimeValue took, List indexingFailures, + List searchFailures, boolean timedOut) { + return new BulkIndexByScrollResponse(took, task.getStatus(), indexingFailures, searchFailures, timedOut); + } /** * Start the action by firing the initial search request. diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java index df9245346b3..0e3f3678851 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java @@ -20,15 +20,16 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.client.ParentTaskAssigningClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.internal.IdFieldMapper; import org.elasticsearch.index.mapper.internal.IndexFieldMapper; import org.elasticsearch.index.mapper.internal.ParentFieldMapper; @@ -40,6 +41,7 @@ import org.elasticsearch.index.mapper.internal.TypeFieldMapper; import org.elasticsearch.index.mapper.internal.VersionFieldMapper; import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.ExecutableScript; +import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptContext; import
org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchHit; @@ -47,9 +49,9 @@ import org.elasticsearch.search.SearchHitField; import org.elasticsearch.threadpool.ThreadPool; import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.function.BiFunction; import static java.util.Collections.emptyMap; @@ -57,91 +59,106 @@ import static java.util.Collections.emptyMap; * Abstract base for scrolling across a search and executing bulk indexes on all * results. */ -public abstract class AbstractAsyncBulkIndexByScrollAction> - extends AbstractAsyncBulkByScrollAction { +public abstract class AbstractAsyncBulkIndexByScrollAction> + extends AbstractAsyncBulkByScrollAction { - private final ScriptService scriptService; - private final CompiledScript script; + protected final ScriptService scriptService; + protected final ClusterState clusterState; - public AbstractAsyncBulkIndexByScrollAction(BulkByScrollTask task, ESLogger logger, ScriptService scriptService, ClusterState state, - ParentTaskAssigningClient client, ThreadPool threadPool, Request mainRequest, SearchRequest firstSearchRequest, - ActionListener listener) { + /** + * This BiFunction is used to apply various changes depending on the Reindex action and the search hit, + * from copying search hit metadata (parent, routing, etc) to potentially transforming the + * {@link RequestWrapper} completely. + */ + private final BiFunction, SearchHit, RequestWrapper> scriptApplier; + + public AbstractAsyncBulkIndexByScrollAction(BulkByScrollTask task, ESLogger logger, ParentTaskAssigningClient client, + ThreadPool threadPool, Request mainRequest, SearchRequest firstSearchRequest, + ActionListener listener, + ScriptService scriptService, ClusterState clusterState) { super(task, logger, client, threadPool, mainRequest, firstSearchRequest, listener); this.scriptService = scriptService; - if (mainRequest.getScript() == null) { - script = null; - } else { - script = scriptService.compile(mainRequest.getScript(), ScriptContext.Standard.UPDATE, emptyMap(), state); - } - } - - @Override - protected BulkIndexByScrollResponse buildResponse(TimeValue took, List indexingFailures, - List searchFailures, boolean timedOut) { - return new BulkIndexByScrollResponse(took, task.getStatus(), indexingFailures, searchFailures, timedOut); + this.clusterState = clusterState; + this.scriptApplier = Objects.requireNonNull(buildScriptApplier(), "script applier must not be null"); } /** - * Build the IndexRequest for a single search hit. This shouldn't handle - * metadata or the script. That will be handled by copyMetadata and - * applyScript functions that can be overridden. + * Build the {@link BiFunction} to apply to all {@link RequestWrapper}. */ - protected abstract IndexRequest buildIndexRequest(SearchHit doc); + protected BiFunction, SearchHit, RequestWrapper> buildScriptApplier() { + // The default script applier executes a no-op + return (request, searchHit) -> request; + } @Override protected BulkRequest buildBulk(Iterable docs) { BulkRequest bulkRequest = new BulkRequest(); - ExecutableScript executableScript = null; - Map scriptCtx = null; - for (SearchHit doc : docs) { - if (doc.hasSource()) { - /* - * Either the document didn't store _source or we didn't fetch it for some reason. Since we don't allow the user to - * change the "fields" part of the search request it is unlikely that we got here because we didn't fetch _source. - * Thus the error message assumes that it wasn't stored. 
- */ - throw new IllegalArgumentException("[" + doc.index() + "][" + doc.type() + "][" + doc.id() + "] didn't store _source"); - } - IndexRequest index = buildIndexRequest(doc); - copyMetadata(index, doc); - if (script != null) { - if (executableScript == null) { - executableScript = scriptService.executable(script, mainRequest.getScript().getParams()); - scriptCtx = new HashMap<>(); - } - if (false == applyScript(index, doc, executableScript, scriptCtx)) { - continue; + if (accept(doc)) { + RequestWrapper request = scriptApplier.apply(copyMetadata(buildRequest(doc), doc), doc); + if (request != null) { + bulkRequest.add(request.self()); } } - bulkRequest.add(index); } - return bulkRequest; } /** - * Copies the metadata from a hit to the index request. + * Used to accept or ignore a search hit. Ignored search hits will be excluded + * from the bulk request. It is also where we fail on invalid search hits, like + * when the document has no source but it's required. */ - protected void copyMetadata(IndexRequest index, SearchHit doc) { - index.parent(fieldValue(doc, ParentFieldMapper.NAME)); - copyRouting(index, doc); - // Comes back as a Long but needs to be a string - Long timestamp = fieldValue(doc, TimestampFieldMapper.NAME); - if (timestamp != null) { - index.timestamp(timestamp.toString()); - } - Long ttl = fieldValue(doc, TTLFieldMapper.NAME); - if (ttl != null) { - index.ttl(ttl); + protected boolean accept(SearchHit doc) { + if (doc.hasSource()) { + /* + * Either the document didn't store _source or we didn't fetch it for some reason. Since we don't allow the user to + * change the "fields" part of the search request it is unlikely that we got here because we didn't fetch _source. + * Thus the error message assumes that it wasn't stored. + */ + throw new IllegalArgumentException("[" + doc.index() + "][" + doc.type() + "][" + doc.id() + "] didn't store _source"); } + return true; } /** - * Part of copyMetadata but called out individual for easy overwriting. + * Build the {@link RequestWrapper} for a single search hit. This shouldn't handle + * metadata or scripting. That will be handled by copyMetadata and + * apply functions that can be overridden. */ - protected void copyRouting(IndexRequest index, SearchHit doc) { - index.routing(fieldValue(doc, RoutingFieldMapper.NAME)); + protected abstract RequestWrapper buildRequest(SearchHit doc); + + /** + * Copies the metadata from a hit to the request. + */ + protected RequestWrapper copyMetadata(RequestWrapper request, SearchHit doc) { + copyParent(request, fieldValue(doc, ParentFieldMapper.NAME)); + copyRouting(request, fieldValue(doc, RoutingFieldMapper.NAME)); + + // Comes back as a Long but needs to be a string + Long timestamp = fieldValue(doc, TimestampFieldMapper.NAME); + if (timestamp != null) { + request.setTimestamp(timestamp.toString()); + } + Long ttl = fieldValue(doc, TTLFieldMapper.NAME); + if (ttl != null) { + request.setTtl(ttl); + } + return request; + } + + /** + * Copy the parent from a search hit to the request. + */ + protected void copyParent(RequestWrapper request, String parent) { + request.setParent(parent); + } + + /** + * Copy the routing from a search hit to the request. 
+ */ + protected void copyRouting(RequestWrapper request, String routing) { + request.setRouting(routing); } protected T fieldValue(SearchHit doc, String fieldName) { @@ -150,106 +167,327 @@ public abstract class AbstractAsyncBulkIndexByScrollAction ctx) { - if (script == null) { - return true; - } - ctx.put(IndexFieldMapper.NAME, doc.index()); - ctx.put(TypeFieldMapper.NAME, doc.type()); - ctx.put(IdFieldMapper.NAME, doc.id()); - Long oldVersion = doc.getVersion(); - ctx.put(VersionFieldMapper.NAME, oldVersion); - String oldParent = fieldValue(doc, ParentFieldMapper.NAME); - ctx.put(ParentFieldMapper.NAME, oldParent); - String oldRouting = fieldValue(doc, RoutingFieldMapper.NAME); - ctx.put(RoutingFieldMapper.NAME, oldRouting); - Long oldTimestamp = fieldValue(doc, TimestampFieldMapper.NAME); - ctx.put(TimestampFieldMapper.NAME, oldTimestamp); - Long oldTTL = fieldValue(doc, TTLFieldMapper.NAME); - ctx.put(TTLFieldMapper.NAME, oldTTL); - ctx.put(SourceFieldMapper.NAME, index.sourceAsMap()); - ctx.put("op", "update"); - script.setNextVar("ctx", ctx); - script.run(); - Map resultCtx = (Map) script.unwrap(ctx); - String newOp = (String) resultCtx.remove("op"); - if (newOp == null) { - throw new IllegalArgumentException("Script cleared op!"); - } - if ("noop".equals(newOp)) { - task.countNoop(); - return false; - } - if (false == "update".equals(newOp)) { - throw new IllegalArgumentException("Invalid op [" + newOp + ']'); - } + interface RequestWrapper> { - /* - * It'd be lovely to only set the source if we know its been modified - * but it isn't worth keeping two copies of it around just to check! - */ - index.source((Map) resultCtx.remove(SourceFieldMapper.NAME)); + void setIndex(String index); - Object newValue = ctx.remove(IndexFieldMapper.NAME); - if (false == doc.index().equals(newValue)) { - scriptChangedIndex(index, newValue); - } - newValue = ctx.remove(TypeFieldMapper.NAME); - if (false == doc.type().equals(newValue)) { - scriptChangedType(index, newValue); - } - newValue = ctx.remove(IdFieldMapper.NAME); - if (false == doc.id().equals(newValue)) { - scriptChangedId(index, newValue); - } - newValue = ctx.remove(VersionFieldMapper.NAME); - if (false == Objects.equals(oldVersion, newValue)) { - scriptChangedVersion(index, newValue); - } - newValue = ctx.remove(ParentFieldMapper.NAME); - if (false == Objects.equals(oldParent, newValue)) { - scriptChangedParent(index, newValue); - } - /* - * Its important that routing comes after parent in case you want to - * change them both. 
- */ - newValue = ctx.remove(RoutingFieldMapper.NAME); - if (false == Objects.equals(oldRouting, newValue)) { - scriptChangedRouting(index, newValue); - } - newValue = ctx.remove(TimestampFieldMapper.NAME); - if (false == Objects.equals(oldTimestamp, newValue)) { - scriptChangedTimestamp(index, newValue); - } - newValue = ctx.remove(TTLFieldMapper.NAME); - if (false == Objects.equals(oldTTL, newValue)) { - scriptChangedTTL(index, newValue); - } - if (false == ctx.isEmpty()) { - throw new IllegalArgumentException("Invalid fields added to ctx [" + String.join(",", ctx.keySet()) + ']'); - } - return true; + void setType(String type); + + void setId(String id); + + void setVersion(long version); + + void setVersionType(VersionType versionType); + + void setParent(String parent); + + void setRouting(String routing); + + void setTimestamp(String timestamp); + + void setTtl(Long ttl); + + void setSource(Map source); + + Map getSource(); + + Self self(); } - protected abstract void scriptChangedIndex(IndexRequest index, Object to); + /** + * {@link RequestWrapper} for {@link IndexRequest} + */ + public static class IndexRequestWrapper implements RequestWrapper { - protected abstract void scriptChangedType(IndexRequest index, Object to); + private final IndexRequest request; - protected abstract void scriptChangedId(IndexRequest index, Object to); + IndexRequestWrapper(IndexRequest request) { + this.request = Objects.requireNonNull(request, "Wrapped IndexRequest can not be null"); + } - protected abstract void scriptChangedVersion(IndexRequest index, Object to); + @Override + public void setIndex(String index) { + request.index(index); + } - protected abstract void scriptChangedRouting(IndexRequest index, Object to); + @Override + public void setType(String type) { + request.type(type); + } - protected abstract void scriptChangedParent(IndexRequest index, Object to); + @Override + public void setId(String id) { + request.id(id); + } - protected abstract void scriptChangedTimestamp(IndexRequest index, Object to); + @Override + public void setVersion(long version) { + request.version(version); + } - protected abstract void scriptChangedTTL(IndexRequest index, Object to); + @Override + public void setVersionType(VersionType versionType) { + request.versionType(versionType); + } + + @Override + public void setParent(String parent) { + request.parent(parent); + } + + @Override + public void setRouting(String routing) { + request.routing(routing); + } + + @Override + public void setTimestamp(String timestamp) { + request.timestamp(timestamp); + } + + @Override + public void setTtl(Long ttl) { + if (ttl == null) { + request.ttl((TimeValue) null); + } else { + request.ttl(ttl); + } + } + + @Override + public Map getSource() { + return request.sourceAsMap(); + } + + @Override + public void setSource(Map source) { + request.source(source); + } + + @Override + public IndexRequest self() { + return request; + } + } + + /** + * Wraps a {@link IndexRequest} in a {@link RequestWrapper} + */ + static RequestWrapper wrap(IndexRequest request) { + return new IndexRequestWrapper(request); + } + + /** + * {@link RequestWrapper} for {@link DeleteRequest} + */ + public static class DeleteRequestWrapper implements RequestWrapper { + + private final DeleteRequest request; + + DeleteRequestWrapper(DeleteRequest request) { + this.request = Objects.requireNonNull(request, "Wrapped DeleteRequest can not be null"); + } + + @Override + public void setIndex(String index) { + request.index(index); + } + + @Override + public void 
setType(String type) { + request.type(type); + } + + @Override + public void setId(String id) { + request.id(id); + } + + @Override + public void setVersion(long version) { + request.version(version); + } + + @Override + public void setVersionType(VersionType versionType) { + request.versionType(versionType); + } + + @Override + public void setParent(String parent) { + request.parent(parent); + } + + @Override + public void setRouting(String routing) { + request.routing(routing); + } + + @Override + public void setTimestamp(String timestamp) { + throw new UnsupportedOperationException("unable to set [timestamp] on action request [" + request.getClass() + "]"); + } + + @Override + public void setTtl(Long ttl) { + throw new UnsupportedOperationException("unable to set [ttl] on action request [" + request.getClass() + "]"); + } + + @Override + public Map getSource() { + throw new UnsupportedOperationException("unable to get source from action request [" + request.getClass() + "]"); + } + + @Override + public void setSource(Map source) { + throw new UnsupportedOperationException("unable to set [source] on action request [" + request.getClass() + "]"); + } + + @Override + public DeleteRequest self() { + return request; + } + } + + /** + * Wraps a {@link DeleteRequest} in a {@link RequestWrapper} + */ + static RequestWrapper wrap(DeleteRequest request) { + return new DeleteRequestWrapper(request); + } + + /** + * Apply a {@link Script} to a {@link RequestWrapper} + */ + public abstract class ScriptApplier implements BiFunction, SearchHit, RequestWrapper> { + + private final BulkByScrollTask task; + private final ScriptService scriptService; + private final ClusterState state; + private final Script script; + private final Map params; + + private ExecutableScript executable; + private Map context; + + public ScriptApplier(BulkByScrollTask task, ScriptService scriptService, Script script, ClusterState state, + Map params) { + this.task = task; + this.scriptService = scriptService; + this.script = script; + this.state = state; + this.params = params; + } + + @Override + @SuppressWarnings("unchecked") + public RequestWrapper apply(RequestWrapper request, SearchHit doc) { + if (script == null) { + return request; + } + if (executable == null) { + CompiledScript compiled = scriptService.compile(script, ScriptContext.Standard.UPDATE, emptyMap(), state); + executable = scriptService.executable(compiled, params); + } + if (context == null) { + context = new HashMap<>(); + } + + context.put(IndexFieldMapper.NAME, doc.index()); + context.put(TypeFieldMapper.NAME, doc.type()); + context.put(IdFieldMapper.NAME, doc.id()); + Long oldVersion = doc.getVersion(); + context.put(VersionFieldMapper.NAME, oldVersion); + String oldParent = fieldValue(doc, ParentFieldMapper.NAME); + context.put(ParentFieldMapper.NAME, oldParent); + String oldRouting = fieldValue(doc, RoutingFieldMapper.NAME); + context.put(RoutingFieldMapper.NAME, oldRouting); + Long oldTimestamp = fieldValue(doc, TimestampFieldMapper.NAME); + context.put(TimestampFieldMapper.NAME, oldTimestamp); + Long oldTTL = fieldValue(doc, TTLFieldMapper.NAME); + context.put(TTLFieldMapper.NAME, oldTTL); + context.put(SourceFieldMapper.NAME, request.getSource()); + context.put("op", "update"); + executable.setNextVar("ctx", context); + executable.run(); + + Map resultCtx = (Map) executable.unwrap(context); + String newOp = (String) resultCtx.remove("op"); + if (newOp == null) { + throw new IllegalArgumentException("Script cleared op!"); + } + if 
("noop".equals(newOp)) { + task.countNoop(); + return null; + } + if (false == "update".equals(newOp)) { + throw new IllegalArgumentException("Invalid op [" + newOp + ']'); + } + + /* + * It'd be lovely to only set the source if we know its been modified + * but it isn't worth keeping two copies of it around just to check! + */ + request.setSource((Map) resultCtx.remove(SourceFieldMapper.NAME)); + + Object newValue = context.remove(IndexFieldMapper.NAME); + if (false == doc.index().equals(newValue)) { + scriptChangedIndex(request, newValue); + } + newValue = context.remove(TypeFieldMapper.NAME); + if (false == doc.type().equals(newValue)) { + scriptChangedType(request, newValue); + } + newValue = context.remove(IdFieldMapper.NAME); + if (false == doc.id().equals(newValue)) { + scriptChangedId(request, newValue); + } + newValue = context.remove(VersionFieldMapper.NAME); + if (false == Objects.equals(oldVersion, newValue)) { + scriptChangedVersion(request, newValue); + } + newValue = context.remove(ParentFieldMapper.NAME); + if (false == Objects.equals(oldParent, newValue)) { + scriptChangedParent(request, newValue); + } + /* + * Its important that routing comes after parent in case you want to + * change them both. + */ + newValue = context.remove(RoutingFieldMapper.NAME); + if (false == Objects.equals(oldRouting, newValue)) { + scriptChangedRouting(request, newValue); + } + newValue = context.remove(TimestampFieldMapper.NAME); + if (false == Objects.equals(oldTimestamp, newValue)) { + scriptChangedTimestamp(request, newValue); + } + newValue = context.remove(TTLFieldMapper.NAME); + if (false == Objects.equals(oldTTL, newValue)) { + scriptChangedTTL(request, newValue); + } + if (false == context.isEmpty()) { + throw new IllegalArgumentException("Invalid fields added to context [" + String.join(",", context.keySet()) + ']'); + } + return request; + } + + protected abstract void scriptChangedIndex(RequestWrapper request, Object to); + + protected abstract void scriptChangedType(RequestWrapper request, Object to); + + protected abstract void scriptChangedId(RequestWrapper request, Object to); + + protected abstract void scriptChangedVersion(RequestWrapper request, Object to); + + protected abstract void scriptChangedRouting(RequestWrapper request, Object to); + + protected abstract void scriptChangedParent(RequestWrapper request, Object to); + + protected abstract void scriptChangedTimestamp(RequestWrapper request, Object to); + + protected abstract void scriptChangedTTL(RequestWrapper request, Object to); + } } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java index d98735d3fb4..e78a6a9c350 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.WriteConsistencyLevel; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.service.ClusterService; @@ -45,32 +46,6 @@ public abstract class AbstractBaseReindexRestHandler< TA extends TransportAction > extends BaseRestHandler { - /** - * @return requests_per_second from the request as a float if it was on 
the request, null otherwise - */ - public static Float parseRequestsPerSecond(RestRequest request) { - String requestsPerSecondString = request.param("requests_per_second"); - if (requestsPerSecondString == null) { - return null; - } - if ("unlimited".equals(requestsPerSecondString)) { - return Float.POSITIVE_INFINITY; - } - float requestsPerSecond; - try { - requestsPerSecond = Float.parseFloat(requestsPerSecondString); - } catch (NumberFormatException e) { - throw new IllegalArgumentException( - "[requests_per_second] must be a float greater than 0. Use \"unlimited\" to disable throttling.", e); - } - if (requestsPerSecond <= 0) { - // We validate here and in the setters because the setters use "Float.POSITIVE_INFINITY" instead of "unlimited" - throw new IllegalArgumentException( - "[requests_per_second] must be a float greater than 0. Use \"unlimited\" to disable throttling."); - } - return requestsPerSecond; - } - protected final IndicesQueriesRegistry indicesQueriesRegistry; protected final AggregatorParsers aggParsers; protected final Suggesters suggesters; @@ -88,41 +63,95 @@ public abstract class AbstractBaseReindexRestHandler< this.action = action; } - protected void execute(RestRequest request, Request internalRequest, RestChannel channel, - boolean includeCreated, boolean includeUpdated, boolean includeDeleted) throws IOException { - Float requestsPerSecond = parseRequestsPerSecond(request); - if (requestsPerSecond != null) { - internalRequest.setRequestsPerSecond(requestsPerSecond); - } + protected void handleRequest(RestRequest request, RestChannel channel, + boolean includeCreated, boolean includeUpdated, boolean includeDeleted) throws IOException { + // Build the internal request + Request internal = setCommonOptions(request, buildRequest(request)); + + // Executes the request and waits for completion if (request.paramAsBoolean("wait_for_completion", true)) { Map params = new HashMap<>(); params.put(BulkByScrollTask.Status.INCLUDE_CREATED, Boolean.toString(includeCreated)); params.put(BulkByScrollTask.Status.INCLUDE_UPDATED, Boolean.toString(includeUpdated)); params.put(BulkByScrollTask.Status.INCLUDE_DELETED, Boolean.toString(includeDeleted)); - action.execute(internalRequest, new BulkIndexByScrollResponseContentListener<>(channel, params)); + action.execute(internal, new BulkIndexByScrollResponseContentListener<>(channel, params)); return; } + /* * Lets try and validate before forking so the user gets some error. The * task can't totally validate until it starts but this is better than * nothing. */ - ActionRequestValidationException validationException = internalRequest.validate(); + ActionRequestValidationException validationException = internal.validate(); if (validationException != null) { channel.sendResponse(new BytesRestResponse(channel, validationException)); return; } - Task task = action.execute(internalRequest, LoggingTaskListener.instance()); - sendTask(channel, task); + sendTask(channel, action.execute(internal, LoggingTaskListener.instance())); + } + + /** + * Build the Request based on the RestRequest. + */ + protected abstract Request buildRequest(RestRequest request) throws IOException; + + /** + * Sets common options of {@link AbstractBulkByScrollRequest} requests. 
+ */ + protected Request setCommonOptions(RestRequest restRequest, Request request) { + assert restRequest != null : "RestRequest should not be null"; + assert request != null : "Request should not be null"; + + request.setRefresh(restRequest.paramAsBoolean("refresh", request.isRefresh())); + request.setTimeout(restRequest.paramAsTime("timeout", request.getTimeout())); + + String consistency = restRequest.param("consistency"); + if (consistency != null) { + request.setConsistency(WriteConsistencyLevel.fromString(consistency)); + } + + Float requestsPerSecond = parseRequestsPerSecond(restRequest); + if (requestsPerSecond != null) { + request.setRequestsPerSecond(requestsPerSecond); + } + return request; } private void sendTask(RestChannel channel, Task task) throws IOException { - XContentBuilder builder = channel.newBuilder(); - builder.startObject(); - builder.field("task", clusterService.localNode().getId() + ":" + task.getId()); - builder.endObject(); - channel.sendResponse(new BytesRestResponse(RestStatus.OK, builder)); + try (XContentBuilder builder = channel.newBuilder()) { + builder.startObject(); + builder.field("task", clusterService.localNode().getId() + ":" + task.getId()); + builder.endObject(); + channel.sendResponse(new BytesRestResponse(RestStatus.OK, builder)); + } + } + + /** + * @return requests_per_second from the request as a float if it was on the request, null otherwise + */ + public static Float parseRequestsPerSecond(RestRequest request) { + String requestsPerSecondString = request.param("requests_per_second"); + if (requestsPerSecondString == null) { + return null; + } + if ("unlimited".equals(requestsPerSecondString)) { + return Float.POSITIVE_INFINITY; + } + float requestsPerSecond; + try { + requestsPerSecond = Float.parseFloat(requestsPerSecondString); + } catch (NumberFormatException e) { + throw new IllegalArgumentException( + "[requests_per_second] must be a float greater than 0. Use \"unlimited\" to disable throttling.", e); + } + if (requestsPerSecond <= 0) { + // We validate here and in the setters because the setters use "Float.POSITIVE_INFINITY" instead of "unlimited" + throw new IllegalArgumentException( + "[requests_per_second] must be a float greater than 0. Use \"unlimited\" to disable throttling."); + } + return requestsPerSecond; } } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByQueryRestHandler.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByQueryRestHandler.java new file mode 100644 index 00000000000..926da3befdd --- /dev/null +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByQueryRestHandler.java @@ -0,0 +1,118 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.reindex; + +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.indices.query.IndicesQueriesRegistry; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.search.RestSearchAction; +import org.elasticsearch.rest.action.support.RestActions; +import org.elasticsearch.search.aggregations.AggregatorParsers; +import org.elasticsearch.search.suggest.Suggesters; + +import java.io.IOException; +import java.util.Map; +import java.util.function.Consumer; + +import static org.elasticsearch.index.reindex.AbstractBulkByScrollRequest.SIZE_ALL_MATCHES; + +/** + * Rest handler for reindex actions that accepts a search request like Update-By-Query or Delete-By-Query + */ +public abstract class AbstractBulkByQueryRestHandler< + Request extends AbstractBulkByScrollRequest, + TA extends TransportAction> extends AbstractBaseReindexRestHandler { + + protected AbstractBulkByQueryRestHandler(Settings settings, Client client, IndicesQueriesRegistry indicesQueriesRegistry, + AggregatorParsers aggParsers, Suggesters suggesters, ClusterService clusterService, + TA action) { + super(settings, client, indicesQueriesRegistry, aggParsers, suggesters, clusterService, action); + } + + protected void parseInternalRequest(Request internal, RestRequest restRequest, + Map> consumers) throws IOException { + assert internal != null : "Request should not be null"; + assert restRequest != null : "RestRequest should not be null"; + + SearchRequest searchRequest = internal.getSearchRequest(); + int scrollSize = searchRequest.source().size(); + searchRequest.source().size(SIZE_ALL_MATCHES); + + parseSearchRequest(searchRequest, restRequest, consumers); + + internal.setSize(searchRequest.source().size()); + searchRequest.source().size(restRequest.paramAsInt("scroll_size", scrollSize)); + + String conflicts = restRequest.param("conflicts"); + if (conflicts != null) { + internal.setConflicts(conflicts); + } + + // Let the requester set search timeout. It is probably only going to be useful for testing but who knows. + if (restRequest.hasParam("search_timeout")) { + searchRequest.source().timeout(restRequest.paramAsTime("search_timeout", null)); + } + } + + protected void parseSearchRequest(SearchRequest searchRequest, RestRequest restRequest, + Map> consumers) throws IOException { + assert searchRequest != null : "SearchRequest should not be null"; + assert restRequest != null : "RestRequest should not be null"; + + /* + * We can't send parseSearchRequest REST content that it doesn't support + * so we will have to remove the content that is valid in addition to + * what it supports from the content first. This is a temporary hack and + * should get better when SearchRequest has full ObjectParser support + * then we can delegate and stuff. + */ + BytesReference content = RestActions.hasBodyContent(restRequest) ? 
RestActions.getRestContent(restRequest) : null; + if ((content != null) && (consumers != null && consumers.size() > 0)) { + Tuple> body = XContentHelper.convertToMap(content, false); + boolean modified = false; + for (Map.Entry> consumer : consumers.entrySet()) { + Object value = body.v2().remove(consumer.getKey()); + if (value != null) { + consumer.getValue().accept(value); + modified = true; + } + } + + if (modified) { + try (XContentBuilder builder = XContentFactory.contentBuilder(body.v1())) { + content = builder.map(body.v2()).bytes(); + } + } + } + + RestSearchAction.parseSearchRequest(searchRequest, indicesQueriesRegistry, restRequest, parseFieldMatcher, aggParsers, + suggesters, content); + } +} diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryAction.java new file mode 100644 index 00000000000..c789e9c77b4 --- /dev/null +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryAction.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.reindex; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +public class DeleteByQueryAction extends Action { + + public static final DeleteByQueryAction INSTANCE = new DeleteByQueryAction(); + public static final String NAME = "indices:data/write/delete/byquery"; + + private DeleteByQueryAction() { + super(NAME); + } + + @Override + public DeleteByQueryRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new DeleteByQueryRequestBuilder(client, this); + } + + @Override + public BulkIndexByScrollResponse newResponse() { + return new BulkIndexByScrollResponse(); + } +} diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java new file mode 100644 index 00000000000..327459bd339 --- /dev/null +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java @@ -0,0 +1,79 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.reindex; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.search.SearchRequest; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +/** + * Creates a new {@link DeleteByQueryRequest} that uses scrolling and bulk requests to delete all documents matching + * the query. This can have performance as well as visibility implications. + * + * Delete-by-query now has the following semantics: + *

+ * <ul>
+ * <li>it's non-atomic, a delete-by-query may fail at any time while some documents matching the query have already been
+ * deleted</li>
+ * <li>it's syntactic sugar, a delete-by-query is equivalent to a scroll search and corresponding bulk-deletes by ID</li>
+ * <li>it's executed on a point-in-time snapshot, a delete-by-query will only delete the documents that are visible at the
+ * point in time the delete-by-query was started, equivalent to the scroll API</li>
+ * <li>it's consistent, a delete-by-query will yield consistent results across all replicas of a shard</li>
+ * <li>it's forward-compatible, a delete-by-query will only send IDs to the shards as deletes such that no queries are
+ * stored in the transaction logs that might not be supported in the future.</li>
+ * <li>its results won't be visible until the index is refreshed.</li>
+ * </ul>
+ */ +public class DeleteByQueryRequest extends AbstractBulkByScrollRequest { + + public DeleteByQueryRequest() { + } + + public DeleteByQueryRequest(SearchRequest search) { + super(search); + // Delete-By-Query does not require the source + search.source().fetchSource(false); + } + + @Override + protected DeleteByQueryRequest self() { + return this; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException e = super.validate(); + if (getSearchRequest().indices() == null || getSearchRequest().indices().length == 0) { + e = addValidationError("use _all if you really want to delete from all existing indexes", e); + } + if (getSearchRequest() == null || getSearchRequest().source() == null) { + e = addValidationError("source is missing", e); + } + return e; + } + + @Override + public String toString() { + StringBuilder b = new StringBuilder(); + b.append("delete-by-query "); + searchToString(b); + return b.toString(); + } +} diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequestBuilder.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequestBuilder.java new file mode 100644 index 00000000000..f4d8a91f4cb --- /dev/null +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequestBuilder.java @@ -0,0 +1,51 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.reindex; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +public class DeleteByQueryRequestBuilder extends + AbstractBulkByScrollRequestBuilder { + + public DeleteByQueryRequestBuilder(ElasticsearchClient client, + Action action) { + this(client, action, new SearchRequestBuilder(client, SearchAction.INSTANCE)); + } + + private DeleteByQueryRequestBuilder(ElasticsearchClient client, + Action action, + SearchRequestBuilder search) { + super(client, action, search, new DeleteByQueryRequest(search.request())); + } + + @Override + protected DeleteByQueryRequestBuilder self() { + return this; + } + + @Override + public DeleteByQueryRequestBuilder abortOnVersionConflict(boolean abortOnVersionConflict) { + request.setAbortOnVersionConflict(abortOnVersionConflict); + return this; + } +} diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexPlugin.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexPlugin.java index 6e42f56dece..e3a826dbdad 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexPlugin.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexPlugin.java @@ -39,12 +39,14 @@ public class ReindexPlugin extends Plugin { public void onModule(ActionModule actionModule) { actionModule.registerAction(ReindexAction.INSTANCE, TransportReindexAction.class); actionModule.registerAction(UpdateByQueryAction.INSTANCE, TransportUpdateByQueryAction.class); + actionModule.registerAction(DeleteByQueryAction.INSTANCE, TransportDeleteByQueryAction.class); actionModule.registerAction(RethrottleAction.INSTANCE, TransportRethrottleAction.class); } public void onModule(NetworkModule networkModule) { networkModule.registerRestHandler(RestReindexAction.class); networkModule.registerRestHandler(RestUpdateByQueryAction.class); + networkModule.registerRestHandler(RestDeleteByQueryAction.class); networkModule.registerRestHandler(RestRethrottleAction.class); networkModule.registerTaskStatus(BulkByScrollTask.Status.NAME, BulkByScrollTask.Status::new); } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestDeleteByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestDeleteByQueryAction.java new file mode 100644 index 00000000000..4750fe22313 --- /dev/null +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestDeleteByQueryAction.java @@ -0,0 +1,77 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.reindex; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.indices.query.IndicesQueriesRegistry; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.search.aggregations.AggregatorParsers; +import org.elasticsearch.search.suggest.Suggesters; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Consumer; + +import static org.elasticsearch.rest.RestRequest.Method.POST; + +public class RestDeleteByQueryAction extends AbstractBulkByQueryRestHandler { + + @Inject + public RestDeleteByQueryAction(Settings settings, RestController controller, Client client, + IndicesQueriesRegistry indicesQueriesRegistry, AggregatorParsers aggParsers, Suggesters suggesters, + ClusterService clusterService, TransportDeleteByQueryAction action) { + super(settings, client, indicesQueriesRegistry, aggParsers, suggesters, clusterService, action); + controller.registerHandler(POST, "/{index}/_delete_by_query", this); + controller.registerHandler(POST, "/{index}/{type}/_delete_by_query", this); + } + + @Override + protected void handleRequest(RestRequest request, RestChannel channel, Client client) throws Exception { + if (false == request.hasContent()) { + throw new ElasticsearchException("_delete_by_query requires a request body"); + } + handleRequest(request, channel, false, false, true); + } + + @Override + protected DeleteByQueryRequest buildRequest(RestRequest request) throws IOException { + /* + * Passing the search request through DeleteByQueryRequest first allows + * it to set its own defaults which differ from SearchRequest's + * defaults. Then the parseInternalRequest can override them. 
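+ *
+ * For reference, a request that ends up parsed here might look like the
+ * following (the index name and the query are illustrative):
+ *
+ *   POST /my-index/_delete_by_query
+ *   {
+ *     "query": { "match": { "user": "kimchy" } }
+ *   }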
+ */ + DeleteByQueryRequest internal = new DeleteByQueryRequest(new SearchRequest()); + + Map> consumers = new HashMap<>(); + consumers.put("conflicts", o -> internal.setConflicts((String) o)); + + parseInternalRequest(internal, request, consumers); + + return internal; + } +} diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java index d61980fb8ce..267994672d4 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.reindex; -import org.elasticsearch.action.WriteConsistencyLevel; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.client.Client; @@ -27,7 +27,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParseFieldMatcherSupplier; -import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -39,7 +38,6 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.indices.query.IndicesQueriesRegistry; -import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; @@ -53,13 +51,14 @@ import java.util.Map; import static org.elasticsearch.common.unit.TimeValue.parseTimeValue; import static org.elasticsearch.rest.RestRequest.Method.POST; -import static org.elasticsearch.rest.RestStatus.BAD_REQUEST; /** * Expose IndexBySearchRequest over rest. 
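 *
 * A typical body for the endpoint this class exposes (the index names are
 * illustrative, not part of this change):
 *
 *   POST /_reindex
 *   {
 *     "source": { "index": "source-index" },
 *     "dest":   { "index": "dest-index" }
 *   }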
*/ public class RestReindexAction extends AbstractBaseReindexRestHandler { + private static final ObjectParser PARSER = new ObjectParser<>("reindex"); + static { ObjectParser.Parser sourceParser = (parser, search, context) -> { /* @@ -114,41 +113,18 @@ public class RestReindexAction extends AbstractBaseReindexRestHandler internalRequest, RestRequest request) { - internalRequest.setRefresh(request.paramAsBoolean("refresh", internalRequest.isRefresh())); - internalRequest.setTimeout(request.paramAsTime("timeout", internalRequest.getTimeout())); - String consistency = request.param("consistency"); - if (consistency != null) { - internalRequest.setConsistency(WriteConsistencyLevel.fromString(consistency)); + PARSER.parse(xcontent, internal, new ReindexParseContext(indicesQueriesRegistry, aggParsers, suggesters, parseFieldMatcher)); } + return internal; } /** diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestRethrottleAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestRethrottleAction.java index a7c29d40a7d..382f5b51726 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestRethrottleAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestRethrottleAction.java @@ -39,6 +39,7 @@ public class RestRethrottleAction extends BaseRestHandler { super(settings, client); this.action = action; controller.registerHandler(POST, "/_update_by_query/{taskId}/_rethrottle", this); + controller.registerHandler(POST, "/_delete_by_query/{taskId}/_rethrottle", this); controller.registerHandler(POST, "/_reindex/{taskId}/_rethrottle", this); } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestUpdateByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestUpdateByQueryAction.java index 9f76be3f0ff..f7dbbf893a8 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestUpdateByQueryAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestUpdateByQueryAction.java @@ -22,31 +22,24 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.indices.query.IndicesQueriesRegistry; import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.search.RestSearchAction; -import org.elasticsearch.rest.action.support.RestActions; import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.AggregatorParsers; import org.elasticsearch.search.suggest.Suggesters; +import java.io.IOException; +import java.util.HashMap; import java.util.Map; +import java.util.function.Consumer; -import static org.elasticsearch.index.reindex.AbstractBulkByScrollRequest.SIZE_ALL_MATCHES; -import static org.elasticsearch.index.reindex.RestReindexAction.parseCommon; import static org.elasticsearch.rest.RestRequest.Method.POST; -public 
class RestUpdateByQueryAction extends AbstractBaseReindexRestHandler { +public class RestUpdateByQueryAction extends AbstractBulkByQueryRestHandler { @Inject public RestUpdateByQueryAction(Settings settings, RestController controller, Client client, @@ -59,60 +52,26 @@ public class RestUpdateByQueryAction extends AbstractBaseReindexRestHandler> body = XContentHelper.convertToMap(bodyContent, false); - boolean modified = false; - String conflicts = (String) body.v2().remove("conflicts"); - if (conflicts != null) { - internalRequest.setConflicts(conflicts); - modified = true; - } - @SuppressWarnings("unchecked") - Map script = (Map) body.v2().remove("script"); - if (script != null) { - internalRequest.setScript(Script.parse(script, false, parseFieldMatcher)); - modified = true; - } - if (modified) { - XContentBuilder builder = XContentFactory.contentBuilder(body.v1()); - builder.map(body.v2()); - bodyContent = builder.bytes(); - } - } - RestSearchAction.parseSearchRequest(internalRequest.getSearchRequest(), indicesQueriesRegistry, request, - parseFieldMatcher, aggParsers, suggesters, bodyContent); + UpdateByQueryRequest internal = new UpdateByQueryRequest(new SearchRequest()); - String conflicts = request.param("conflicts"); - if (conflicts != null) { - internalRequest.setConflicts(conflicts); - } - parseCommon(internalRequest, request); + Map> consumers = new HashMap<>(); + consumers.put("conflicts", o -> internal.setConflicts((String) o)); + consumers.put("script", o -> internal.setScript(Script.parse((Map)o, false, parseFieldMatcher))); - internalRequest.setSize(internalRequest.getSearchRequest().source().size()); - internalRequest.setPipeline(request.param("pipeline")); - internalRequest.getSearchRequest().source().size(request.paramAsInt("scroll_size", scrollSize)); - // Let the requester set search timeout. It is probably only going to be useful for testing but who knows. - if (request.hasParam("search_timeout")) { - internalRequest.getSearchRequest().source().timeout(request.paramAsTime("search_timeout", null)); - } + parseInternalRequest(internal, request, consumers); - execute(request, internalRequest, channel, false, true, false); + internal.setPipeline(request.param("pipeline")); + return internal; } } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportDeleteByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportDeleteByQueryAction.java new file mode 100644 index 00000000000..471bd066f94 --- /dev/null +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportDeleteByQueryAction.java @@ -0,0 +1,109 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.reindex; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.ParentTaskAssigningClient; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.mapper.internal.ParentFieldMapper; +import org.elasticsearch.index.mapper.internal.RoutingFieldMapper; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +public class TransportDeleteByQueryAction extends HandledTransportAction { + private final Client client; + private final ScriptService scriptService; + private final ClusterService clusterService; + + @Inject + public TransportDeleteByQueryAction(Settings settings, ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver resolver, Client client, TransportService transportService, + ScriptService scriptService, ClusterService clusterService) { + super(settings, DeleteByQueryAction.NAME, threadPool, transportService, actionFilters, resolver, DeleteByQueryRequest::new); + this.client = client; + this.scriptService = scriptService; + this.clusterService = clusterService; + } + + @Override + protected void doExecute(Task task, DeleteByQueryRequest request, ActionListener listener) { + ClusterState state = clusterService.state(); + ParentTaskAssigningClient client = new ParentTaskAssigningClient(this.client, clusterService.localNode(), task); + new AsyncDeleteBySearchAction((BulkByScrollTask) task, logger, client, threadPool, request, listener, scriptService, state).start(); + } + + @Override + protected void doExecute(DeleteByQueryRequest request, ActionListener listener) { + throw new UnsupportedOperationException("task required"); + } + + /** + * Implementation of delete-by-query using scrolling and bulk. 
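+ *
+ * Conceptually (a simplified sketch, not the literal control flow):
+ * <ol>
+ * <li>run the scroll search for the request's query</li>
+ * <li>turn each hit into a {@link DeleteRequest} carrying the hit's index, type, id and version</li>
+ * <li>send those deletes in bulk, then fetch the next scroll page</li>
+ * <li>repeat until the scroll is exhausted or the task is cancelled</li>
+ * </ol>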
+ */ + static class AsyncDeleteBySearchAction extends AbstractAsyncBulkIndexByScrollAction { + + public AsyncDeleteBySearchAction(BulkByScrollTask task, ESLogger logger, ParentTaskAssigningClient client, ThreadPool threadPool, + DeleteByQueryRequest request, ActionListener listener, + ScriptService scriptService, ClusterState clusterState) { + super(task, logger, client, threadPool, request, request.getSearchRequest(), listener, scriptService, clusterState); + } + + @Override + protected boolean accept(SearchHit doc) { + // Delete-by-query does not require the source to delete a document + // and the default implementation checks for it + return true; + } + + @Override + protected RequestWrapper buildRequest(SearchHit doc) { + DeleteRequest delete = new DeleteRequest(); + delete.index(doc.index()); + delete.type(doc.type()); + delete.id(doc.id()); + delete.version(doc.version()); + return wrap(delete); + } + + /** + * Overrides the parent {@link AbstractAsyncBulkIndexByScrollAction#copyMetadata(RequestWrapper, SearchHit)} + * method that is much more Update/Reindex oriented and so also copies things like timestamp/ttl which we + * don't care for a deletion. + */ + @Override + protected RequestWrapper copyMetadata(RequestWrapper request, SearchHit doc) { + copyParent(request, fieldValue(doc, ParentFieldMapper.NAME)); + copyRouting(request, fieldValue(doc, RoutingFieldMapper.NAME)); + return request; + } + } +} diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java index 0f07cc560c8..a49ba0a3b32 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java @@ -35,16 +35,18 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.mapper.internal.TTLFieldMapper; import org.elasticsearch.index.mapper.internal.VersionFieldMapper; +import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchHit; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.Map; import java.util.Objects; +import java.util.function.BiFunction; import static java.util.Objects.requireNonNull; import static org.elasticsearch.index.VersionType.INTERNAL; @@ -72,7 +74,7 @@ public class TransportReindexAction extends HandledTransportAction { - public AsyncIndexBySearchAction(BulkByScrollTask task, ESLogger logger, ScriptService scriptService, - ParentTaskAssigningClient client, ClusterState state, ThreadPool threadPool, ReindexRequest request, - ActionListener listener) { - super(task, logger, scriptService, state, client, threadPool, request, request.getSearchRequest(), listener); + + public AsyncIndexBySearchAction(BulkByScrollTask task, ESLogger logger, ParentTaskAssigningClient client, ThreadPool threadPool, + ReindexRequest request, ActionListener listener, + ScriptService scriptService, ClusterState clusterState) { + super(task, logger, client, threadPool, request, request.getSearchRequest(), listener, scriptService, clusterState); } @Override - protected 
IndexRequest buildIndexRequest(SearchHit doc) { + protected BiFunction, SearchHit, RequestWrapper> buildScriptApplier() { + Script script = mainRequest.getScript(); + if (script != null) { + return new ReindexScriptApplier(task, scriptService, script, clusterState, script.getParams()); + } + return super.buildScriptApplier(); + } + + @Override + protected RequestWrapper buildRequest(SearchHit doc) { IndexRequest index = new IndexRequest(); // Copy the index from the request so we always write where it asked to write @@ -161,109 +174,120 @@ public class TransportReindexAction extends HandledTransportAction request, String routing) { String routingSpec = mainRequest.getDestination().routing(); if (routingSpec == null) { - super.copyRouting(index, doc); + super.copyRouting(request, routing); return; } if (routingSpec.startsWith("=")) { - index.routing(mainRequest.getDestination().routing().substring(1)); + super.copyRouting(request, mainRequest.getDestination().routing().substring(1)); return; } switch (routingSpec) { case "keep": - super.copyRouting(index, doc); + super.copyRouting(request, routing); break; case "discard": - index.routing(null); + super.copyRouting(request, null); break; default: throw new IllegalArgumentException("Unsupported routing command"); } } - /* - * Methods below here handle script updating the index request. They try - * to be pretty liberal with regards to types because script are often - * dynamically typed. - */ - @Override - protected void scriptChangedIndex(IndexRequest index, Object to) { - requireNonNull(to, "Can't reindex without a destination index!"); - index.index(to.toString()); - } + class ReindexScriptApplier extends ScriptApplier { - @Override - protected void scriptChangedType(IndexRequest index, Object to) { - requireNonNull(to, "Can't reindex without a destination type!"); - index.type(to.toString()); - } - - @Override - protected void scriptChangedId(IndexRequest index, Object to) { - index.id(Objects.toString(to, null)); - } - - @Override - protected void scriptChangedVersion(IndexRequest index, Object to) { - if (to == null) { - index.version(Versions.MATCH_ANY).versionType(INTERNAL); - return; + ReindexScriptApplier(BulkByScrollTask task, ScriptService scriptService, Script script, ClusterState state, + Map params) { + super(task, scriptService, script, state, params); } - index.version(asLong(to, VersionFieldMapper.NAME)); - } - @Override - protected void scriptChangedParent(IndexRequest index, Object to) { - // Have to override routing with parent just in case its changed - String routing = Objects.toString(to, null); - index.parent(routing).routing(routing); - } - - @Override - protected void scriptChangedRouting(IndexRequest index, Object to) { - index.routing(Objects.toString(to, null)); - } - - @Override - protected void scriptChangedTimestamp(IndexRequest index, Object to) { - index.timestamp(Objects.toString(to, null)); - } - - @Override - protected void scriptChangedTTL(IndexRequest index, Object to) { - if (to == null) { - index.ttl((TimeValue) null); - return; - } - index.ttl(asLong(to, TTLFieldMapper.NAME)); - } - - private long asLong(Object from, String name) { /* - * Stuffing a number into the map will have converted it to - * some Number. + * Methods below here handle script updating the index request. They try + * to be pretty liberal with regards to types because script are often + * dynamically typed. 
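+ *
+ * For example (an illustrative script body, not shipped with this change), a
+ * reindex script may retarget a document by writing to the ctx map, which is
+ * dispatched to the scriptChanged* callbacks below:
+ *
+ *   if (ctx._source.user == 'kimchy') {
+ *     ctx._index = 'archive';
+ *     ctx._routing = null;
+ *   }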
*/ - Number fromNumber; - try { - fromNumber = (Number) from; - } catch (ClassCastException e) { - throw new IllegalArgumentException(name + " may only be set to an int or a long but was [" + from + "]", e); + + @Override + protected void scriptChangedIndex(RequestWrapper request, Object to) { + requireNonNull(to, "Can't reindex without a destination index!"); + request.setIndex(to.toString()); } - long l = fromNumber.longValue(); - // Check that we didn't round when we fetched the value. - if (fromNumber.doubleValue() != l) { - throw new IllegalArgumentException(name + " may only be set to an int or a long but was [" + from + "]"); + + @Override + protected void scriptChangedType(RequestWrapper request, Object to) { + requireNonNull(to, "Can't reindex without a destination type!"); + request.setType(to.toString()); + } + + @Override + protected void scriptChangedId(RequestWrapper request, Object to) { + request.setId(Objects.toString(to, null)); + } + + @Override + protected void scriptChangedVersion(RequestWrapper request, Object to) { + if (to == null) { + request.setVersion(Versions.MATCH_ANY); + request.setVersionType(INTERNAL); + } else { + request.setVersion(asLong(to, VersionFieldMapper.NAME)); + } + } + + @Override + protected void scriptChangedParent(RequestWrapper request, Object to) { + // Have to override routing with parent just in case its changed + String routing = Objects.toString(to, null); + request.setParent(routing); + request.setRouting(routing); + } + + @Override + protected void scriptChangedRouting(RequestWrapper request, Object to) { + request.setRouting(Objects.toString(to, null)); + } + + @Override + protected void scriptChangedTimestamp(RequestWrapper request, Object to) { + request.setTimestamp(Objects.toString(to, null)); + } + + @Override + protected void scriptChangedTTL(RequestWrapper request, Object to) { + if (to == null) { + request.setTtl(null); + } else { + request.setTtl(asLong(to, TTLFieldMapper.NAME)); + } + } + + private long asLong(Object from, String name) { + /* + * Stuffing a number into the map will have converted it to + * some Number. + * */ + Number fromNumber; + try { + fromNumber = (Number) from; + } catch (ClassCastException e) { + throw new IllegalArgumentException(name + " may only be set to an int or a long but was [" + from + "]", e); + } + long l = fromNumber.longValue(); + // Check that we didn't round when we fetched the value. 
+ if (fromNumber.doubleValue() != l) { + throw new IllegalArgumentException(name + " may only be set to an int or a long but was [" + from + "]"); + } + return l; } - return l; } } } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java index 4d702be4e9d..7459972ce64 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java @@ -39,12 +39,16 @@ import org.elasticsearch.index.mapper.internal.RoutingFieldMapper; import org.elasticsearch.index.mapper.internal.TTLFieldMapper; import org.elasticsearch.index.mapper.internal.TimestampFieldMapper; import org.elasticsearch.index.mapper.internal.TypeFieldMapper; +import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchHit; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.Map; +import java.util.function.BiFunction; + public class TransportUpdateByQueryAction extends HandledTransportAction { private final Client client; private final ScriptService scriptService; @@ -65,8 +69,7 @@ public class TransportUpdateByQueryAction extends HandledTransportAction listener) { ClusterState state = clusterService.state(); ParentTaskAssigningClient client = new ParentTaskAssigningClient(this.client, clusterService.localNode(), task); - new AsyncIndexBySearchAction((BulkByScrollTask) task, logger, scriptService, client, threadPool, state, request, listener) - .start(); + new AsyncIndexBySearchAction((BulkByScrollTask) task, logger, client, threadPool, request, listener, scriptService, state).start(); } @Override @@ -78,14 +81,24 @@ public class TransportUpdateByQueryAction extends HandledTransportAction { - public AsyncIndexBySearchAction(BulkByScrollTask task, ESLogger logger, ScriptService scriptService, - ParentTaskAssigningClient client, ThreadPool threadPool, ClusterState clusterState, UpdateByQueryRequest request, - ActionListener listener) { - super(task, logger, scriptService, clusterState, client, threadPool, request, request.getSearchRequest(), listener); + + public AsyncIndexBySearchAction(BulkByScrollTask task, ESLogger logger, ParentTaskAssigningClient client, ThreadPool threadPool, + UpdateByQueryRequest request, ActionListener listener, + ScriptService scriptService, ClusterState clusterState) { + super(task, logger, client, threadPool, request, request.getSearchRequest(), listener, scriptService, clusterState); } @Override - protected IndexRequest buildIndexRequest(SearchHit doc) { + protected BiFunction, SearchHit, RequestWrapper> buildScriptApplier() { + Script script = mainRequest.getScript(); + if (script != null) { + return new UpdateByQueryScriptApplier(task, scriptService, script, clusterState, script.getParams()); + } + return super.buildScriptApplier(); + } + + @Override + protected RequestWrapper buildRequest(SearchHit doc) { IndexRequest index = new IndexRequest(); index.index(doc.index()); index.type(doc.type()); @@ -94,47 +107,55 @@ public class TransportUpdateByQueryAction extends HandledTransportAction params) { + super(task, scriptService, script, state, params); + } - @Override - protected void scriptChangedId(IndexRequest index, Object to) { - throw new 
IllegalArgumentException("Modifying [" + IdFieldMapper.NAME + "] not allowed"); - } + @Override + protected void scriptChangedIndex(RequestWrapper request, Object to) { + throw new IllegalArgumentException("Modifying [" + IndexFieldMapper.NAME + "] not allowed"); + } - @Override - protected void scriptChangedVersion(IndexRequest index, Object to) { - throw new IllegalArgumentException("Modifying [_version] not allowed"); - } + @Override + protected void scriptChangedType(RequestWrapper request, Object to) { + throw new IllegalArgumentException("Modifying [" + TypeFieldMapper.NAME + "] not allowed"); + } - @Override - protected void scriptChangedRouting(IndexRequest index, Object to) { - throw new IllegalArgumentException("Modifying [" + RoutingFieldMapper.NAME + "] not allowed"); - } + @Override + protected void scriptChangedId(RequestWrapper request, Object to) { + throw new IllegalArgumentException("Modifying [" + IdFieldMapper.NAME + "] not allowed"); + } - @Override - protected void scriptChangedParent(IndexRequest index, Object to) { - throw new IllegalArgumentException("Modifying [" + ParentFieldMapper.NAME + "] not allowed"); - } + @Override + protected void scriptChangedVersion(RequestWrapper request, Object to) { + throw new IllegalArgumentException("Modifying [_version] not allowed"); + } - @Override - protected void scriptChangedTimestamp(IndexRequest index, Object to) { - throw new IllegalArgumentException("Modifying [" + TimestampFieldMapper.NAME + "] not allowed"); - } + @Override + protected void scriptChangedRouting(RequestWrapper request, Object to) { + throw new IllegalArgumentException("Modifying [" + RoutingFieldMapper.NAME + "] not allowed"); + } - @Override - protected void scriptChangedTTL(IndexRequest index, Object to) { - throw new IllegalArgumentException("Modifying [" + TTLFieldMapper.NAME + "] not allowed"); + @Override + protected void scriptChangedParent(RequestWrapper request, Object to) { + throw new IllegalArgumentException("Modifying [" + ParentFieldMapper.NAME + "] not allowed"); + } + + @Override + protected void scriptChangedTimestamp(RequestWrapper request, Object to) { + throw new IllegalArgumentException("Modifying [" + TimestampFieldMapper.NAME + "] not allowed"); + } + + @Override + protected void scriptChangedTTL(RequestWrapper request, Object to) { + throw new IllegalArgumentException("Modifying [" + TTLFieldMapper.NAME + "] not allowed"); + } } } } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollActionScriptTestCase.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollActionScriptTestCase.java index b8f389d171a..27a8a42d5e6 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollActionScriptTestCase.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollActionScriptTestCase.java @@ -22,10 +22,15 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.common.text.Text; import org.elasticsearch.index.Index; +import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.ExecutableScript; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchHitField; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.internal.InternalSearchHit; +import org.junit.Before; +import 
org.mockito.Matchers; import java.util.HashMap; import java.util.Map; @@ -33,18 +38,35 @@ import java.util.function.Consumer; import static java.util.Collections.singletonMap; import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public abstract class AbstractAsyncBulkIndexByScrollActionScriptTestCase< Request extends AbstractBulkIndexByScrollRequest, Response extends BulkIndexByScrollResponse> extends AbstractAsyncBulkIndexByScrollActionTestCase { + + private static final Script EMPTY_SCRIPT = new Script(""); + + protected ScriptService scriptService; + + @Before + public void setupScriptService() { + scriptService = mock(ScriptService.class); + } + protected IndexRequest applyScript(Consumer> scriptBody) { IndexRequest index = new IndexRequest("index", "type", "1").source(singletonMap("foo", "bar")); Map fields = new HashMap<>(); InternalSearchHit doc = new InternalSearchHit(0, "id", new Text("type"), fields); doc.shardTarget(new SearchShardTarget("nodeid", new Index("index", "uuid"), 1)); - ExecutableScript script = new SimpleExecutableScript(scriptBody); - action().applyScript(index, doc, script, new HashMap<>()); + ExecutableScript executableScript = new SimpleExecutableScript(scriptBody); + + when(scriptService.executable(any(CompiledScript.class), Matchers.>any())) + .thenReturn(executableScript); + AbstractAsyncBulkIndexByScrollAction action = action(scriptService, request().setScript(EMPTY_SCRIPT)); + action.buildScriptApplier().apply(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc); return index; } @@ -53,7 +75,7 @@ public abstract class AbstractAsyncBulkIndexByScrollActionScriptTestCase< applyScript((Map ctx) -> ctx.put("junk", "junk")); fail("Expected error"); } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), equalTo("Invalid fields added to ctx [junk]")); + assertThat(e.getMessage(), equalTo("Invalid fields added to context [junk]")); } } @@ -65,4 +87,6 @@ public abstract class AbstractAsyncBulkIndexByScrollActionScriptTestCase< }); assertEquals("cat", index.sourceAsMap().get("bar")); } + + protected abstract AbstractAsyncBulkIndexByScrollAction action(ScriptService scriptService, Request request); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollActionTestCase.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollActionTestCase.java index ab5f8f0d748..b9489e9f5d9 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollActionTestCase.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollActionTestCase.java @@ -46,8 +46,6 @@ public abstract class AbstractAsyncBulkIndexByScrollActionTestCase< threadPool.shutdown(); } - protected abstract AbstractAsyncBulkIndexByScrollAction action(); - protected abstract Request request(); protected PlainActionFuture listener() { diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexbyScrollActionMetadataTestCase.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexbyScrollActionMetadataTestCase.java index 37386abf12e..5a9976fc005 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexbyScrollActionMetadataTestCase.java +++ 
b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexbyScrollActionMetadataTestCase.java @@ -49,13 +49,15 @@ public abstract class AbstractAsyncBulkIndexbyScrollActionMetadataTestCase< public void testTimestampIsCopied() { IndexRequest index = new IndexRequest(); - action().copyMetadata(index, doc(TimestampFieldMapper.NAME, 10L)); + action().copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc(TimestampFieldMapper.NAME, 10L)); assertEquals("10", index.timestamp()); } public void testTTL() throws Exception { IndexRequest index = new IndexRequest(); - action().copyMetadata(index, doc(TTLFieldMapper.NAME, 10L)); + action().copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc(TTLFieldMapper.NAME, 10L)); assertEquals(timeValueMillis(10), index.ttl()); } + + protected abstract AbstractAsyncBulkIndexByScrollAction action(); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java index c9bd22d3552..d78fcfe69ca 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java @@ -665,7 +665,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { } private class DummyAbstractAsyncBulkByScrollAction - extends AbstractAsyncBulkByScrollAction { + extends AbstractAsyncBulkByScrollAction { public DummyAbstractAsyncBulkByScrollAction() { super(testTask, logger, new ParentTaskAssigningClient(client, localNode, testTask), threadPool, testRequest, firstSearchRequest, listener); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponseMatcher.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponseMatcher.java index 5bce3260929..4ef16c59141 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponseMatcher.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponseMatcher.java @@ -30,6 +30,8 @@ public class BulkIndexByScrollResponseMatcher extends TypeSafeMatcher createdMatcher = equalTo(0L); private Matcher updatedMatcher = equalTo(0L); + private Matcher deletedMatcher = equalTo(0L); + /** * Matches for number of batches. Optional. */ @@ -56,6 +58,15 @@ public class BulkIndexByScrollResponseMatcher extends TypeSafeMatcher deletedMatcher) { + this.deletedMatcher = deletedMatcher; + return this; + } + + public BulkIndexByScrollResponseMatcher deleted(long deleted) { + return deleted(equalTo(deleted)); + } + /** * Set the matches for the number of batches. 
Defaults to matching any * integer because we usually don't care about how many batches the job @@ -110,6 +121,7 @@ public class BulkIndexByScrollResponseMatcher extends TypeSafeMatcher builders = new ArrayList<>(); + for (int i = 0; i < docs; i++) { + builders.add(client().prepareIndex("test", "doc", String.valueOf(i)).setSource("fields1", 1)); + } + indexRandom(true, true, true, builders); + + assertThat(deleteByQuery().source("t*").refresh(true).get(), matcher().deleted(docs)); + assertHitCount(client().prepareSearch("test").setSize(0).get(), 0); + } + + public void testDeleteByQueryWithMultipleIndices() throws Exception { + final int indices = randomIntBetween(2, 5); + final int docs = randomIntBetween(2, 10) * 2; + long[] candidates = new long[indices]; + + // total number of expected deletions + long deletions = 0; + + List builders = new ArrayList<>(); + for (int i = 0; i < indices; i++) { + // number of documents to be deleted with the upcoming delete-by-query + // (this number differs for each index) + candidates[i] = randomIntBetween(1, docs); + deletions = deletions + candidates[i]; + + for (int j = 0; j < docs; j++) { + boolean candidate = (j < candidates[i]); + builders.add(client().prepareIndex("test-" + i, "doc", String.valueOf(j)).setSource("candidate", candidate)); + } + } + indexRandom(true, true, true, builders); + + // Deletes all the documents with candidate=true + assertThat(deleteByQuery().source("test-*").filter(termQuery("candidate", true)).refresh(true).get(), + matcher().deleted(deletions)); + + for (int i = 0; i < indices; i++) { + long remaining = docs - candidates[i]; + assertHitCount(client().prepareSearch("test-" + i).setSize(0).get(), remaining); + } + + assertHitCount(client().prepareSearch().setSize(0).get(), (indices * docs) - deletions); + } + + public void testDeleteByQueryWithMissingIndex() throws Exception { + indexRandom(true, client().prepareIndex("test", "test", "1").setSource("foo", "a")); + assertHitCount(client().prepareSearch().setSize(0).get(), 1); + + try { + deleteByQuery().source("missing").get(); + fail("should have thrown an exception because of a missing index"); + } catch (IndexNotFoundException e) { + // Ok + } + } + + public void testDeleteByQueryWithRouting() throws Exception { + assertAcked(prepareCreate("test").setSettings("number_of_shards", 2)); + ensureGreen("test"); + + final int docs = randomIntBetween(2, 10); + logger.info("--> indexing [{}] documents with routing", docs); + + List builders = new ArrayList<>(); + for (int i = 0; i < docs; i++) { + builders.add(client().prepareIndex("test", "test", String.valueOf(i)).setRouting(String.valueOf(i)).setSource("field1", 1)); + } + indexRandom(true, true, true, builders); + + logger.info("--> counting documents with no routing, should be equal to [{}]", docs); + assertHitCount(client().prepareSearch().setSize(0).get(), docs); + + String routing = String.valueOf(randomIntBetween(2, docs)); + + logger.info("--> counting documents with routing [{}]", routing); + long expected = client().prepareSearch().setSize(0).setRouting(routing).get().getHits().totalHits(); + + logger.info("--> delete all documents with routing [{}] with a delete-by-query", routing); + DeleteByQueryRequestBuilder delete = deleteByQuery().source("test"); + delete.source().setRouting(routing); + assertThat(delete.refresh(true).get(), matcher().deleted(expected)); + + assertHitCount(client().prepareSearch().setSize(0).get(), docs - expected); + } + + public void testDeleteByMatchQuery() throws Exception { + 
assertAcked(prepareCreate("test").addAlias(new Alias("alias"))); + + final int docs = scaledRandomIntBetween(10, 100); + + List builders = new ArrayList<>(); + for (int i = 0; i < docs; i++) { + builders.add(client().prepareIndex("test", "test", Integer.toString(i)) + .setRouting(randomAsciiOfLengthBetween(1, 5)) + .setSource("foo", "bar")); + } + indexRandom(true, true, true, builders); + + int n = between(0, docs - 1); + assertHitCount(client().prepareSearch("test").setSize(0).setQuery(matchQuery("_id", Integer.toString(n))).get(), 1); + assertHitCount(client().prepareSearch("test").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get(), docs); + + DeleteByQueryRequestBuilder delete = deleteByQuery().source("alias").filter(matchQuery("_id", Integer.toString(n))); + assertThat(delete.refresh(true).get(), matcher().deleted(1L)); + + assertHitCount(client().prepareSearch("test").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get(), docs - 1); + } + + public void testDeleteByQueryWithDateMath() throws Exception { + indexRandom(true, client().prepareIndex("test", "type", "1").setSource("d", "2013-01-01")); + + DeleteByQueryRequestBuilder delete = deleteByQuery().source("test").filter(rangeQuery("d").to("now-1h")); + assertThat(delete.refresh(true).get(), matcher().deleted(1L)); + + assertHitCount(client().prepareSearch("test").setSize(0).get(), 0); + } + + public void testDeleteByQueryOnReadOnlyIndex() throws Exception { + createIndex("test"); + + final int docs = randomIntBetween(1, 50); + List builders = new ArrayList<>(); + for (int i = 0; i < docs; i++) { + builders.add(client().prepareIndex("test", "test", Integer.toString(i)).setSource("field", 1)); + } + indexRandom(true, true, true, builders); + + try { + enableIndexBlock("test", IndexMetaData.SETTING_READ_ONLY); + assertThat(deleteByQuery().source("test").refresh(true).get(), matcher().deleted(0).failures(docs)); + } finally { + disableIndexBlock("test", IndexMetaData.SETTING_READ_ONLY); + } + + assertHitCount(client().prepareSearch("test").setSize(0).get(), docs); + } +} diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryCancelTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryCancelTests.java new file mode 100644 index 00000000000..6007b646429 --- /dev/null +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryCancelTests.java @@ -0,0 +1,184 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.reindex; + +import org.elasticsearch.action.ListenableActionFuture; +import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; +import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskInfo; +import org.elasticsearch.common.util.concurrent.CountDown; +import org.elasticsearch.index.IndexModule; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.shard.IndexingOperationListener; +import org.elasticsearch.plugins.Plugin; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.concurrent.BrokenBarrierException; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + +/** + * Tests that you can actually cancel a delete-by-query request and all the plumbing works. Doesn't test all of the different cancellation + * places - that is the responsibility of {@link AsyncBulkByScrollActionTests} which have more precise control to simulate failures but do + * not exercise important portion of the stack like transport and task management. + */ +public class DeleteByQueryCancelTests extends ReindexTestCase { + + private static final String INDEX = "test-delete-by-query"; + private static final String TYPE = "test"; + + private static final int MAX_DELETIONS = 10; + private static final CyclicBarrier barrier = new CyclicBarrier(2); + + @Override + protected int numberOfShards() { + // Only 1 shard and no replica so that test execution + // can be easily controlled within a {@link IndexingOperationListener#preDelete} + return 1; + } + + @Override + protected int numberOfReplicas() { + // Only 1 shard and no replica so that test execution + // can be easily controlled within a {@link IndexingOperationListener#preDelete} + return 0; + } + + @Override + protected Collection> nodePlugins() { + Collection> plugins = new ArrayList<>(super.nodePlugins()); + plugins.add(DeleteByQueryCancellationPlugin.class); + return plugins; + } + + public void testCancel() throws Exception { + createIndex(INDEX); + + int totalNumShards = getNumShards(INDEX).totalNumShards; + + // Number of documents to be deleted in this test + final int nbDocsToDelete = totalNumShards * MAX_DELETIONS; + + // Total number of documents that will be created in this test + final int nbDocs = nbDocsToDelete * randomIntBetween(1, 5); + for (int i = 0; i < nbDocs; i++) { + indexRandom(false, client().prepareIndex(INDEX, TYPE, String.valueOf(i)).setSource("n", i)); + } + + refresh(INDEX); + assertHitCount(client().prepareSearch(INDEX).setSize(0).get(), nbDocs); + + // Executes the delete by query; each shard will block after MAX_DELETIONS + DeleteByQueryRequestBuilder deleteByQuery = deleteByQuery().source("_all"); + deleteByQuery.source().setSize(1); + + ListenableActionFuture future = deleteByQuery.execute(); + + // Waits for the indexing operation listener to block + barrier.await(30, TimeUnit.SECONDS); + + // Status should show running + ListTasksResponse tasksList = client().admin().cluster().prepareListTasks() + 
.setActions(DeleteByQueryAction.NAME).setDetailed(true).get(); + assertThat(tasksList.getNodeFailures(), empty()); + assertThat(tasksList.getTaskFailures(), empty()); + assertThat(tasksList.getTasks(), hasSize(1)); + BulkByScrollTask.Status status = (BulkByScrollTask.Status) tasksList.getTasks().get(0).getStatus(); + assertNull(status.getReasonCancelled()); + + // Cancel the request while the deletions are blocked. This will prevent further deletions requests from being sent. + List cancelledTasks = client().admin().cluster().prepareCancelTasks() + .setActions(DeleteByQueryAction.NAME).get().getTasks(); + assertThat(cancelledTasks, hasSize(1)); + + // The status should now show canceled. The request will still be in the list because the script is still blocked. + tasksList = client().admin().cluster().prepareListTasks().setActions(DeleteByQueryAction.NAME).setDetailed(true).get(); + assertThat(tasksList.getNodeFailures(), empty()); + assertThat(tasksList.getTaskFailures(), empty()); + assertThat(tasksList.getTasks(), hasSize(1)); + status = (BulkByScrollTask.Status) tasksList.getTasks().get(0).getStatus(); + assertEquals(CancelTasksRequest.DEFAULT_REASON, status.getReasonCancelled()); + + // Now unblock the listener so that it can proceed + barrier.await(); + + // And check the status of the response + BulkIndexByScrollResponse response = future.get(); + assertThat(response, matcher() + .deleted(lessThanOrEqualTo((long) MAX_DELETIONS)).batches(MAX_DELETIONS).reasonCancelled(equalTo("by user request"))); + } + + + public static class DeleteByQueryCancellationPlugin extends Plugin { + + @Override + public String name() { + return "delete-by-query-cancellation"; + } + + @Override + public String description() { + return "See " + DeleteByQueryCancellationPlugin.class.getName(); + } + + @Override + public void onIndexModule(IndexModule indexModule) { + indexModule.addIndexOperationListener(new BlockingDeleteListener()); + } + } + + /** + * A {@link IndexingOperationListener} that allows a given number of documents to be deleted + * and then blocks until it is notified to proceed. + */ + public static class BlockingDeleteListener implements IndexingOperationListener { + + private final CountDown blockAfter = new CountDown(MAX_DELETIONS); + + @Override + public Engine.Delete preDelete(Engine.Delete delete) { + if (blockAfter.isCountedDown() || (TYPE.equals(delete.type()) == false)) { + return delete; + } + + if (blockAfter.countDown()) { + try { + // Tell the test we've deleted enough documents. + barrier.await(30, TimeUnit.SECONDS); + + // Wait for the test to tell us to proceed. + barrier.await(30, TimeUnit.SECONDS); + } catch (InterruptedException | BrokenBarrierException | TimeoutException e) { + throw new RuntimeException(e); + } + } + return delete; + } + } +} diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryConcurrentTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryConcurrentTests.java new file mode 100644 index 00000000000..40a776626a3 --- /dev/null +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryConcurrentTests.java @@ -0,0 +1,119 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.reindex; + +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.index.query.MatchQueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicLong; + +import static org.elasticsearch.index.query.QueryBuilders.matchQuery; +import static org.elasticsearch.index.query.QueryBuilders.termQuery; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.equalTo; + +public class DeleteByQueryConcurrentTests extends ReindexTestCase { + + public void testConcurrentDeleteByQueriesOnDifferentDocs() throws Throwable { + final Thread[] threads = new Thread[scaledRandomIntBetween(2, 5)]; + final long docs = randomIntBetween(1, 50); + + List builders = new ArrayList<>(); + for (int i = 0; i < docs; i++) { + for (int t = 0; t < threads.length; t++) { + builders.add(client().prepareIndex("test", "doc").setSource("field", t)); + } + } + indexRandom(true, true, true, builders); + + final CountDownLatch start = new CountDownLatch(1); + for (int t = 0; t < threads.length; t++) { + final int threadNum = t; + assertHitCount(client().prepareSearch("test").setSize(0).setQuery(QueryBuilders.termQuery("field", threadNum)).get(), docs); + + Runnable r = () -> { + try { + start.await(); + + assertThat(deleteByQuery().source("_all").filter(termQuery("field", threadNum)).refresh(true).get(), + matcher().deleted(docs)); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + }; + threads[t] = new Thread(r); + threads[t].start(); + } + + start.countDown(); + for (Thread thread : threads) { + thread.join(); + } + + for (int t = 0; t < threads.length; t++) { + assertHitCount(client().prepareSearch("test").setSize(0).setQuery(QueryBuilders.termQuery("field", t)).get(), 0); + } + } + + public void testConcurrentDeleteByQueriesOnSameDocs() throws Throwable { + final long docs = randomIntBetween(50, 100); + + List builders = new ArrayList<>(); + for (int i = 0; i < docs; i++) { + builders.add(client().prepareIndex("test", "doc", String.valueOf(i)).setSource("foo", "bar")); + } + indexRandom(true, true, true, builders); + + final Thread[] threads = new Thread[scaledRandomIntBetween(2, 9)]; + + final CountDownLatch start = new CountDownLatch(1); + final MatchQueryBuilder query = matchQuery("foo", "bar"); + final AtomicLong deleted = new AtomicLong(0); + + for (int t = 0; t < threads.length; t++) { + Runnable r = () -> { + try { + start.await(); + + BulkIndexByScrollResponse response = deleteByQuery().source("test").filter(query).refresh(true).get(); + // Some deletions might fail due to version conflict, but + // what matters here is the total of successful deletions + deleted.addAndGet(response.getDeleted()); + } catch 
(InterruptedException e) { + Thread.currentThread().interrupt(); + } + }; + threads[t] = new Thread(r); + threads[t].start(); + } + + start.countDown(); + for (Thread thread : threads) { + thread.join(); + } + + assertHitCount(client().prepareSearch("test").setSize(0).get(), 0L); + assertThat(deleted.get(), equalTo(docs)); + } +} diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexMetadataTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexMetadataTests.java index e82dd12a407..670fcefbf55 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexMetadataTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexMetadataTests.java @@ -29,7 +29,7 @@ import org.elasticsearch.index.mapper.internal.RoutingFieldMapper; public class ReindexMetadataTests extends AbstractAsyncBulkIndexbyScrollActionMetadataTestCase { public void testRoutingCopiedByDefault() throws Exception { IndexRequest index = new IndexRequest(); - action().copyMetadata(index, doc(RoutingFieldMapper.NAME, "foo")); + action().copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc(RoutingFieldMapper.NAME, "foo")); assertEquals("foo", index.routing()); } @@ -37,7 +37,7 @@ public class ReindexMetadataTests extends AbstractAsyncBulkIndexbyScrollActionMe TransportReindexAction.AsyncIndexBySearchAction action = action(); action.mainRequest.getDestination().routing("keep"); IndexRequest index = new IndexRequest(); - action.copyMetadata(index, doc(RoutingFieldMapper.NAME, "foo")); + action.copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc(RoutingFieldMapper.NAME, "foo")); assertEquals("foo", index.routing()); } @@ -45,7 +45,7 @@ public class ReindexMetadataTests extends AbstractAsyncBulkIndexbyScrollActionMe TransportReindexAction.AsyncIndexBySearchAction action = action(); action.mainRequest.getDestination().routing("discard"); IndexRequest index = new IndexRequest(); - action.copyMetadata(index, doc(RoutingFieldMapper.NAME, "foo")); + action.copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc(RoutingFieldMapper.NAME, "foo")); assertEquals(null, index.routing()); } @@ -53,7 +53,7 @@ public class ReindexMetadataTests extends AbstractAsyncBulkIndexbyScrollActionMe TransportReindexAction.AsyncIndexBySearchAction action = action(); action.mainRequest.getDestination().routing("=cat"); IndexRequest index = new IndexRequest(); - action.copyMetadata(index, doc(RoutingFieldMapper.NAME, "foo")); + action.copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc(RoutingFieldMapper.NAME, "foo")); assertEquals("cat", index.routing()); } @@ -61,13 +61,13 @@ public class ReindexMetadataTests extends AbstractAsyncBulkIndexbyScrollActionMe TransportReindexAction.AsyncIndexBySearchAction action = action(); action.mainRequest.getDestination().routing("==]"); IndexRequest index = new IndexRequest(); - action.copyMetadata(index, doc(RoutingFieldMapper.NAME, "foo")); + action.copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc(RoutingFieldMapper.NAME, "foo")); assertEquals("=]", index.routing()); } @Override protected TransportReindexAction.AsyncIndexBySearchAction action() { - return new TransportReindexAction.AsyncIndexBySearchAction(task, logger, null, null, null, threadPool, request(), listener()); + return new TransportReindexAction.AsyncIndexBySearchAction(task, logger, null, threadPool, request(), listener(), null, null); } @Override diff --git 
a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexScriptTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexScriptTests.java index b805dbd2772..74b7548cd63 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexScriptTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexScriptTests.java @@ -20,7 +20,10 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.script.ExecutableScript; +import org.elasticsearch.script.ScriptService; import java.util.Map; @@ -31,6 +34,7 @@ import static org.hamcrest.Matchers.containsString; * Tests index-by-search with a script modifying the documents. */ public class ReindexScriptTests extends AbstractAsyncBulkIndexByScrollActionScriptTestCase { + public void testSetIndex() throws Exception { Object dest = randomFrom(new Object[] {234, 234L, "pancake"}); IndexRequest index = applyScript((Map<String, Object> ctx) -> ctx.put("_index", dest)); @@ -129,11 +133,12 @@ public class ReindexScriptTests extends AbstractAsyncBulkIndexByScrollActionScri @Override protected ReindexRequest request() { - return new ReindexRequest(); + return new ReindexRequest(new SearchRequest(), new IndexRequest()); } @Override - protected AbstractAsyncBulkIndexByScrollAction action() { - return new TransportReindexAction.AsyncIndexBySearchAction(task, logger, null, null, null, threadPool, request(), listener()); + protected AbstractAsyncBulkIndexByScrollAction action(ScriptService scriptService, ReindexRequest request) { + return new TransportReindexAction.AsyncIndexBySearchAction(task, logger, null, threadPool, request, listener(), scriptService, + null); + } } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexTestCase.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexTestCase.java index 33c72baa7cb..f4a777a1973 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexTestCase.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexTestCase.java @@ -43,6 +43,10 @@ public abstract class ReindexTestCase extends ESIntegTestCase { return UpdateByQueryAction.INSTANCE.newRequestBuilder(client()); } + protected DeleteByQueryRequestBuilder deleteByQuery() { + return DeleteByQueryAction.INSTANCE.newRequestBuilder(client()); + } + protected RethrottleRequestBuilder rethrottle() { return RethrottleAction.INSTANCE.newRequestBuilder(client()); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryMetadataTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryMetadataTests.java index 5c5e45993b9..bb6a33b593a 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryMetadataTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryMetadataTests.java @@ -27,14 +27,13 @@ public class UpdateByQueryMetadataTests extends AbstractAsyncBulkIndexbyScrollActionMetadataTestCase { public void testRoutingIsCopied() throws Exception { IndexRequest index = new IndexRequest(); - action().copyMetadata(index, doc(RoutingFieldMapper.NAME, "foo")); + action().copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc(RoutingFieldMapper.NAME, "foo")); assertEquals("foo", index.routing()); } @Override protected
TransportUpdateByQueryAction.AsyncIndexBySearchAction action() { - return new TransportUpdateByQueryAction.AsyncIndexBySearchAction(task, logger, null, null, threadPool, null, request(), - listener()); + return new TransportUpdateByQueryAction.AsyncIndexBySearchAction(task, logger, null, threadPool, request(), listener(), null, null); } @Override diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryWithScriptTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryWithScriptTests.java index 62f405cb0cd..1c57c202766 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryWithScriptTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryWithScriptTests.java @@ -19,6 +19,8 @@ package org.elasticsearch.index.reindex; +import org.elasticsearch.script.ScriptService; + import java.util.Date; import java.util.Map; @@ -26,6 +28,7 @@ import static org.hamcrest.Matchers.containsString; public class UpdateByQueryWithScriptTests extends AbstractAsyncBulkIndexByScrollActionScriptTestCase { + public void testModifyingCtxNotAllowed() { /* * It's important that none of these actually match any of the fields. @@ -49,7 +52,8 @@ } @Override - protected AbstractAsyncBulkIndexByScrollAction action() { - return new TransportUpdateByQueryAction.AsyncIndexBySearchAction(task, logger, null, null, threadPool, null, request(), listener()); + protected AbstractAsyncBulkIndexByScrollAction action(ScriptService scriptService, UpdateByQueryRequest request) { + return new TransportUpdateByQueryAction.AsyncIndexBySearchAction(task, logger, null, threadPool, request, listener(), + scriptService, null); + } } diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/10_basic.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/10_basic.yaml new file mode 100644 index 00000000000..bdad5f581bc --- /dev/null +++ b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/10_basic.yaml @@ -0,0 +1,304 @@ +--- +"Basic response": + - do: + index: + index: test + type: foo + id: 1 + body: { "text": "test" } + - do: + indices.refresh: {} + + - do: + delete_by_query: + index: test + body: + query: + match_all: {} + + - is_false: timed_out + - match: {deleted: 1} + - is_false: created + - is_false: updated + - match: {version_conflicts: 0} + - match: {batches: 1} + - match: {failures: []} + - match: {noops: 0} + - match: {throttled_millis: 0} + - gte: { took: 0 } + - is_false: task + + - do: + indices.refresh: {} + + - do: + count: + index: test + + - match: {count: 0} + +--- +"wait_for_completion=false": + - do: + index: + index: test + type: foo + id: 1 + body: { "text": "test" } + - do: + indices.refresh: {} + + - do: + delete_by_query: + wait_for_completion: false + index: test + body: + query: + match_all: {} + + - match: {task: '/.+:\d+/'} + - set: {task: task} + - is_false: version_conflicts + - is_false: batches + - is_false: failures + - is_false: noops + - is_false: took + - is_false: throttled_millis + - is_false: created + - is_false: updated + - is_false: deleted + + - do: + tasks.list: + wait_for_completion: true + task_id: $task + - is_false: node_failures + +--- +"Response for version conflict": + - do: + indices.create: + index: test + body: + settings: + index.refresh_interval: -1 + - do: + index: + index: test + type: foo + id: 1 + body: { "text": "test" } + - do: + indices.refresh: {}
# Creates a new version for delete_by_query to miss on scan. + - do: + index: + index: test + type: foo + id: 1 + body: { "text": "test2" } + + - do: + catch: conflict + delete_by_query: + index: test + body: + query: + match_all: {} + + - match: {deleted: 0} + - match: {version_conflicts: 1} + - match: {batches: 1} + - match: {failures.0.index: test} + - match: {failures.0.type: foo} + - match: {failures.0.id: "1"} + - match: {failures.0.status: 409} + - match: {failures.0.cause.type: version_conflict_engine_exception} + # Use a regex so we don't mind if the current version isn't always 1. Sometimes it comes out 2. + - match: {failures.0.cause.reason: "/\\[foo\\]\\[1\\]:.version.conflict,.current.version.\\[\\d+\\].is.different.than.the.one.provided.\\[\\d+\\]/"} + - match: {failures.0.cause.shard: /\d+/} + - match: {failures.0.cause.index: test} + - gte: { took: 0 } + + - do: + indices.refresh: {} + + - do: + count: + index: test + + - match: {count: 1} + +--- +"Response for version conflict with conflicts=proceed": + - do: + indices.create: + index: test + body: + settings: + index.refresh_interval: -1 + - do: + index: + index: test + type: foo + id: 1 + body: { "text": "test" } + - do: + indices.refresh: {} + # Creates a new version for delete_by_query to miss on scan. + - do: + index: + index: test + type: foo + id: 1 + body: { "text": "test2" } + + - do: + delete_by_query: + index: test + conflicts: proceed + body: + query: + match_all: {} + + - match: {deleted: 0} + - match: {version_conflicts: 1} + - match: {batches: 1} + - match: {noops: 0} + - match: {failures: []} + - match: {throttled_millis: 0} + - gte: { took: 0 } + + - do: + indices.refresh: {} + + - do: + count: + index: test + + - match: {count: 1} + +--- +"Limit by query": + - do: + index: + index: twitter + type: tweet + id: 1 + body: { "user": "kimchy" } + - do: + index: + index: twitter + type: tweet + id: 2 + body: { "user": "junk" } + - do: + indices.refresh: {} + + - do: + delete_by_query: + index: twitter + refresh: true + body: + query: + match: + user: kimchy + - match: {deleted: 1} + - match: {version_conflicts: 0} + - match: {batches: 1} + - match: {failures: []} + - gte: { took: 0 } + + - do: + count: + index: twitter + + - match: {count: 1} + +--- +"Limit by size": + - do: + index: + index: twitter + type: tweet + id: 1 + body: { "user": "kimchy" } + - do: + index: + index: twitter + type: tweet + id: 2 + body: { "user": "kimchy" } + - do: + indices.refresh: {} + + - do: + delete_by_query: + index: twitter + size: 1 + body: + query: + match_all: {} + + - match: {deleted: 1} + - match: {version_conflicts: 0} + - match: {batches: 1} + - match: {failures: []} + - match: {throttled_millis: 0} + - gte: { took: 0 } + + - do: + indices.refresh: {} + + - do: + count: + index: twitter + + - match: {count: 1} + +--- +"Can override scroll_size": + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 1 + - do: + cluster.health: + wait_for_status: yellow + - do: + index: + index: test + type: foo + body: { "text": "test" } + - do: + index: + index: test + type: foo + body: { "text": "test" } + - do: + index: + index: test + type: foo + body: { "text": "test" } + - do: + indices.refresh: {} + + - do: + delete_by_query: + index: test + refresh: true + scroll_size: 1 + body: + query: + match_all: {} + + - match: {batches: 3} + - match: {deleted: 3} + + - do: + count: + index: test + + - match: {count: 0} diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/20_validation.yaml
b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/20_validation.yaml new file mode 100644 index 00000000000..8648c9034ee --- /dev/null +++ b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/20_validation.yaml @@ -0,0 +1,99 @@ +--- +"no body fails": + - do: + catch: param + delete_by_query: + index: _all + +--- +"invalid conflicts fails": + - do: + index: + index: test + type: test + id: 1 + body: { "text": "test" } + - do: + catch: /conflicts may only be .* but was \[cat\]/ + delete_by_query: + index: test + conflicts: cat + body: + query: + match_all: {} + +--- +"invalid size fails": + - do: + index: + index: test + type: test + id: 1 + body: { "text": "test" } + - do: + catch: /size should be greater than 0 if the request is limited to some number of documents or -1 if it isn't but it was \[-4\]/ + delete_by_query: + index: test + size: -4 + body: + query: + match_all: {} + +--- +"invalid scroll_size fails": + - do: + index: + index: test + type: test + id: 1 + body: { "text": "test" } + - do: + catch: /Failed to parse int parameter \[scroll_size\] with value \[asdf\]/ + delete_by_query: + index: test + scroll_size: asdf + body: + query: + match_all: {} + +--- +"source fields may not be modified": + - do: + catch: /fields is not supported in this context/ + delete_by_query: + index: test + body: + fields: [_id] + +--- +"requests_per_second cannot be an empty string": + - do: + catch: /\[requests_per_second\] must be a float greater than 0. Use "unlimited" to disable throttling./ + delete_by_query: + requests_per_second: "" + index: test + body: + query: + match_all: {} + +--- +"requests_per_second cannot be negative": + - do: + catch: /\[requests_per_second\] must be a float greater than 0. Use "unlimited" to disable throttling./ + delete_by_query: + requests_per_second: -12 + index: test + body: + query: + match_all: {} + +--- +"requests_per_second cannot be zero": + - do: + catch: /\[requests_per_second\] must be a float greater than 0. 
Use "unlimited" to disable throttling./ + delete_by_query: + requests_per_second: 0 + index: test + body: + query: + match_all: {} diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/30_by_type.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/30_by_type.yaml new file mode 100644 index 00000000000..1ee249fc6bb --- /dev/null +++ b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/30_by_type.yaml @@ -0,0 +1,72 @@ +--- +"Delete by type": + - do: + index: + index: test + type: t1 + id: 1 + body: { foo: bar } + - do: + index: + index: test + type: t1 + id: 2 + body: { foo: bar } + - do: + index: + index: test + type: t2 + id: 1 + body: { foo: bar } + - do: + index: + index: test + type: t2 + id: 2 + body: { foo: bar } + - do: + index: + index: test + type: t2 + id: 3 + body: { foo: baz } + - do: + indices.refresh: {} + - do: + count: + index: test + type: t2 + + - match: {count: 3} + + - do: + delete_by_query: + index: test + type: t2 + body: + query: + match: + foo: bar + + - is_false: timed_out + - match: {deleted: 2} + - is_false: created + - is_false: updated + - match: {version_conflicts: 0} + - match: {batches: 1} + - match: {failures: []} + - match: {noops: 0} + - match: {throttled_millis: 0} + - gte: { took: 0 } + - is_false: task + + - do: + indices.refresh: {} + + - do: + count: + index: test + type: t2 + + - match: {count: 1} + diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/50_consistency.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/50_consistency.yaml new file mode 100644 index 00000000000..a69dfdfac04 --- /dev/null +++ b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/50_consistency.yaml @@ -0,0 +1,62 @@ +--- +"can override consistency": + - do: + indices.create: + index: test + body: + settings: + number_of_replicas: 5 + - do: + cluster.health: + wait_for_status: yellow + - do: + index: + index: test + type: test + id: 1 + body: {"text": "test"} + consistency: one + - do: + indices.refresh: {} + + - do: + catch: unavailable + delete_by_query: + index: test + timeout: 1s + body: + query: + match_all: {} + + - match: + failures.0.cause.reason: /Not.enough.active.copies.to.meet.write.consistency.of.\[QUORUM\].\(have.1,.needed.4\)..Timeout\:.\[1s\],.request:.\[BulkShardRequest.to.\[test\].containing.\[1\].requests\]/ + + - do: + indices.refresh: {} + + - do: + count: + index: test + + - match: {count: 1} + + - do: + delete_by_query: + index: test + consistency: one + body: + query: + match_all: {} + + - match: {failures: []} + - match: {deleted: 1} + - match: {version_conflicts: 0} + + - do: + indices.refresh: {} + + - do: + count: + index: test + + - match: {count: 0} diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/70_throttle.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/70_throttle.yaml new file mode 100644 index 00000000000..0ff382ff751 --- /dev/null +++ b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/70_throttle.yaml @@ -0,0 +1,202 @@ +"Throttle the request": + # Throttling happens between each scroll batch so we need to control the size of the batch by using a single shard + # and a small batch size on the request + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 1 + - do: + cluster.health: + wait_for_status: yellow + - do: + index: + index: test + type: foo + body: { "text": "test" } + - do: + 
index: + index: test + type: foo + body: { "text": "test" } + - do: + index: + index: test + type: foo + body: { "text": "test" } + - do: + indices.refresh: {} + + - do: + delete_by_query: + index: test + scroll_size: 1 + requests_per_second: 1 + body: + query: + match_all: {} + + - match: {batches: 3} + - match: {deleted: 3} + - gt: {throttled_millis: 1000} + - lt: {throttled_millis: 4000} + +--- +"requests_per_second supports unlimited which turns off throttling": + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 1 + - do: + cluster.health: + wait_for_status: yellow + - do: + index: + index: test + type: foo + body: { "text": "test" } + - do: + index: + index: test + type: foo + body: { "text": "test" } + - do: + index: + index: test + type: foo + body: { "text": "test" } + - do: + indices.refresh: {} + + - do: + delete_by_query: + index: test + scroll_size: 1 + requests_per_second: unlimited + body: + query: + match_all: {} + + - match: {batches: 3} + - match: {deleted: 3} + - match: {throttled_millis: 0} + +--- +"Rethrottle": + # Throttling happens between each scroll batch so we need to control the size of the batch by using a single shard + # and a small batch size on the request + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 1 + - do: + cluster.health: + wait_for_status: yellow + - do: + index: + index: test + type: foo + body: { "text": "test" } + - do: + index: + index: test + type: foo + body: { "text": "test" } + - do: + index: + index: test + type: foo + body: { "text": "test" } + - do: + indices.refresh: {} + + - do: + delete_by_query: + requests_per_second: .00000001 # About 9.5 years to complete the request + wait_for_completion: false + index: test + scroll_size: 1 + body: + query: + match_all: {} + + - match: {task: '/.+:\d+/'} + - set: {task: task} + + - do: + reindex.rethrottle: + requests_per_second: unlimited + task_id: $task + + - do: + tasks.list: + wait_for_completion: true + task_id: $task + + - do: + indices.refresh: {} + + - do: + count: + index: test + + - match: {count: 0} + +--- +"Rethrottle but not unlimited": + # Throttling happens between each scroll batch so we need to control the size of the batch by using a single shard + # and a small batch size on the request + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 1 + - do: + cluster.health: + wait_for_status: yellow + - do: + index: + index: test + type: foo + body: { "text": "test" } + - do: + index: + index: test + type: foo + body: { "text": "test" } + - do: + index: + index: test + type: foo + body: { "text": "test" } + - do: + indices.refresh: {} + + - do: + delete_by_query: + requests_per_second: .00000001 # About 9.5 years to complete the request + wait_for_completion: false + index: test + scroll_size: 1 + body: + query: + match_all: {} + + - match: {task: '/.+:\d+/'} + - set: {task: task} + + - do: + reindex.rethrottle: + requests_per_second: 1 + task_id: $task + + - do: + tasks.list: + wait_for_completion: true + task_id: $task diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yaml index 63d0edefe14..f4025383321 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yaml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yaml @@ -24,6 +24,7 @@ - match: {throttled_millis: 0} - gte: { took: 0 } - is_false: task + - is_false: deleted --- 
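Editorial note on the two "About 9.5 years" comments in the throttle tests above: assuming the throttle simply waits roughly batch_size / requests_per_second seconds between scroll batches (the model implied by the earlier assertion that three batches at requests_per_second: 1 accumulate between 1000 and 4000 throttled_millis), the arithmetic checks out. A minimal sketch:

```java
public class ThrottleMath {
    public static void main(String[] args) {
        // Assumed model, consistent with the assertions above: the throttle
        // waits roughly batchSize / requestsPerSecond seconds between batches.
        float requestsPerSecond = 0.00000001f; // value used by the rethrottle tests
        int batchSize = 1;                     // scroll_size: 1 -> one doc per batch
        double secondsPerBatch = batchSize / (double) requestsPerSecond; // 1e8 seconds
        double yearsPerBatch = secondsPerBatch / (365.0 * 24 * 60 * 60); // ~3.17 years
        // The tests index three documents, so three batches:
        System.out.printf("~%.1f years to drain three batches%n", 3 * yearsPerBatch);
    }
}
```

This prints roughly 9.5 years, which is why those tests submit the request with wait_for_completion: false and then rethrottle it; at the original rate the request would effectively never finish.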
"Response format for updated": @@ -57,6 +58,7 @@ - match: {throttled_millis: 0} - gte: { took: 0 } - is_false: task + - is_false: deleted --- "wait_for_completion=false": @@ -88,6 +90,7 @@ - is_false: took - is_false: throttled_millis - is_false: created + - is_false: deleted - do: tasks.list: diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/20_validation.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/20_validation.yaml index dc54c0d91cf..a7a5198e430 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/20_validation.yaml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/20_validation.yaml @@ -59,7 +59,7 @@ --- "search size fails if not a number": - do: - catch: '/NumberFormatException: For input string: "cat"/' + catch: '/number_format_exception.*For input string: \"cat\"/' reindex: body: source: diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/10_basic.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/10_basic.yaml index 89266198f2c..843bb9b6eb5 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/10_basic.yaml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/10_basic.yaml @@ -23,6 +23,7 @@ # Update by query can't create - is_false: created - is_false: task + - is_false: deleted --- "wait_for_completion=false": @@ -49,6 +50,7 @@ - is_false: took - is_false: throttled_millis - is_false: created + - is_false: deleted - do: tasks.list: diff --git a/plugins/jvm-example/src/main/bin/test b/plugins/jvm-example/src/main/bin/test index 76ba88943ac..13fdcce1e52 100755 --- a/plugins/jvm-example/src/main/bin/test +++ b/plugins/jvm-example/src/main/bin/test @@ -1,3 +1,3 @@ -#!/bin/sh +#!/bin/bash echo test diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreTests.java index f851a0e55cb..f2155e69e2d 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreTests.java @@ -25,9 +25,9 @@ import org.elasticsearch.cloud.azure.storage.AzureStorageService; import org.elasticsearch.cloud.azure.storage.AzureStorageServiceImpl; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.repositories.ESBlobStoreTestCase; import org.elasticsearch.repositories.RepositoryName; import org.elasticsearch.repositories.RepositorySettings; -import org.elasticsearch.test.ESBlobStoreTestCase; import org.elasticsearch.test.ESIntegTestCase; import java.io.IOException; diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle new file mode 100644 index 00000000000..9968d4408e4 --- /dev/null +++ b/plugins/repository-gcs/build.gradle @@ -0,0 +1,54 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +esplugin { + description 'The GCS repository plugin adds Google Cloud Storage support for repositories.' + classname 'org.elasticsearch.plugin.repository.gcs.GoogleCloudStoragePlugin' +} + +versions << [ + 'google': '1.21.0', +] + +dependencies { + compile "com.google.apis:google-api-services-storage:v1-rev66-${versions.google}" + compile "com.google.api-client:google-api-client:${versions.google}" + compile "com.google.oauth-client:google-oauth-client:${versions.google}" + compile "org.apache.httpcomponents:httpclient:${versions.httpclient}" + compile "org.apache.httpcomponents:httpcore:${versions.httpcore}" + compile "commons-logging:commons-logging:${versions.commonslogging}" + compile "commons-codec:commons-codec:${versions.commonscodec}" + compile "com.google.http-client:google-http-client:${versions.google}" + compile "com.google.http-client:google-http-client-jackson2:${versions.google}" +} + +dependencyLicenses { + mapping from: /google-.*/, to: 'google' +} + +thirdPartyAudit.excludes = [ + // classes are missing + 'com.google.common.base.Splitter', + 'com.google.common.collect.Lists', + 'javax.servlet.ServletContextEvent', + 'javax.servlet.ServletContextListener', + 'org.apache.avalon.framework.logger.Logger', + 'org.apache.log.Hierarchy', + 'org.apache.log.Logger', +] diff --git a/plugins/repository-gcs/licenses/commons-codec-1.10.jar.sha1 b/plugins/repository-gcs/licenses/commons-codec-1.10.jar.sha1 new file mode 100644 index 00000000000..3fe8682a1b0 --- /dev/null +++ b/plugins/repository-gcs/licenses/commons-codec-1.10.jar.sha1 @@ -0,0 +1 @@ +4b95f4897fa13f2cd904aee711aeafc0c5295cd8 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/commons-codec-LICENSE.txt b/plugins/repository-gcs/licenses/commons-codec-LICENSE.txt new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/plugins/repository-gcs/licenses/commons-codec-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
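An editorial aside before the remaining license files: each bundled jar in this licenses/ directory is pinned by a *.jar.sha1 file (one appears above for commons-codec; more follow below), and the build is expected to verify that the shipped jar hashes to the recorded digest. The Gradle task that performs this check is not part of this diff, so the following is only a conceptual, self-contained sketch of the comparison:

```java
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.MessageDigest;

public class Sha1Check {
    // Returns true if the jar's SHA-1 matches the hex digest stored in the .sha1 file.
    static boolean sha1Matches(Path jar, Path sha1File) throws Exception {
        MessageDigest digest = MessageDigest.getInstance("SHA-1");
        try (InputStream in = Files.newInputStream(jar)) {
            byte[] buffer = new byte[8192];
            for (int read = in.read(buffer); read != -1; read = in.read(buffer)) {
                digest.update(buffer, 0, read);
            }
        }
        StringBuilder hex = new StringBuilder();
        for (byte b : digest.digest()) {
            hex.append(String.format("%02x", b));
        }
        // The checked-in digests have no trailing newline (git prints
        // "\ No newline at end of file" for them), hence the trim().
        String expected = new String(Files.readAllBytes(sha1File), StandardCharsets.UTF_8).trim();
        return hex.toString().equals(expected);
    }

    public static void main(String[] args) throws Exception {
        System.out.println(sha1Matches(Paths.get(args[0]), Paths.get(args[1])));
    }
}
```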
diff --git a/plugins/repository-gcs/licenses/commons-codec-NOTICE.txt b/plugins/repository-gcs/licenses/commons-codec-NOTICE.txt new file mode 100644 index 00000000000..56916449bbe --- /dev/null +++ b/plugins/repository-gcs/licenses/commons-codec-NOTICE.txt @@ -0,0 +1,17 @@ +Apache Commons Codec +Copyright 2002-2015 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +src/test/org/apache/commons/codec/language/DoubleMetaphoneTest.java +contains test data from http://aspell.net/test/orig/batch0.tab. +Copyright (C) 2002 Kevin Atkinson (kevina@gnu.org) + +=============================================================================== + +The content of package org.apache.commons.codec.language.bm has been translated +from the original php source code available at http://stevemorse.org/phoneticinfo.htm +with permission from the original authors. +Original source copyright: +Copyright (c) 2008 Alexander Beider & Stephen P. Morse.
diff --git a/plugins/repository-gcs/licenses/commons-logging-1.1.3.jar.sha1 b/plugins/repository-gcs/licenses/commons-logging-1.1.3.jar.sha1 new file mode 100644 index 00000000000..5b8f029e582 --- /dev/null +++ b/plugins/repository-gcs/licenses/commons-logging-1.1.3.jar.sha1 @@ -0,0 +1 @@ +f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f \ No newline at end of file
diff --git a/plugins/repository-gcs/licenses/commons-logging-LICENSE.txt b/plugins/repository-gcs/licenses/commons-logging-LICENSE.txt new file mode 100644 index 00000000000..57bc88a15a0 --- /dev/null +++ b/plugins/repository-gcs/licenses/commons-logging-LICENSE.txt @@ -0,0 +1,202 @@ [standard Apache License, Version 2.0 text, identical to commons-codec-LICENSE.txt above]
diff --git a/plugins/repository-gcs/licenses/commons-logging-NOTICE.txt b/plugins/repository-gcs/licenses/commons-logging-NOTICE.txt new file mode 100644 index 00000000000..72eb32a9024 --- /dev/null +++ b/plugins/repository-gcs/licenses/commons-logging-NOTICE.txt @@ -0,0 +1,5 @@ +Apache Commons CLI +Copyright 2001-2009 The Apache Software Foundation + +This product includes software developed by +The Apache Software Foundation (http://www.apache.org/).
diff --git a/plugins/repository-gcs/licenses/google-LICENSE.txt b/plugins/repository-gcs/licenses/google-LICENSE.txt new file mode 100644 index 00000000000..980a15ac24e --- /dev/null +++ b/plugins/repository-gcs/licenses/google-LICENSE.txt @@ -0,0 +1,201 @@ [standard Apache License, Version 2.0 text, identical to commons-codec-LICENSE.txt above]
diff --git a/plugins/repository-gcs/licenses/google-NOTICE.txt b/plugins/repository-gcs/licenses/google-NOTICE.txt new file mode 100644 index 00000000000..8d1c8b69c3f --- /dev/null +++ b/plugins/repository-gcs/licenses/google-NOTICE.txt @@ -0,0 +1 @@ + diff --git a/plugins/repository-gcs/licenses/google-api-client-1.21.0.jar.sha1 b/plugins/repository-gcs/licenses/google-api-client-1.21.0.jar.sha1 new file mode 100644 index 00000000000..56988521028 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-api-client-1.21.0.jar.sha1 @@ -0,0 +1 @@ +16a6b3c680f3bf7b81bb42790ff5c1b72c5bbedc \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev66-1.21.0.jar.sha1 b/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev66-1.21.0.jar.sha1 new file mode 100644 index 00000000000..2a97aed2b79 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev66-1.21.0.jar.sha1 @@ -0,0 +1 @@ +eb753d716e4f8dec203deb0f8fdca86913a79029 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-1.21.0.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-1.21.0.jar.sha1 new file mode 100644 index 00000000000..401abd444ce --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-1.21.0.jar.sha1 @@ -0,0 +1 @@ +42631630fe1276d4d6d6397bb07d53a4e4fec278 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.21.0.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.21.0.jar.sha1 new file mode 100644 index 00000000000..e7ca5887412 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.21.0.jar.sha1 @@ -0,0 +1 @@ +8ce17bdd15fff0fd8cf359757f29e778fc7191ad \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-oauth-client-1.21.0.jar.sha1 b/plugins/repository-gcs/licenses/google-oauth-client-1.21.0.jar.sha1 new file mode 100644 index 00000000000..7e3de94a9bc --- /dev/null +++ b/plugins/repository-gcs/licenses/google-oauth-client-1.21.0.jar.sha1 @@ -0,0 +1 @@ +61ec42bbfc51aafde5eb8b4923c602c5b5965bc2 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/httpclient-4.3.6.jar.sha1 b/plugins/repository-gcs/licenses/httpclient-4.3.6.jar.sha1 new file mode 100644 index 00000000000..2c18ef0f54c --- /dev/null +++ b/plugins/repository-gcs/licenses/httpclient-4.3.6.jar.sha1 @@ -0,0 +1 @@ +4c47155e3e6c9a41a28db36680b828ced53b8af4 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/httpclient-LICENSE.txt b/plugins/repository-gcs/licenses/httpclient-LICENSE.txt new file mode 100644 index 00000000000..32f01eda18f --- /dev/null +++ b/plugins/repository-gcs/licenses/httpclient-LICENSE.txt @@ -0,0 +1,558 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + +========================================================================= + +This project includes Public Suffix List copied from + +licensed under the terms of the Mozilla Public License, v. 2.0 + +Full license text: + +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. 
"Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. 
+Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. 
Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. 
Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. 
+ +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/plugins/repository-gcs/licenses/httpclient-NOTICE.txt b/plugins/repository-gcs/licenses/httpclient-NOTICE.txt new file mode 100644 index 00000000000..4f6058178b2 --- /dev/null +++ b/plugins/repository-gcs/licenses/httpclient-NOTICE.txt @@ -0,0 +1,5 @@ +Apache HttpComponents Client +Copyright 1999-2015 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). diff --git a/plugins/repository-gcs/licenses/httpcore-4.3.3.jar.sha1 b/plugins/repository-gcs/licenses/httpcore-4.3.3.jar.sha1 new file mode 100644 index 00000000000..0ad1d24aa9f --- /dev/null +++ b/plugins/repository-gcs/licenses/httpcore-4.3.3.jar.sha1 @@ -0,0 +1 @@ +f91b7a4aadc5cf486df6e4634748d7dd7a73f06d \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/httpcore-LICENSE.txt b/plugins/repository-gcs/licenses/httpcore-LICENSE.txt new file mode 100644 index 00000000000..72819a9f06f --- /dev/null +++ b/plugins/repository-gcs/licenses/httpcore-LICENSE.txt @@ -0,0 +1,241 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + +========================================================================= + +This project contains annotations in the package org.apache.http.annotation +which are derived from JCIP-ANNOTATIONS +Copyright (c) 2005 Brian Goetz and Tim Peierls. +See http://www.jcip.net and the Creative Commons Attribution License +(http://creativecommons.org/licenses/by/2.5) +Full text: http://creativecommons.org/licenses/by/2.5/legalcode + +License + +THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED. + +BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE TO BE BOUND BY THE TERMS OF THIS LICENSE. THE LICENSOR GRANTS YOU THE RIGHTS CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND CONDITIONS. + +1. Definitions + + "Collective Work" means a work, such as a periodical issue, anthology or encyclopedia, in which the Work in its entirety in unmodified form, along with a number of other contributions, constituting separate and independent works in themselves, are assembled into a collective whole. A work that constitutes a Collective Work will not be considered a Derivative Work (as defined below) for the purposes of this License. 
+ "Derivative Work" means a work based upon the Work or upon the Work and other pre-existing works, such as a translation, musical arrangement, dramatization, fictionalization, motion picture version, sound recording, art reproduction, abridgment, condensation, or any other form in which the Work may be recast, transformed, or adapted, except that a work that constitutes a Collective Work will not be considered a Derivative Work for the purpose of this License. For the avoidance of doubt, where the Work is a musical composition or sound recording, the synchronization of the Work in timed-relation with a moving image ("synching") will be considered a Derivative Work for the purpose of this License. + "Licensor" means the individual or entity that offers the Work under the terms of this License. + "Original Author" means the individual or entity who created the Work. + "Work" means the copyrightable work of authorship offered under the terms of this License. + "You" means an individual or entity exercising rights under this License who has not previously violated the terms of this License with respect to the Work, or who has received express permission from the Licensor to exercise rights under this License despite a previous violation. + +2. Fair Use Rights. Nothing in this license is intended to reduce, limit, or restrict any rights arising from fair use, first sale or other limitations on the exclusive rights of the copyright owner under copyright law or other applicable laws. + +3. License Grant. Subject to the terms and conditions of this License, Licensor hereby grants You a worldwide, royalty-free, non-exclusive, perpetual (for the duration of the applicable copyright) license to exercise the rights in the Work as stated below: + + to reproduce the Work, to incorporate the Work into one or more Collective Works, and to reproduce the Work as incorporated in the Collective Works; + to create and reproduce Derivative Works; + to distribute copies or phonorecords of, display publicly, perform publicly, and perform publicly by means of a digital audio transmission the Work including as incorporated in Collective Works; + to distribute copies or phonorecords of, display publicly, perform publicly, and perform publicly by means of a digital audio transmission Derivative Works. + + For the avoidance of doubt, where the work is a musical composition: + Performance Royalties Under Blanket Licenses. Licensor waives the exclusive right to collect, whether individually or via a performance rights society (e.g. ASCAP, BMI, SESAC), royalties for the public performance or public digital performance (e.g. webcast) of the Work. + Mechanical Rights and Statutory Royalties. Licensor waives the exclusive right to collect, whether individually or via a music rights agency or designated agent (e.g. Harry Fox Agency), royalties for any phonorecord You create from the Work ("cover version") and distribute, subject to the compulsory license created by 17 USC Section 115 of the US Copyright Act (or the equivalent in other jurisdictions). + Webcasting Rights and Statutory Royalties. For the avoidance of doubt, where the Work is a sound recording, Licensor waives the exclusive right to collect, whether individually or via a performance-rights society (e.g. SoundExchange), royalties for the public digital performance (e.g. webcast) of the Work, subject to the compulsory license created by 17 USC Section 114 of the US Copyright Act (or the equivalent in other jurisdictions). 
+ +The above rights may be exercised in all media and formats whether now known or hereafter devised. The above rights include the right to make such modifications as are technically necessary to exercise the rights in other media and formats. All rights not expressly granted by Licensor are hereby reserved. + +4. Restrictions.The license granted in Section 3 above is expressly made subject to and limited by the following restrictions: + + You may distribute, publicly display, publicly perform, or publicly digitally perform the Work only under the terms of this License, and You must include a copy of, or the Uniform Resource Identifier for, this License with every copy or phonorecord of the Work You distribute, publicly display, publicly perform, or publicly digitally perform. You may not offer or impose any terms on the Work that alter or restrict the terms of this License or the recipients' exercise of the rights granted hereunder. You may not sublicense the Work. You must keep intact all notices that refer to this License and to the disclaimer of warranties. You may not distribute, publicly display, publicly perform, or publicly digitally perform the Work with any technological measures that control access or use of the Work in a manner inconsistent with the terms of this License Agreement. The above applies to the Work as incorporated in a Collective Work, but this does not require the Collective Work apart from the Work itself to be made subject to the terms of this License. If You create a Collective Work, upon notice from any Licensor You must, to the extent practicable, remove from the Collective Work any credit as required by clause 4(b), as requested. If You create a Derivative Work, upon notice from any Licensor You must, to the extent practicable, remove from the Derivative Work any credit as required by clause 4(b), as requested. + If you distribute, publicly display, publicly perform, or publicly digitally perform the Work or any Derivative Works or Collective Works, You must keep intact all copyright notices for the Work and provide, reasonable to the medium or means You are utilizing: (i) the name of the Original Author (or pseudonym, if applicable) if supplied, and/or (ii) if the Original Author and/or Licensor designate another party or parties (e.g. a sponsor institute, publishing entity, journal) for attribution in Licensor's copyright notice, terms of service or by other reasonable means, the name of such party or parties; the title of the Work if supplied; to the extent reasonably practicable, the Uniform Resource Identifier, if any, that Licensor specifies to be associated with the Work, unless such URI does not refer to the copyright notice or licensing information for the Work; and in the case of a Derivative Work, a credit identifying the use of the Work in the Derivative Work (e.g., "French translation of the Work by Original Author," or "Screenplay based on original Work by Original Author"). Such credit may be implemented in any reasonable manner; provided, however, that in the case of a Derivative Work or Collective Work, at a minimum such credit will appear where any other comparable authorship credit appears and in a manner at least as prominent as such other comparable authorship credit. + +5. 
Representations, Warranties and Disclaimer + +UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR OFFERS THE WORK AS-IS AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTIBILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS, WHETHER OR NOT DISCOVERABLE. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OF IMPLIED WARRANTIES, SO SUCH EXCLUSION MAY NOT APPLY TO YOU. + +6. Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE LAW, IN NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR ANY SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +7. Termination + + This License and the rights granted hereunder will terminate automatically upon any breach by You of the terms of this License. Individuals or entities who have received Derivative Works or Collective Works from You under this License, however, will not have their licenses terminated provided such individuals or entities remain in full compliance with those licenses. Sections 1, 2, 5, 6, 7, and 8 will survive any termination of this License. + Subject to the above terms and conditions, the license granted here is perpetual (for the duration of the applicable copyright in the Work). Notwithstanding the above, Licensor reserves the right to release the Work under different license terms or to stop distributing the Work at any time; provided, however that any such election will not serve to withdraw this License (or any other license that has been, or is required to be, granted under the terms of this License), and this License will continue in full force and effect unless terminated as stated above. + +8. Miscellaneous + + Each time You distribute or publicly digitally perform the Work or a Collective Work, the Licensor offers to the recipient a license to the Work on the same terms and conditions as the license granted to You under this License. + Each time You distribute or publicly digitally perform a Derivative Work, Licensor offers to the recipient a license to the original Work on the same terms and conditions as the license granted to You under this License. + If any provision of this License is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this License, and without further action by the parties to this agreement, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. + No term or provision of this License shall be deemed waived and no breach consented to unless such waiver or consent shall be in writing and signed by the party to be charged with such waiver or consent. + This License constitutes the entire agreement between the parties with respect to the Work licensed here. There are no understandings, agreements or representations with respect to the Work not specified here. Licensor shall not be bound by any additional provisions that may appear in any communication from You. This License may not be modified without the mutual written agreement of the Licensor and You. 
diff --git a/plugins/repository-gcs/licenses/httpcore-NOTICE.txt b/plugins/repository-gcs/licenses/httpcore-NOTICE.txt new file mode 100644 index 00000000000..c0be50a505e --- /dev/null +++ b/plugins/repository-gcs/licenses/httpcore-NOTICE.txt @@ -0,0 +1,8 @@ +Apache HttpComponents Core +Copyright 2005-2014 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +This project contains annotations derived from JCIP-ANNOTATIONS +Copyright (c) 2005 Brian Goetz and Tim Peierls. See http://www.jcip.net diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/common/blobstore/gcs/GoogleCloudStorageBlobContainer.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/common/blobstore/gcs/GoogleCloudStorageBlobContainer.java new file mode 100644 index 00000000000..d8117180ce3 --- /dev/null +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/common/blobstore/gcs/GoogleCloudStorageBlobContainer.java @@ -0,0 +1,120 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.blobstore.gcs; + +import org.elasticsearch.common.blobstore.BlobMetaData; +import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.BlobStoreException; +import org.elasticsearch.common.blobstore.support.AbstractBlobContainer; +import org.elasticsearch.common.bytes.BytesReference; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Collection; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + + +public class GoogleCloudStorageBlobContainer extends AbstractBlobContainer { + + private final GoogleCloudStorageBlobStore blobStore; + private final String path; + + GoogleCloudStorageBlobContainer(BlobPath path, GoogleCloudStorageBlobStore blobStore) { + super(path); + this.blobStore = blobStore; + + String keyPath = path.buildAsString("/"); + // TODO Move this keyPath logic to the buildAsString() method + if (!keyPath.isEmpty()) { + keyPath = keyPath + "/"; + } + this.path = keyPath; + } + + @Override + public boolean blobExists(String blobName) { + try { + return blobStore.blobExists(buildKey(blobName)); + } catch (Exception e) { + throw new BlobStoreException("Failed to check if blob [" + blobName + "] exists", e); + } + } + + @Override + public Map<String, BlobMetaData> listBlobs() throws IOException { + return blobStore.listBlobs(path); + } + + @Override + public Map<String, BlobMetaData> listBlobsByPrefix(String prefix) throws IOException { + return blobStore.listBlobsByPrefix(path, prefix); + } + + @Override + public InputStream readBlob(String blobName) throws IOException { + return blobStore.readBlob(buildKey(blobName)); + } + + @Override + public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException { + blobStore.writeBlob(buildKey(blobName), inputStream, blobSize); + } + + @Override + public void writeBlob(String blobName, BytesReference bytes) throws IOException { + writeBlob(blobName, bytes.streamInput(), bytes.length()); + } + + @Override + public void deleteBlob(String blobName) throws IOException { + blobStore.deleteBlob(buildKey(blobName)); + } + + @Override + public void deleteBlobsByPrefix(String prefix) throws IOException { + blobStore.deleteBlobsByPrefix(buildKey(prefix)); + } + + @Override + public void deleteBlobs(Collection<String> blobNames) throws IOException { + blobStore.deleteBlobs(buildKeys(blobNames)); + } + + @Override + public void move(String sourceBlobName, String targetBlobName) throws IOException { + blobStore.moveBlob(buildKey(sourceBlobName), buildKey(targetBlobName)); + } + + protected String buildKey(String blobName) { + assert blobName != null; + return path + blobName; + } + + protected Set<String> buildKeys(Collection<String> blobNames) { + Set<String> keys = new HashSet<>(); + if (blobNames != null) { + keys.addAll(blobNames.stream().map(this::buildKey).collect(Collectors.toList())); + } + return keys; + } +} diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/common/blobstore/gcs/GoogleCloudStorageBlobStore.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/common/blobstore/gcs/GoogleCloudStorageBlobStore.java new file mode 100644 index 00000000000..7bf79494440 --- /dev/null +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/common/blobstore/gcs/GoogleCloudStorageBlobStore.java @@ -0,0 +1,432 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements.
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.blobstore.gcs; + +import com.google.api.client.googleapis.batch.BatchRequest; +import com.google.api.client.googleapis.batch.json.JsonBatchCallback; +import com.google.api.client.googleapis.json.GoogleJsonError; +import com.google.api.client.googleapis.json.GoogleJsonResponseException; +import com.google.api.client.http.HttpHeaders; +import com.google.api.client.http.InputStreamContent; +import com.google.api.services.storage.Storage; +import com.google.api.services.storage.model.Bucket; +import com.google.api.services.storage.model.Objects; +import com.google.api.services.storage.model.StorageObject; +import org.elasticsearch.SpecialPermission; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.blobstore.BlobContainer; +import org.elasticsearch.common.blobstore.BlobMetaData; +import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.blobstore.BlobStoreException; +import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.CountDown; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.security.AccessController; +import java.security.PrivilegedActionException; +import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Spliterator; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import java.util.stream.StreamSupport; + +import static java.net.HttpURLConnection.HTTP_NOT_FOUND; + +public class GoogleCloudStorageBlobStore extends AbstractComponent implements BlobStore { + + /** + * Google Cloud Storage batch requests are limited to 1000 operations + **/ + private static final int MAX_BATCHING_REQUESTS = 999; + + private final Storage client; + private final String bucket; + + public GoogleCloudStorageBlobStore(Settings settings, String bucket, Storage storageClient) { + super(settings); + this.bucket = bucket; + this.client = storageClient; + + if (doesBucketExist(bucket) == false) { + throw new BlobStoreException("Bucket [" + bucket + "] does not exist"); + } + } + + @Override + public BlobContainer blobContainer(BlobPath path) { + return new GoogleCloudStorageBlobContainer(path, this); + } + + @Override + public void delete(BlobPath path) throws IOException { + String keyPath = path.buildAsString("/"); + // TODO Move this keyPath logic to the buildAsString() method + if (!keyPath.isEmpty()) 
{ + keyPath = keyPath + "/"; + } + deleteBlobsByPrefix(keyPath); + } + + @Override + public void close() { + } + + /** + * Returns true if the given bucket exists + * + * @param bucketName name of the bucket + * @return true if the bucket exists, false otherwise + */ + boolean doesBucketExist(String bucketName) { + try { + return doPrivileged(() -> { + try { + Bucket bucket = client.buckets().get(bucketName).execute(); + if (bucket != null) { + return Strings.hasText(bucket.getId()); + } + } catch (GoogleJsonResponseException e) { + GoogleJsonError error = e.getDetails(); + if ((e.getStatusCode() == HTTP_NOT_FOUND) || ((error != null) && (error.getCode() == HTTP_NOT_FOUND))) { + return false; + } + throw e; + } + return false; + }); + } catch (IOException e) { + throw new BlobStoreException("Unable to check if bucket [" + bucketName + "] exists", e); + } + } + + /** + * List all blobs in the bucket + * + * @param path base path of the blobs to list + * @return a map of blob names and their metadata + */ + Map<String, BlobMetaData> listBlobs(String path) throws IOException { + return doPrivileged(() -> listBlobsByPath(bucket, path, path)); + } + + /** + * List all blobs in the bucket which have a prefix + * + * @param path base path of the blobs to list + * @param prefix prefix of the blobs to list + * @return a map of blob names and their metadata + */ + Map<String, BlobMetaData> listBlobsByPrefix(String path, String prefix) throws IOException { + return doPrivileged(() -> listBlobsByPath(bucket, buildKey(path, prefix), path)); + } + + /** + * Lists all blobs in a given bucket + * + * @param bucketName name of the bucket + * @param path base path of the blobs to list + * @param pathToRemove path part that is removed from the name of the returned blobs + * @return a map of blob names and their metadata + */ + private Map<String, BlobMetaData> listBlobsByPath(String bucketName, String path, String pathToRemove) throws IOException { + return blobsStream(client, bucketName, path, MAX_BATCHING_REQUESTS) + .map(new BlobMetaDataConverter(pathToRemove)) + .collect(Collectors.toMap(PlainBlobMetaData::name, Function.identity())); + } + + /** + * Returns true if the blob exists in the bucket + * + * @param blobName name of the blob + * @return true if the blob exists, false otherwise + */ + boolean blobExists(String blobName) throws IOException { + return doPrivileged(() -> { + try { + StorageObject blob = client.objects().get(bucket, blobName).execute(); + if (blob != null) { + return Strings.hasText(blob.getId()); + } + } catch (GoogleJsonResponseException e) { + GoogleJsonError error = e.getDetails(); + if ((e.getStatusCode() == HTTP_NOT_FOUND) || ((error != null) && (error.getCode() == HTTP_NOT_FOUND))) { + return false; + } + throw e; + } + return false; + }); + } + + /** + * Returns an {@link java.io.InputStream} for a given blob + * + * @param blobName name of the blob + * @return an InputStream + */ + InputStream readBlob(String blobName) throws IOException { + return doPrivileged(() -> { + try { + Storage.Objects.Get object = client.objects().get(bucket, blobName); + return object.executeMediaAsInputStream(); + } catch (GoogleJsonResponseException e) { + GoogleJsonError error = e.getDetails(); + if ((e.getStatusCode() == HTTP_NOT_FOUND) || ((error != null) && (error.getCode() == HTTP_NOT_FOUND))) { + throw new FileNotFoundException(e.getMessage()); + } + throw e; + } + }); + } + + /** + * Writes a blob in the bucket.
+ * + * @param blobName name of the blob to write + * @param inputStream content of the blob to be written + * @param blobSize expected size of the blob to be written + */ + void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException { + doPrivileged(() -> { + InputStreamContent stream = new InputStreamContent(null, inputStream); + stream.setLength(blobSize); + + Storage.Objects.Insert insert = client.objects().insert(bucket, null, stream); + insert.setName(blobName); + insert.execute(); + return null; + }); + } + + /** + * Deletes a blob in the bucket + * + * @param blobName name of the blob + */ + void deleteBlob(String blobName) throws IOException { + doPrivileged(() -> client.objects().delete(bucket, blobName).execute()); + } + + /** + * Deletes multiple blobs in the bucket that have a given prefix + * + * @param prefix prefix of the blobs to delete + */ + void deleteBlobsByPrefix(String prefix) throws IOException { + doPrivileged(() -> { + deleteBlobs(listBlobsByPath(bucket, prefix, null).keySet()); + return null; + }); + } + + /** + * Deletes multiple blobs in the given bucket (uses a batch request to perform this) + * + * @param blobNames names of the blobs to delete + */ + void deleteBlobs(Collection<String> blobNames) throws IOException { + if (blobNames == null || blobNames.isEmpty()) { + return; + } + + if (blobNames.size() == 1) { + deleteBlob(blobNames.iterator().next()); + return; + } + + doPrivileged(() -> { + final List<Storage.Objects.Delete> deletions = new ArrayList<>(); + final Iterator<String> blobs = blobNames.iterator(); + + while (blobs.hasNext()) { + // Create a delete request for each blob to delete + deletions.add(client.objects().delete(bucket, blobs.next())); + + if (blobs.hasNext() == false || deletions.size() == MAX_BATCHING_REQUESTS) { + try { + // Deletions are executed using a batch request + BatchRequest batch = client.batch(); + + // Used to track successful deletions + CountDown countDown = new CountDown(deletions.size()); + + for (Storage.Objects.Delete delete : deletions) { + // Queue the delete request in batch + delete.queue(batch, new JsonBatchCallback<Void>() { + @Override + public void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) throws IOException { + logger.error("failed to delete blob [{}] in bucket [{}]: {}", delete.getObject(), delete.getBucket(), e + .getMessage()); + } + + @Override + public void onSuccess(Void aVoid, HttpHeaders responseHeaders) throws IOException { + countDown.countDown(); + } + }); + } + + batch.execute(); + + if (countDown.isCountedDown() == false) { + throw new IOException("Failed to delete all [" + deletions.size() + "] blobs"); + } + } finally { + deletions.clear(); + } + } + } + return null; + }); + } + + /** + * Moves a blob within the same bucket + * + * @param sourceBlob name of the blob to move + * @param targetBlob new name of the blob in the same bucket + */ + void moveBlob(String sourceBlob, String targetBlob) throws IOException { + doPrivileged(() -> { + // There's no atomic "move" in GCS so we need to copy and delete + client.objects().copy(bucket, sourceBlob, bucket, targetBlob, null).execute(); + client.objects().delete(bucket, sourceBlob).execute(); + return null; + }); + } + + /** + * Executes a {@link PrivilegedExceptionAction} with privileges enabled.
+ */ + <T> T doPrivileged(PrivilegedExceptionAction<T> operation) throws IOException { + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + sm.checkPermission(new SpecialPermission()); + } + try { + return AccessController.doPrivileged((PrivilegedExceptionAction<T>) operation::run); + } catch (PrivilegedActionException e) { + throw (IOException) e.getException(); + } + } + + private String buildKey(String keyPath, String s) { + assert s != null; + return keyPath + s; + } + + /** + * Converts a {@link StorageObject} to a {@link PlainBlobMetaData} + */ + class BlobMetaDataConverter implements Function<StorageObject, PlainBlobMetaData> { + + private final String pathToRemove; + + BlobMetaDataConverter(String pathToRemove) { + this.pathToRemove = pathToRemove; + } + + @Override + public PlainBlobMetaData apply(StorageObject storageObject) { + String blobName = storageObject.getName(); + if (Strings.hasLength(pathToRemove)) { + blobName = blobName.substring(pathToRemove.length()); + } + return new PlainBlobMetaData(blobName, storageObject.getSize().longValue()); + } + } + + /** + * A Spliterator that can be used to list the storage objects stored in a bucket. + */ + static class StorageObjectsSpliterator implements Spliterator<StorageObject> { + + private final Storage.Objects.List list; + + StorageObjectsSpliterator(Storage client, String bucketName, String prefix, long pageSize) throws IOException { + list = client.objects().list(bucketName); + list.setMaxResults(pageSize); + if (prefix != null) { + list.setPrefix(prefix); + } + } + + @Override + public boolean tryAdvance(Consumer<? super StorageObject> action) { + try { + // Retrieves the next page of items + Objects objects = list.execute(); + + if ((objects == null) || (objects.getItems() == null) || (objects.getItems().isEmpty())) { + return false; + } + + // Consumes all the items + objects.getItems().forEach(action::accept); + + // Sets the page token of the next page, + // null indicates that all items have been consumed + String next = objects.getNextPageToken(); + if (next != null) { + list.setPageToken(next); + return true; + } + + return false; + } catch (Exception e) { + throw new BlobStoreException("Exception while listing objects", e); + } + } + + @Override + public Spliterator<StorageObject> trySplit() { + return null; + } + + @Override + public long estimateSize() { + return Long.MAX_VALUE; + } + + @Override + public int characteristics() { + return 0; + } + } + + /** + * Returns a {@link Stream} of {@link StorageObject}s that are stored in a given bucket. + */ + static Stream<StorageObject> blobsStream(Storage client, String bucketName, String prefix, long pageSize) throws IOException { + return StreamSupport.stream(new StorageObjectsSpliterator(client, bucketName, prefix, pageSize), false); + } + +} diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/plugin/repository/gcs/GoogleCloudStorageModule.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/plugin/repository/gcs/GoogleCloudStorageModule.java new file mode 100644 index 00000000000..8a4bf88ed74 --- /dev/null +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/plugin/repository/gcs/GoogleCloudStorageModule.java @@ -0,0 +1,31 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.plugin.repository.gcs; + +import org.elasticsearch.common.inject.AbstractModule; +import org.elasticsearch.repositories.gcs.GoogleCloudStorageService; + +public class GoogleCloudStorageModule extends AbstractModule { + + @Override + protected void configure() { + bind(GoogleCloudStorageService.class).to(GoogleCloudStorageService.InternalGoogleCloudStorageService.class).asEagerSingleton(); + } +} diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/plugin/repository/gcs/GoogleCloudStoragePlugin.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/plugin/repository/gcs/GoogleCloudStoragePlugin.java new file mode 100644 index 00000000000..477a083fad5 --- /dev/null +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/plugin/repository/gcs/GoogleCloudStoragePlugin.java @@ -0,0 +1,131 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.plugin.repository.gcs; + +import com.google.api.client.auth.oauth2.TokenRequest; +import com.google.api.client.auth.oauth2.TokenResponse; +import com.google.api.client.googleapis.json.GoogleJsonError; +import com.google.api.client.http.GenericUrl; +import com.google.api.client.http.HttpHeaders; +import com.google.api.client.json.GenericJson; +import com.google.api.client.json.webtoken.JsonWebSignature; +import com.google.api.client.json.webtoken.JsonWebToken; +import com.google.api.client.util.ClassInfo; +import com.google.api.client.util.Data; +import com.google.api.services.storage.Storage; +import com.google.api.services.storage.model.Bucket; +import com.google.api.services.storage.model.Objects; +import com.google.api.services.storage.model.StorageObject; +import org.elasticsearch.SpecialPermission; +import org.elasticsearch.common.inject.Module; +import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardRepository; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.RepositoriesModule; +import org.elasticsearch.repositories.gcs.GoogleCloudStorageRepository; + +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.Collection; +import java.util.Collections; + +public class GoogleCloudStoragePlugin extends Plugin { + + public static final String NAME = "repository-gcs"; + + static { + /* + * Google HTTP client changes access levels because it's silly and we + * can't allow that on any old stack, so we pull it here, up front, + * so we can cleanly check the permissions for it. Without this, changing + * the permission can fail if any part of core is on the stack because + * our plugin permissions don't allow core to "reach through" plugins to + * change the permission. Because that'd be silly. + */ + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + sm.checkPermission(new SpecialPermission()); + } + AccessController.doPrivileged((PrivilegedAction<Void>) () -> { + // ClassInfo caches all the fields of a given class + // that are annotated with @Key; at the same time it changes + // the field access level using setAccessible(). Calling + // it here puts the ClassInfo in the cache (entries are never evicted) + // before the SecurityManager is installed.
ClassInfo.of(HttpHeaders.class, true); + + ClassInfo.of(JsonWebSignature.Header.class, false); + ClassInfo.of(JsonWebToken.Payload.class, false); + + ClassInfo.of(TokenRequest.class, false); + ClassInfo.of(TokenResponse.class, false); + + ClassInfo.of(GenericJson.class, false); + ClassInfo.of(GenericUrl.class, false); + + Data.nullOf(GoogleJsonError.ErrorInfo.class); + ClassInfo.of(GoogleJsonError.class, false); + + Data.nullOf(Bucket.Cors.class); + ClassInfo.of(Bucket.class, false); + ClassInfo.of(Bucket.Cors.class, false); + ClassInfo.of(Bucket.Lifecycle.class, false); + ClassInfo.of(Bucket.Logging.class, false); + ClassInfo.of(Bucket.Owner.class, false); + ClassInfo.of(Bucket.Versioning.class, false); + ClassInfo.of(Bucket.Website.class, false); + + ClassInfo.of(StorageObject.class, false); + ClassInfo.of(StorageObject.Owner.class, false); + + ClassInfo.of(Objects.class, false); + + ClassInfo.of(Storage.Buckets.Get.class, false); + ClassInfo.of(Storage.Buckets.Insert.class, false); + + ClassInfo.of(Storage.Objects.Get.class, false); + ClassInfo.of(Storage.Objects.Insert.class, false); + ClassInfo.of(Storage.Objects.Delete.class, false); + ClassInfo.of(Storage.Objects.Copy.class, false); + ClassInfo.of(Storage.Objects.List.class, false); + + return null; + }); + } + + @Override + public String name() { + return NAME; + } + + @Override + public String description() { + return "Google Cloud Storage Repository Plugin"; + } + + @Override + public Collection<Module> nodeModules() { + return Collections.singletonList(new GoogleCloudStorageModule()); + } + + public void onModule(RepositoriesModule repositoriesModule) { + repositoriesModule.registerRepository(GoogleCloudStorageRepository.TYPE, + GoogleCloudStorageRepository.class, BlobStoreIndexShardRepository.class); + } +} diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java new file mode 100644 index 00000000000..337fbcf8d72 --- /dev/null +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.repositories.gcs; + +import com.google.api.services.storage.Storage; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.blobstore.gcs.GoogleCloudStorageBlobStore; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.snapshots.IndexShardRepository; +import org.elasticsearch.plugin.repository.gcs.GoogleCloudStoragePlugin; +import org.elasticsearch.repositories.RepositoryException; +import org.elasticsearch.repositories.RepositoryName; +import org.elasticsearch.repositories.RepositorySettings; +import org.elasticsearch.repositories.blobstore.BlobStoreRepository; + +import java.util.function.Function; + +import static org.elasticsearch.common.settings.Setting.Property; +import static org.elasticsearch.common.settings.Setting.boolSetting; +import static org.elasticsearch.common.settings.Setting.byteSizeSetting; +import static org.elasticsearch.common.settings.Setting.simpleString; +import static org.elasticsearch.common.settings.Setting.timeSetting; +import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; + +public class GoogleCloudStorageRepository extends BlobStoreRepository { + + public static final String TYPE = "gcs"; + + public static final TimeValue NO_TIMEOUT = timeValueMillis(-1); + + public static final Setting<String> BUCKET = + simpleString("bucket", Property.NodeScope, Property.Dynamic); + public static final Setting<String> BASE_PATH = + simpleString("base_path", Property.NodeScope, Property.Dynamic); + public static final Setting<Boolean> COMPRESS = + boolSetting("compress", false, Property.NodeScope, Property.Dynamic); + public static final Setting<ByteSizeValue> CHUNK_SIZE = + byteSizeSetting("chunk_size", new ByteSizeValue(100, ByteSizeUnit.MB), Property.NodeScope, Property.Dynamic); + public static final Setting<String> APPLICATION_NAME = + new Setting<>("application_name", GoogleCloudStoragePlugin.NAME, Function.identity(), Property.NodeScope, Property.Dynamic); + public static final Setting<String> SERVICE_ACCOUNT = + simpleString("service_account", Property.NodeScope, Property.Dynamic, Property.Filtered); + public static final Setting<TimeValue> HTTP_READ_TIMEOUT = + timeSetting("http.read_timeout", NO_TIMEOUT, Property.NodeScope, Property.Dynamic); + public static final Setting<TimeValue> HTTP_CONNECT_TIMEOUT = + timeSetting("http.connect_timeout", NO_TIMEOUT, Property.NodeScope, Property.Dynamic); + + private final ByteSizeValue chunkSize; + private final boolean compress; + private final BlobPath basePath; + private final GoogleCloudStorageBlobStore blobStore; + + @Inject + public GoogleCloudStorageRepository(RepositoryName repositoryName, RepositorySettings repositorySettings, + IndexShardRepository indexShardRepository, + GoogleCloudStorageService storageService) throws Exception { + super(repositoryName.getName(), repositorySettings, indexShardRepository); + + String bucket = get(BUCKET, repositoryName, repositorySettings); + String application = get(APPLICATION_NAME, repositoryName, repositorySettings); + String serviceAccount = get(SERVICE_ACCOUNT, repositoryName, repositorySettings); + + String basePath = BASE_PATH.get(repositorySettings.settings()); + if (Strings.hasLength(basePath)) { + BlobPath path = new BlobPath(); + for (String
elem : basePath.split("/")) { + path = path.add(elem); + } + this.basePath = path; + } else { + this.basePath = BlobPath.cleanPath(); + } + + TimeValue connectTimeout = null; + TimeValue readTimeout = null; + + TimeValue timeout = HTTP_CONNECT_TIMEOUT.get(repositorySettings.settings()); + if ((timeout != null) && (timeout.millis() != NO_TIMEOUT.millis())) { + connectTimeout = timeout; + } + + timeout = HTTP_READ_TIMEOUT.get(repositorySettings.settings()); + if ((timeout != null) && (timeout.millis() != NO_TIMEOUT.millis())) { + readTimeout = timeout; + } + + this.compress = get(COMPRESS, repositoryName, repositorySettings); + this.chunkSize = get(CHUNK_SIZE, repositoryName, repositorySettings); + + logger.debug("using bucket [{}], base_path [{}], chunk_size [{}], compress [{}], application [{}]", + bucket, basePath, chunkSize, compress, application); + + Storage client = storageService.createClient(serviceAccount, application, connectTimeout, readTimeout); + this.blobStore = new GoogleCloudStorageBlobStore(settings, bucket, client); + } + + + @Override + protected BlobStore blobStore() { + return blobStore; + } + + @Override + protected BlobPath basePath() { + return basePath; + } + + @Override + protected boolean isCompress() { + return compress; + } + + @Override + protected ByteSizeValue chunkSize() { + return chunkSize; + } + + /** + * Gets a given setting from the repository settings, throwing a {@link RepositoryException} if the setting does not exist or is empty. + */ + static <T> T get(Setting<T> setting, RepositoryName name, RepositorySettings repositorySettings) { + T value = setting.get(repositorySettings.settings()); + if (value == null) { + throw new RepositoryException(name.getName(), "Setting [" + setting.getKey() + "] is not defined for repository"); + } + if ((value instanceof String) && (Strings.hasText((String) value)) == false) { + throw new RepositoryException(name.getName(), "Setting [" + setting.getKey() + "] is empty for repository"); + } + return value; + } +} diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java new file mode 100644 index 00000000000..098ce5f1504 --- /dev/null +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java @@ -0,0 +1,176 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.repositories.gcs; + +import com.google.api.client.googleapis.auth.oauth2.GoogleCredential; +import com.google.api.client.googleapis.javanet.GoogleNetHttpTransport; +import com.google.api.client.http.HttpBackOffIOExceptionHandler; +import com.google.api.client.http.HttpBackOffUnsuccessfulResponseHandler; +import com.google.api.client.http.HttpIOExceptionHandler; +import com.google.api.client.http.HttpRequest; +import com.google.api.client.http.HttpRequestInitializer; +import com.google.api.client.http.HttpUnsuccessfulResponseHandler; +import com.google.api.client.http.javanet.NetHttpTransport; +import com.google.api.client.json.jackson2.JacksonFactory; +import com.google.api.client.util.ExponentialBackOff; +import com.google.api.services.storage.Storage; +import com.google.api.services.storage.StorageScopes; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.env.Environment; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Collections; + +public interface GoogleCloudStorageService { + + /** + * Creates a client that can be used to manage Google Cloud Storage objects. + * + * @param serviceAccount path to service account file + * @param application name of the application + * @param connectTimeout connection timeout for HTTP requests + * @param readTimeout read timeout for HTTP requests + * @return a Client instance that can be used to manage objects + */ + Storage createClient(String serviceAccount, String application, TimeValue connectTimeout, TimeValue readTimeout) throws Exception; + + /** + * Default implementation + */ + class InternalGoogleCloudStorageService extends AbstractComponent implements GoogleCloudStorageService { + + private static final String DEFAULT = "_default_"; + + private final Environment environment; + + @Inject + public InternalGoogleCloudStorageService(Settings settings, Environment environment) { + super(settings); + this.environment = environment; + } + + @Override + public Storage createClient(String serviceAccount, String application, TimeValue connectTimeout, TimeValue readTimeout) + throws Exception { + try { + GoogleCredential credentials = (DEFAULT.equalsIgnoreCase(serviceAccount)) ? 
loadDefault() : loadCredentials(serviceAccount); + NetHttpTransport httpTransport = GoogleNetHttpTransport.newTrustedTransport(); + + Storage.Builder storage = new Storage.Builder(httpTransport, JacksonFactory.getDefaultInstance(), + new DefaultHttpRequestInitializer(credentials, connectTimeout, readTimeout)); + storage.setApplicationName(application); + + logger.debug("initializing client with service account [{}/{}]", + credentials.getServiceAccountId(), credentials.getServiceAccountUser()); + return storage.build(); + } catch (IOException e) { + throw new ElasticsearchException("Error when loading Google Cloud Storage credentials file", e); + } + } + + /** + * Loads the credentials from the service account file located in the config directory + * and scopes them for full control of Cloud Storage objects + */ + private GoogleCredential loadCredentials(String serviceAccount) throws IOException { + if (serviceAccount == null) { + throw new ElasticsearchException("Cannot load Google Cloud Storage service account file from a null path"); + } + + Path account = environment.configFile().resolve(serviceAccount); + if (Files.exists(account) == false) { + throw new ElasticsearchException("Unable to find service account file [" + serviceAccount + + "] defined for repository"); + } + + try (InputStream is = Files.newInputStream(account)) { + GoogleCredential credential = GoogleCredential.fromStream(is); + if (credential.createScopedRequired()) { + credential = credential.createScoped(Collections.singleton(StorageScopes.DEVSTORAGE_FULL_CONTROL)); + } + return credential; + } + } + + /** + * Loads the application default credentials, e.g. when running on Compute Engine + */ + private GoogleCredential loadDefault() throws IOException { + return GoogleCredential.getApplicationDefault(); + } + + /** + * HTTP request initializer that sets timeouts and backoff handlers while deferring authentication to GoogleCredential. + * See https://cloud.google.com/storage/transfer/create-client#retry + */ + class DefaultHttpRequestInitializer implements HttpRequestInitializer { + + private final TimeValue connectTimeout; + private final TimeValue readTimeout; + private final GoogleCredential credential; + private final HttpUnsuccessfulResponseHandler handler; + private final HttpIOExceptionHandler ioHandler; + + DefaultHttpRequestInitializer(GoogleCredential credential, TimeValue connectTimeout, TimeValue readTimeout) { + this.credential = credential; + this.connectTimeout = connectTimeout; + this.readTimeout = readTimeout; + this.handler = new HttpBackOffUnsuccessfulResponseHandler(newBackOff()); + this.ioHandler = new HttpBackOffIOExceptionHandler(newBackOff()); + } + + @Override + public void initialize(HttpRequest request) throws IOException { + if (connectTimeout != null) { + request.setConnectTimeout((int) connectTimeout.millis()); + } + if (readTimeout != null) { + request.setReadTimeout((int) readTimeout.millis()); + } + + request.setIOExceptionHandler(ioHandler); + request.setInterceptor(credential); + + request.setUnsuccessfulResponseHandler((req, resp, supportsRetry) -> { + // Let the credential handle the response.
If it failed, we rely on our backoff handler + return credential.handleResponse(req, resp, supportsRetry) || handler.handleResponse(req, resp, supportsRetry); + } + ); + } + + private ExponentialBackOff newBackOff() { + return new ExponentialBackOff.Builder() + .setInitialIntervalMillis(100) + .setMaxIntervalMillis(6000) + .setMaxElapsedTimeMillis(900000) + .setMultiplier(1.5) + .setRandomizationFactor(0.5) + .build(); + } + } + } +} diff --git a/plugins/repository-gcs/src/main/plugin-metadata/plugin-security.policy b/plugins/repository-gcs/src/main/plugin-metadata/plugin-security.policy new file mode 100644 index 00000000000..bc7acd60602 --- /dev/null +++ b/plugins/repository-gcs/src/main/plugin-metadata/plugin-security.policy @@ -0,0 +1,26 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +grant { + permission java.lang.RuntimePermission "accessDeclaredMembers"; + permission java.lang.RuntimePermission "setFactory"; + permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; + permission java.net.URLPermission "http://www.googleapis.com/*", "*"; + permission java.net.URLPermission "https://www.googleapis.com/*", "*"; +}; diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/common/blobstore/gcs/GoogleCloudStorageBlobStoreContainerTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/common/blobstore/gcs/GoogleCloudStorageBlobStoreContainerTests.java new file mode 100644 index 00000000000..4fe8c718345 --- /dev/null +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/common/blobstore/gcs/GoogleCloudStorageBlobStoreContainerTests.java @@ -0,0 +1,36 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.blobstore.gcs; + +import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.repositories.ESBlobStoreContainerTestCase; + +import java.io.IOException; +import java.util.Locale; + +public class GoogleCloudStorageBlobStoreContainerTests extends ESBlobStoreContainerTestCase { + + @Override + protected BlobStore newBlobStore() throws IOException { + String bucket = randomAsciiOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); + return new GoogleCloudStorageBlobStore(Settings.EMPTY, bucket, MockHttpTransport.newStorage(bucket, getTestName())); + } +} diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/common/blobstore/gcs/GoogleCloudStorageBlobStoreTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/common/blobstore/gcs/GoogleCloudStorageBlobStoreTests.java new file mode 100644 index 00000000000..b5489466b51 --- /dev/null +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/common/blobstore/gcs/GoogleCloudStorageBlobStoreTests.java @@ -0,0 +1,36 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.blobstore.gcs; + +import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.repositories.ESBlobStoreTestCase; + +import java.io.IOException; +import java.util.Locale; + +public class GoogleCloudStorageBlobStoreTests extends ESBlobStoreTestCase { + + @Override + protected BlobStore newBlobStore() throws IOException { + String bucket = randomAsciiOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); + return new GoogleCloudStorageBlobStore(Settings.EMPTY, bucket, MockHttpTransport.newStorage(bucket, getTestName())); + } +} diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/common/blobstore/gcs/MockHttpTransport.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/common/blobstore/gcs/MockHttpTransport.java new file mode 100644 index 00000000000..196fcf12f87 --- /dev/null +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/common/blobstore/gcs/MockHttpTransport.java @@ -0,0 +1,432 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.blobstore.gcs; + +import com.google.api.client.http.HttpTransport; +import com.google.api.client.http.LowLevelHttpRequest; +import com.google.api.client.http.LowLevelHttpResponse; +import com.google.api.client.json.Json; +import com.google.api.client.json.jackson2.JacksonFactory; +import com.google.api.client.testing.http.MockLowLevelHttpRequest; +import com.google.api.client.testing.http.MockLowLevelHttpResponse; +import com.google.api.services.storage.Storage; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.path.PathTrie; +import org.elasticsearch.common.util.Callback; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.support.RestUtils; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; + +/** + * Mock for {@link HttpTransport} to test Google Cloud Storage service. + *
+ * This basically handles each type of request used by the {@link GoogleCloudStorageBlobStore} and provides appropriate responses like + * the Google Cloud Storage service would do. It is largely based on official documentation available at https://cloud.google + * .com/storage/docs/json_api/v1/. + */ +public class MockHttpTransport extends com.google.api.client.testing.http.MockHttpTransport { + + private final AtomicInteger objectsCount = new AtomicInteger(0); + private final Map<String, String> objectsNames = ConcurrentCollections.newConcurrentMap(); + private final Map<String, byte[]> objectsContent = ConcurrentCollections.newConcurrentMap(); + + private final PathTrie<Handler> handlers = new PathTrie<>(RestUtils.REST_DECODER); + + public MockHttpTransport(String bucket) { + + // GET Bucket + // + // https://cloud.google.com/storage/docs/json_api/v1/buckets/get + handlers.insert("GET https://www.googleapis.com/storage/v1/b/{bucket}", (url, params, req) -> { + String name = params.get("bucket"); + if (Strings.hasText(name) == false) { + return newMockError(RestStatus.INTERNAL_SERVER_ERROR, "bucket name is missing"); + } + + if (name.equals(bucket)) { + return newMockResponse().setContent(buildBucketResource(bucket)); + } else { + return newMockError(RestStatus.NOT_FOUND, "bucket not found"); + } + }); + + // GET Object + // + // https://cloud.google.com/storage/docs/json_api/v1/objects/get + handlers.insert("GET https://www.googleapis.com/storage/v1/b/{bucket}/o/{object}", (url, params, req) -> { + String name = params.get("object"); + if (Strings.hasText(name) == false) { + return newMockError(RestStatus.INTERNAL_SERVER_ERROR, "object name is missing"); + } + + for (Map.Entry<String, String> object : objectsNames.entrySet()) { + if (object.getValue().equals(name)) { + byte[] content = objectsContent.get(object.getKey()); + if (content != null) { + return newMockResponse().setContent(buildObjectResource(bucket, name, object.getKey(), content.length)); + } + } + } + return newMockError(RestStatus.NOT_FOUND, "object not found"); + }); + + // Download Object + // + // https://cloud.google.com/storage/docs/request-endpoints + handlers.insert("GET https://www.googleapis.com/download/storage/v1/b/{bucket}/o/{object}", (url, params, req) -> { + String name = params.get("object"); + if (Strings.hasText(name) == false) { + return newMockError(RestStatus.INTERNAL_SERVER_ERROR, "object name is missing"); + } + + for (Map.Entry<String, String> object : objectsNames.entrySet()) { + if (object.getValue().equals(name)) { + byte[] content = objectsContent.get(object.getKey()); + if (content == null) { + return newMockError(RestStatus.INTERNAL_SERVER_ERROR, "object content is missing"); + } + return newMockResponse().setContent(new ByteArrayInputStream(content)); + } + } + return newMockError(RestStatus.NOT_FOUND, "object not found"); + }); + + // Insert Object (initialization) + // + // https://cloud.google.com/storage/docs/json_api/v1/objects/insert + handlers.insert("POST https://www.googleapis.com/upload/storage/v1/b/{bucket}/o", (url, params, req) -> { + if ("resumable".equals(params.get("uploadType")) == false) { + return newMockError(RestStatus.INTERNAL_SERVER_ERROR, "upload type must be resumable"); + } + + String name = params.get("name"); + if (Strings.hasText(name) == false) { + return newMockError(RestStatus.INTERNAL_SERVER_ERROR, "object name is missing"); + } + + String objectId = String.valueOf(objectsCount.getAndIncrement()); + objectsNames.put(objectId, name); + + return newMockResponse() + .setStatusCode(RestStatus.CREATED.getStatus()) +
.addHeader("Location", "https://www.googleapis.com/upload/storage/v1/b/" + bucket + + "/o?uploadType=resumable&upload_id=" + objectId); + }); + + // Insert Object (upload) + // + // https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload + handlers.insert("PUT https://www.googleapis.com/upload/storage/v1/b/{bucket}/o", (url, params, req) -> { + String objectId = params.get("upload_id"); + if (Strings.hasText(objectId) == false) { + return newMockError(RestStatus.INTERNAL_SERVER_ERROR, "upload id is missing"); + } + + String name = objectsNames.get(objectId); + if (Strings.hasText(name) == false) { + return newMockError(RestStatus.NOT_FOUND, "object name not found"); + } + + ByteArrayOutputStream os = new ByteArrayOutputStream((int) req.getContentLength()); + try { + req.getStreamingContent().writeTo(os); + os.close(); + } catch (IOException e) { + return newMockError(RestStatus.INTERNAL_SERVER_ERROR, e.getMessage()); + } + + byte[] content = os.toByteArray(); + objectsContent.put(objectId, content); + return newMockResponse().setContent(buildObjectResource(bucket, name, objectId, content.length)); + }); + + // List Objects + // + // https://cloud.google.com/storage/docs/json_api/v1/objects/list + handlers.insert("GET https://www.googleapis.com/storage/v1/b/{bucket}/o", (url, params, req) -> { + String prefix = params.get("prefix"); + + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + builder.field("kind", "storage#objects"); + builder.startArray("items"); + for (Map.Entry o : objectsNames.entrySet()) { + if (prefix != null && o.getValue().startsWith(prefix) == false) { + continue; + } + buildObjectResource(builder, bucket, o.getValue(), o.getKey(), objectsContent.get(o.getKey()).length); + } + builder.endArray(); + builder.endObject(); + return newMockResponse().setContent(builder.string()); + } catch (IOException e) { + return newMockError(RestStatus.INTERNAL_SERVER_ERROR, e.getMessage()); + } + }); + + // Delete Object + // + // https://cloud.google.com/storage/docs/json_api/v1/objects/delete + handlers.insert("DELETE https://www.googleapis.com/storage/v1/b/{bucket}/o/{object}", (url, params, req) -> { + String name = params.get("object"); + if (Strings.hasText(name) == false) { + return newMockError(RestStatus.INTERNAL_SERVER_ERROR, "object name is missing"); + } + + String objectId = null; + for (Map.Entry object : objectsNames.entrySet()) { + if (object.getValue().equals(name)) { + objectId = object.getKey(); + break; + } + } + + if (objectId != null) { + objectsNames.remove(objectId); + objectsContent.remove(objectId); + return newMockResponse().setStatusCode(RestStatus.NO_CONTENT.getStatus()); + } + return newMockError(RestStatus.NOT_FOUND, "object not found"); + }); + + // Copy Object + // + // https://cloud.google.com/storage/docs/json_api/v1/objects/copy + handlers.insert("POST https://www.googleapis.com/storage/v1/b/{srcBucket}/o/{srcObject}/copyTo/b/{destBucket}/o/{destObject}", + (url, params, req) -> { + String source = params.get("srcObject"); + if (Strings.hasText(source) == false) { + return newMockError(RestStatus.INTERNAL_SERVER_ERROR, "source object name is missing"); + } + + String dest = params.get("destObject"); + if (Strings.hasText(dest) == false) { + return newMockError(RestStatus.INTERNAL_SERVER_ERROR, "destination object name is missing"); + } + + String srcObjectId = null; + for (Map.Entry object : objectsNames.entrySet()) { + if (object.getValue().equals(source)) { + srcObjectId = object.getKey(); + break; + } + } + 
+ if (srcObjectId == null) { + return newMockError(RestStatus.NOT_FOUND, "source object not found"); + } + + byte[] content = objectsContent.get(srcObjectId); + if (content == null) { + return newMockError(RestStatus.NOT_FOUND, "source content cannot be found"); + } + + String destObjectId = String.valueOf(objectsCount.getAndIncrement()); + objectsNames.put(destObjectId, dest); + objectsContent.put(destObjectId, content); + + return newMockResponse().setContent(buildObjectResource(bucket, dest, destObjectId, content.length)); + }); + + // Batch + // + // https://cloud.google.com/storage/docs/json_api/v1/how-tos/batch + handlers.insert("POST https://www.googleapis.com/batch", (url, params, req) -> { + List<MockLowLevelHttpResponse> responses = new ArrayList<>(); + + // A batch request body looks like this: + // + // --__END_OF_PART__ + // Content-Length: 71 + // Content-Type: application/http + // content-id: 1 + // content-transfer-encoding: binary + // + // DELETE https://www.googleapis.com/storage/v1/b/ohifkgu/o/foo%2Ftest + // + // + // --__END_OF_PART__ + // Content-Length: 71 + // Content-Type: application/http + // content-id: 2 + // content-transfer-encoding: binary + // + // DELETE https://www.googleapis.com/storage/v1/b/ohifkgu/o/bar%2Ftest + // + // + // --__END_OF_PART__-- + + // Here we simply process the request body line by line and delegate to other handlers + // if possible. + try (ByteArrayOutputStream os = new ByteArrayOutputStream((int) req.getContentLength())) { + req.getStreamingContent().writeTo(os); + + Streams.readAllLines(new ByteArrayInputStream(os.toByteArray()), new Callback<String>() { + @Override + public void handle(String line) { + Handler handler = handlers.retrieve(line, params); + if (handler != null) { + try { + responses.add(handler.execute(line, params, req)); + } catch (IOException e) { + responses.add(newMockError(RestStatus.INTERNAL_SERVER_ERROR, e.getMessage())); + } + } + } + }); + } + + // Now we can build the response + String boundary = "__END_OF_PART__"; + String sep = "--"; + String line = "\r\n"; + + StringBuilder builder = new StringBuilder(); + for (MockLowLevelHttpResponse resp : responses) { + builder.append(sep).append(boundary).append(line); + builder.append(line); + builder.append("HTTP/1.1 ").append(resp.getStatusCode()).append(' ').append(resp.getReasonPhrase()).append(line); + builder.append("Content-Length: ").append(resp.getContentLength()).append(line); + builder.append(line); + } + builder.append(line); + builder.append(sep).append(boundary).append(sep); + + return newMockResponse().setContentType("multipart/mixed; boundary=" + boundary).setContent(builder.toString()); + }); + } + + @Override + public LowLevelHttpRequest buildRequest(String method, String url) throws IOException { + return new MockLowLevelHttpRequest() { + @Override + public LowLevelHttpResponse execute() throws IOException { + String rawPath = url; + Map<String, String> params = new HashMap<>(); + + int pathEndPos = url.indexOf('?'); + if (pathEndPos != -1) { + rawPath = url.substring(0, pathEndPos); + RestUtils.decodeQueryString(url, pathEndPos + 1, params); + } + + Handler handler = handlers.retrieve(method + " " + rawPath, params); + if (handler != null) { + return handler.execute(rawPath, params, this); + } + return newMockError(RestStatus.INTERNAL_SERVER_ERROR, "Unable to handle request [method=" + method + ", url=" + url + "]"); + } + }; + } + + private static MockLowLevelHttpResponse newMockResponse() { + return new MockLowLevelHttpResponse() + .setContentType(Json.MEDIA_TYPE) +
.setStatusCode(RestStatus.OK.getStatus()) + .setReasonPhrase(RestStatus.OK.name()); + } + + private static MockLowLevelHttpResponse newMockError(RestStatus status, String message) { + MockLowLevelHttpResponse response = newMockResponse().setStatusCode(status.getStatus()).setReasonPhrase(status.name()); + try { + response.setContent(buildErrorResource(status, message)); + } catch (IOException e) { + response.setContent("Failed to build error resource [" + message + "] because of: " + e.getMessage()); + } + return response; + } + + /** + * Storage Error JSON representation + */ + private static String buildErrorResource(RestStatus status, String message) throws IOException { + return jsonBuilder() + .startObject() + .startObject("error") + .field("code", status.getStatus()) + .field("message", message) + .startArray("errors") + .startObject() + .field("domain", "global") + .field("reason", status.toString()) + .field("message", message) + .endObject() + .endArray() + .endObject() + .endObject() + .string(); + } + + /** + * Storage Bucket JSON representation as defined in + * https://cloud.google.com/storage/docs/json_api/v1/bucket#resource + */ + private static String buildBucketResource(String name) throws IOException { + return jsonBuilder().startObject() + .field("kind", "storage#bucket") + .field("id", name) + .endObject() + .string(); + } + + /** + * Storage Object JSON representation as defined in + * https://cloud.google.com/storage/docs/json_api/v1/objects#resource + */ + private static XContentBuilder buildObjectResource(XContentBuilder builder, String bucket, String name, String id, int size) + throws IOException { + return builder.startObject() + .field("kind", "storage#object") + .field("id", String.join("/", bucket, name, id)) + .field("name", name) + .field("size", String.valueOf(size)) + .endObject(); + } + + private static String buildObjectResource(String bucket, String name, String id, int size) throws IOException { + return buildObjectResource(jsonBuilder(), bucket, name, id, size).string(); + } + + interface Handler { + MockLowLevelHttpResponse execute(String url, Map<String, String> params, MockLowLevelHttpRequest request) throws IOException; + } + + /** + * Instantiates a mocked Storage client for tests. + */ + public static Storage newStorage(String bucket, String applicationName) { + return new Storage.Builder(new MockHttpTransport(bucket), new JacksonFactory(), null) + .setApplicationName(applicationName) + .build(); + } +} diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java new file mode 100644 index 00000000000..c5b57ba6cd6 --- /dev/null +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java @@ -0,0 +1,96 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.repositories.gcs; + +import com.google.api.services.storage.Storage; +import org.elasticsearch.common.blobstore.gcs.MockHttpTransport; +import org.elasticsearch.common.inject.Module; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.plugin.repository.gcs.GoogleCloudStorageModule; +import org.elasticsearch.plugin.repository.gcs.GoogleCloudStoragePlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.ESBlobStoreRepositoryIntegTestCase; +import org.junit.BeforeClass; + +import java.util.Collection; +import java.util.Collections; +import java.util.concurrent.atomic.AtomicReference; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; + +public class GoogleCloudStorageBlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCase { + + private static final String BUCKET = "gcs-repository-test"; + + // Static storage client shared among all nodes in order to act like a remote repository service: + // all nodes must see the same content + private static final AtomicReference<Storage> storage = new AtomicReference<>(); + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return pluginList(MockGoogleCloudStoragePlugin.class); + } + + @Override + protected void createTestRepository(String name) { + assertAcked(client().admin().cluster().preparePutRepository(name) + .setType(GoogleCloudStorageRepository.TYPE) + .setSettings(Settings.builder() + .put("bucket", BUCKET) + .put("base_path", GoogleCloudStorageBlobStoreRepositoryTests.class.getSimpleName()) + .put("service_account", "_default_") + .put("compress", randomBoolean()) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); + } + + @BeforeClass + public static void setUpStorage() { + storage.set(MockHttpTransport.newStorage(BUCKET, GoogleCloudStorageBlobStoreRepositoryTests.class.getName())); + } + + public static class MockGoogleCloudStoragePlugin extends GoogleCloudStoragePlugin { + + public MockGoogleCloudStoragePlugin() { + } + + @Override + public Collection<Module> nodeModules() { + return Collections.singletonList(new MockGoogleCloudStorageModule()); + } + } + + public static class MockGoogleCloudStorageModule extends GoogleCloudStorageModule { + @Override + protected void configure() { + bind(GoogleCloudStorageService.class).to(MockGoogleCloudStorageService.class).asEagerSingleton(); + } + } + + public static class MockGoogleCloudStorageService implements GoogleCloudStorageService { + + @Override + public Storage createClient(String serviceAccount, String application, TimeValue connectTimeout, TimeValue readTimeout) throws + Exception { + return storage.get(); + } + } +} diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepositoryRestIT.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepositoryRestIT.java new file mode 100644 index 00000000000..18862d05aa0 --- /dev/null +++
b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepositoryRestIT.java @@ -0,0 +1,41 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.repositories.gcs; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.test.rest.RestTestCandidate; +import org.elasticsearch.test.rest.parser.RestTestParseException; + +import java.io.IOException; + +public class GoogleCloudStorageRepositoryRestIT extends ESRestTestCase { + + public GoogleCloudStorageRepositoryRestIT(@Name("yaml") RestTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable<Object[]> parameters() throws IOException, RestTestParseException { + return createParameters(0, 1); + } +} + diff --git a/plugins/repository-gcs/src/test/resources/rest-api-spec/test/repository_gcs/10_basic.yaml b/plugins/repository-gcs/src/test/resources/rest-api-spec/test/repository_gcs/10_basic.yaml new file mode 100644 index 00000000000..a37fb779549 --- /dev/null +++ b/plugins/repository-gcs/src/test/resources/rest-api-spec/test/repository_gcs/10_basic.yaml @@ -0,0 +1,13 @@ +# Integration tests for Repository GCS component +# +"Repository GCS loaded": + - do: + cluster.state: {} + + # Get master node id + - set: { master_node: master } + + - do: + nodes.info: {} + + - match: { nodes.$master.plugins.0.name: repository-gcs } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/RepositoryS3SettingsTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/RepositoryS3SettingsTests.java index c5c617e8591..4cb8e4d3abb 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/RepositoryS3SettingsTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/RepositoryS3SettingsTests.java @@ -316,7 +316,7 @@ public class RepositoryS3SettingsTests extends ESTestCase { "Failed to parse value [4mb] for setting [buffer_size] must be >= 5mb"); // chunk > 5tb should fail internalTestInvalidChunkBufferSizeSettings(new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(6, ByteSizeUnit.TB), - "Failed to parse value [6tb] for setting [chunk_size] must be =< 5tb"); + "Failed to parse value [6tb] for setting [chunk_size] must be <= 5tb"); } private Settings buildSettings(Settings...
global) { diff --git a/qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/update_by_query/10_script.yaml b/qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/update_by_query/10_script.yaml index 00c6e814eed..54a79ac1e32 100644 --- a/qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/update_by_query/10_script.yaml +++ b/qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/update_by_query/10_script.yaml @@ -102,7 +102,7 @@ - match: {batches: 1} --- -"Setting bogus ctx is an error": +"Setting bogus context is an error": - do: index: index: twitter @@ -113,7 +113,7 @@ indices.refresh: {} - do: - catch: /Invalid fields added to ctx \[junk\]/ + catch: /Invalid fields added to context \[junk\]/ update_by_query: index: twitter body: diff --git a/qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash b/qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash index bd5cd499015..07fea76bd8b 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash +++ b/qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash @@ -289,6 +289,10 @@ fi install_and_check_plugin repository azure azure-storage-*.jar } +@test "[$GROUP] install repository-gcs plugin" { + install_and_check_plugin repository gcs google-api-services-storage-*.jar +} + @test "[$GROUP] install repository-s3 plugin" { install_and_check_plugin repository s3 aws-java-sdk-core-*.jar } @@ -387,6 +391,10 @@ fi remove_plugin repository-azure } +@test "[$GROUP] remove repository-gcs plugin" { + remove_plugin repository-gcs +} + @test "[$GROUP] remove repository-hdfs plugin" { remove_plugin repository-hdfs } diff --git a/qa/vagrant/src/test/resources/packaging/scripts/modules.bash b/qa/vagrant/src/test/resources/packaging/scripts/modules.bash index bd6da680da9..2e80fd648f3 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/modules.bash +++ b/qa/vagrant/src/test/resources/packaging/scripts/modules.bash @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash # This file contains some utilities to test the elasticsearch scripts, # the .deb/.rpm packages and the SysV/Systemd scripts. diff --git a/qa/vagrant/src/test/resources/packaging/scripts/os_package.bash b/qa/vagrant/src/test/resources/packaging/scripts/os_package.bash index 72c59c39324..ee6e491d169 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/os_package.bash +++ b/qa/vagrant/src/test/resources/packaging/scripts/os_package.bash @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash # This file contains some utilities to test the elasticsearch scripts with # the .deb/.rpm packages. diff --git a/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash b/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash index 09d0190695e..5f50dfc2850 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash +++ b/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash # This file contains some utilities to test the elasticsearch scripts, # the .deb/.rpm packages and the SysV/Systemd scripts. 
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/plugins.bash b/qa/vagrant/src/test/resources/packaging/scripts/plugins.bash index 4f1e574b905..afae7439057 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/plugins.bash +++ b/qa/vagrant/src/test/resources/packaging/scripts/plugins.bash @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash # This file contains some utilities to test the elasticsearch scripts, # the .deb/.rpm packages and the SysV/Systemd scripts. diff --git a/qa/vagrant/src/test/resources/packaging/scripts/tar.bash b/qa/vagrant/src/test/resources/packaging/scripts/tar.bash index 277eee60f1a..798ec6c2997 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/tar.bash +++ b/qa/vagrant/src/test/resources/packaging/scripts/tar.bash @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash # This file contains some utilities to test the elasticsearch scripts, # the .deb/.rpm packages and the SysV/Systemd scripts. diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json new file mode 100644 index 00000000000..981aea79a1c --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json @@ -0,0 +1,207 @@ +{ + "delete_by_query": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/plugins/master/plugins-reindex.html", + "methods": ["POST"], + "url": { + "path": "/{index}/_delete_by_query", + "paths": ["/{index}/_delete_by_query", "/{index}/{type}/_delete_by_query"], + "comment": "most things below this are just copied from search.json", + "parts": { + "index": { + "required" : true, + "type" : "list", + "description" : "A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices" + }, + "type": { + "type" : "list", + "description" : "A comma-separated list of document types to search; leave empty to perform the operation on all types" + } + }, + "params": { + "analyzer": { + "type" : "string", + "description" : "The analyzer to use for the query string" + }, + "analyze_wildcard": { + "type" : "boolean", + "description" : "Specify whether wildcard and prefix queries should be analyzed (default: false)" + }, + "default_operator": { + "type" : "enum", + "options" : ["AND","OR"], + "default" : "OR", + "description" : "The default operator for query string query (AND or OR)" + }, + "df": { + "type" : "string", + "description" : "The field to use as default where no field prefix is given in the query string" + }, + "explain": { + "type" : "boolean", + "description" : "Specify whether to return detailed information about score computation as part of a hit" + }, + "fields": { + "type" : "list", + "description" : "A comma-separated list of fields to return as part of a hit" + }, + "fielddata_fields": { + "type" : "list", + "description" : "A comma-separated list of fields to return as the field data representation of a field for each hit" + }, + "from": { + "type" : "number", + "description" : "Starting offset (default: 0)" + }, + "ignore_unavailable": { + "type" : "boolean", + "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)" + }, + "allow_no_indices": { + "type" : "boolean", + "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. 
(This includes `_all` string or when no indices have been specified)" + }, + "conflicts": { + "note": "This is not copied from search", + "type" : "enum", + "options": ["abort", "proceed"], + "default": "abort", + "description" : "What to do when the delete-by-query hits version conflicts?" + }, + "expand_wildcards": { + "type" : "enum", + "options" : ["open","closed","none","all"], + "default" : "open", + "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both." + }, + "lenient": { + "type" : "boolean", + "description" : "Specify whether format-based query failures (such as providing text to a numeric field) should be ignored" + }, + "lowercase_expanded_terms": { + "type" : "boolean", + "description" : "Specify whether query terms should be lowercased" + }, + "preference": { + "type" : "string", + "description" : "Specify the node or shard the operation should be performed on (default: random)" + }, + "q": { + "type" : "string", + "description" : "Query in the Lucene query string syntax" + }, + "routing": { + "type" : "list", + "description" : "A comma-separated list of specific routing values" + }, + "scroll": { + "type" : "duration", + "description" : "Specify how long a consistent view of the index should be maintained for scrolled search" + }, + "search_type": { + "type" : "enum", + "options" : ["query_then_fetch", "dfs_query_then_fetch"], + "description" : "Search operation type" + }, + "search_timeout": { + "type" : "time", + "description" : "Explicit timeout for each search request. Defaults to no timeout." + }, + "size": { + "type" : "number", + "description" : "Number of hits to return (default: 10)" + }, + "sort": { + "type" : "list", + "description" : "A comma-separated list of : pairs" + }, + "_source": { + "type" : "list", + "description" : "True or false to return the _source field or not, or a list of fields to return" + }, + "_source_exclude": { + "type" : "list", + "description" : "A list of fields to exclude from the returned _source field" + }, + "_source_include": { + "type" : "list", + "description" : "A list of fields to extract and return from the _source field" + }, + "terminate_after": { + "type" : "number", + "description" : "The maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early." 
+ }, + "stats": { + "type" : "list", + "description" : "Specific 'tag' of the request for logging and statistical purposes" + }, + "suggest_field": { + "type" : "string", + "description" : "Specify which field to use for suggestions" + }, + "suggest_mode": { + "type" : "enum", + "options" : ["missing", "popular", "always"], + "default" : "missing", + "description" : "Specify suggest mode" + }, + "suggest_size": { + "type" : "number", + "description" : "How many suggestions to return in response" + }, + "suggest_text": { + "type" : "text", + "description" : "The source text for which the suggestions should be returned" + }, + "track_scores": { + "type" : "boolean", + "description": "Whether to calculate and return scores even if they are not used for sorting" + }, + "version": { + "type" : "boolean", + "description" : "Specify whether to return document version as part of a hit" + }, + "request_cache": { + "type" : "boolean", + "description" : "Specify if request cache should be used for this request or not, defaults to index level setting" + }, + "refresh": { + "type" : "boolean", + "description" : "Should the affected indexes be refreshed?" + }, + "timeout": { + "type" : "time", + "default": "1m", + "description" : "Time each individual bulk request should wait for shards that are unavailable." + }, + "consistency": { + "type" : "enum", + "options" : ["one", "quorum", "all"], + "description" : "Explicit write consistency setting for the operation" + }, + "scroll_size": { + "type": "integer", + "default": 100, + "description": "Size on the scroll request powering the delete_by_query" + }, + "wait_for_completion": { + "type" : "boolean", + "default": false, + "description" : "Should the request block until the delete-by-query is complete?" + }, + "requests_per_second": { + "type": "float", + "default": 0, + "description": "The throttle for this request in sub-requests per second. 0 means set no throttle."
+ } + } + }, + "body": { + "description": "The search definition using the Query DSL", + "required": true + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/reindex.rethrottle.json b/rest-api-spec/src/main/resources/rest-api-spec/api/reindex.rethrottle.json index 4903c7598c3..921249ab4cd 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/reindex.rethrottle.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/reindex.rethrottle.json @@ -4,7 +4,7 @@ "methods": ["POST"], "url": { "path": "/_reindex/{task_id}/_rethrottle", - "paths": ["/_reindex/{task_id}/_rethrottle", "/_update_by_query/{task_id}/_rethrottle"], + "paths": ["/_reindex/{task_id}/_rethrottle", "/_update_by_query/{task_id}/_rethrottle", "/_delete_by_query/{task_id}/_rethrottle"], "parts": { "task_id": { "type": "string", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/30_discovery.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/30_discovery.yaml index 4769465eb1d..2617f76941c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/30_discovery.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/30_discovery.yaml @@ -1,9 +1,5 @@ --- "Discovery stats": - - skip: - version: "5.0.0 - " - reason: Tracked in issue 18433 - - do: cluster.state: {} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml index 71d1a1e7ca2..c35e79e6cfe 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml @@ -117,6 +117,33 @@ setup: - match: { aggregations.ip_terms.buckets.1.doc_count: 1 } + - do: + search: + body: { "size" : 0, "aggs" : { "ip_terms" : { "terms" : { "field" : "ip", "include" : [ "127.0.0.1" ] } } } } + + - match: { hits.total: 3 } + + - length: { aggregations.ip_terms.buckets: 1 } + + - match: { aggregations.ip_terms.buckets.0.key: "127.0.0.1" } + + - do: + search: + body: { "size" : 0, "aggs" : { "ip_terms" : { "terms" : { "field" : "ip", "exclude" : [ "127.0.0.1" ] } } } } + + - match: { hits.total: 3 } + + - length: { aggregations.ip_terms.buckets: 1 } + + - match: { aggregations.ip_terms.buckets.0.key: "::1" } + + - do: + catch: request + search: + body: { "size" : 0, "aggs" : { "ip_terms" : { "terms" : { "field" : "ip", "exclude" : "127.*" } } } } + + + --- "Boolean test": - do: @@ -300,4 +327,27 @@ setup: - match: { aggregations.date_terms.buckets.1.key_as_string: "2014-09-01T00:00:00.000Z" } - match: { aggregations.date_terms.buckets.1.doc_count: 1 } + + - do: + search: + body: { "size" : 0, "aggs" : { "date_terms" : { "terms" : { "field" : "date", "include" : [ "2016-05-03" ] } } } } + - match: { hits.total: 3 } + + - length: { aggregations.date_terms.buckets: 1 } + + - match: { aggregations.date_terms.buckets.0.key_as_string: "2016-05-03T00:00:00.000Z" } + + - match: { aggregations.date_terms.buckets.0.doc_count: 2 } + + - do: + search: + body: { "size" : 0, "aggs" : { "date_terms" : { "terms" : { "field" : "date", "exclude" : [ "2016-05-03" ] } } } } + + - match: { hits.total: 3 } + + - length: { aggregations.date_terms.buckets: 1 } + + - match: { aggregations.date_terms.buckets.0.key_as_string: "2014-09-01T00:00:00.000Z" } + + - match: { aggregations.date_terms.buckets.0.doc_count: 1 } diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/30_sig_terms.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/30_sig_terms.yaml index 45c042baea4..a708ff19d7e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/30_sig_terms.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/30_sig_terms.yaml @@ -121,3 +121,28 @@ - is_false: aggregations.ip_terms.buckets.0.key_as_string - match: { aggregations.ip_terms.buckets.0.doc_count: 1 } + + - do: + search: + body: { "query" : { "exists" : { "field" : "ip" } }, "aggs" : { "ip_terms" : { "significant_terms" : { "field" : "ip", "min_doc_count" : 1, "include" : [ "::1" ] } } } } + + - match: { hits.total: 1 } + + - length: { aggregations.ip_terms.buckets: 1 } + + - match: { aggregations.ip_terms.buckets.0.key: "::1" } + + - do: + search: + body: { "query" : { "exists" : { "field" : "ip" } }, "aggs" : { "ip_terms" : { "significant_terms" : { "field" : "ip", "min_doc_count" : 1, "exclude" : [ "::1" ] } } } } + + - match: { hits.total: 1 } + + - length: { aggregations.ip_terms.buckets: 0 } + + - do: + catch: request + search: + body: { "size" : 0, "aggs" : { "ip_terms" : { "significant_terms" : { "field" : "ip", "exclude" : "127.*" } } } } + + diff --git a/settings.gradle b/settings.gradle index 3a8b0f66210..88217a9dde5 100644 --- a/settings.gradle +++ b/settings.gradle @@ -37,6 +37,7 @@ List projects = [ 'plugins:mapper-murmur3', 'plugins:mapper-size', 'plugins:repository-azure', + 'plugins:repository-gcs', 'plugins:repository-hdfs', 'plugins:repository-s3', 'plugins:jvm-example', diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESBlobStoreContainerTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java similarity index 94% rename from test/framework/src/main/java/org/elasticsearch/test/ESBlobStoreContainerTestCase.java rename to test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java index 291d00a8dde..8462cf007f0 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESBlobStoreContainerTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.test; +package org.elasticsearch.repositories; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; @@ -25,6 +25,7 @@ import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.test.ESTestCase; import java.io.IOException; import java.io.InputStream; @@ -32,9 +33,9 @@ import java.util.Arrays; import java.util.HashMap; import java.util.Map; -import static org.elasticsearch.test.ESBlobStoreTestCase.writeRandomBlob; -import static org.elasticsearch.test.ESBlobStoreTestCase.randomBytes; -import static org.elasticsearch.test.ESBlobStoreTestCase.readBlobFully; +import static org.elasticsearch.repositories.ESBlobStoreTestCase.writeRandomBlob; +import static org.elasticsearch.repositories.ESBlobStoreTestCase.randomBytes; +import static org.elasticsearch.repositories.ESBlobStoreTestCase.readBlobFully; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.notNullValue; diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESBlobStoreRepositoryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreRepositoryIntegTestCase.java similarity index 91% rename from test/framework/src/main/java/org/elasticsearch/test/ESBlobStoreRepositoryIntegTestCase.java rename to test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreRepositoryIntegTestCase.java index dc49683de63..2ffd30fa470 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESBlobStoreRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreRepositoryIntegTestCase.java @@ -16,13 +16,14 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.test; +package org.elasticsearch.repositories; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequestBuilder; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequestBuilder; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.test.ESIntegTestCase; import java.util.Arrays; import java.util.HashSet; @@ -59,7 +60,8 @@ public abstract class ESBlobStoreRepositoryIntegTestCase extends ESIntegTestCase String snapshotName = randomAsciiName(); logger.info("--> create snapshot {}:{}", repoName, snapshotName); - assertSuccessfulSnapshot(client().admin().cluster().prepareCreateSnapshot(repoName, snapshotName).setWaitForCompletion(true).setIndices(indexNames)); + assertSuccessfulSnapshot(client().admin().cluster().prepareCreateSnapshot(repoName, snapshotName) + .setWaitForCompletion(true).setIndices(indexNames)); List<String> deleteIndices = randomSubsetOf(randomIntBetween(0, indexCount), indexNames); if (deleteIndices.size() > 0) { @@ -99,6 +101,9 @@ for (int i = 0; i < indexCount; i++) { assertHitCount(client().prepareSearch(indexNames[i]).setSize(0).get(), docCounts[i]); } + + logger.info("--> delete snapshot {}:{}", repoName, snapshotName); + assertAcked(client().admin().cluster().prepareDeleteSnapshot(repoName, snapshotName).get()); } public void testMultipleSnapshotAndRollback() throws Exception { @@ -130,7 +135,8 @@ // Check number of documents in this iteration docCounts[i] = (int) client().prepareSearch(indexName).setSize(0).get().getHits().totalHits(); logger.info("--> create snapshot {}:{} with {} documents", repoName, snapshotName + "-" + i, docCounts[i]); - assertSuccessfulSnapshot(client().admin().cluster().prepareCreateSnapshot(repoName, snapshotName + "-" + i).setWaitForCompletion(true).setIndices(indexName)); + assertSuccessfulSnapshot(client().admin().cluster().prepareCreateSnapshot(repoName, snapshotName + "-" + i) + .setWaitForCompletion(true).setIndices(indexName)); } int restoreOperations = randomIntBetween(1, 3); @@ -142,10 +148,17 @@ assertAcked(client().admin().indices().prepareClose(indexName)); logger.info("--> restore index from the snapshot"); - assertSuccessfulRestore(client().admin().cluster().prepareRestoreSnapshot(repoName, snapshotName + "-" + iterationToRestore).setWaitForCompletion(true)); + assertSuccessfulRestore(client().admin().cluster().prepareRestoreSnapshot(repoName, snapshotName + "-" + iterationToRestore) + .setWaitForCompletion(true)); + ensureGreen(); assertHitCount(client().prepareSearch(indexName).setSize(0).get(), docCounts[iterationToRestore]); } + + for (int i = 0; i < iterationCount; i++) { + logger.info("--> delete snapshot {}:{}", repoName, snapshotName + "-" + i); + assertAcked(client().admin().cluster().prepareDeleteSnapshot(repoName, snapshotName + "-" + i).get()); + } } protected void addRandomDocuments(String name, int numDocs) throws ExecutionException, InterruptedException { diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESBlobStoreTestCase.java
b/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreTestCase.java similarity index 97% rename from test/framework/src/main/java/org/elasticsearch/test/ESBlobStoreTestCase.java rename to test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreTestCase.java index 80432d628ef..be7431795b2 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESBlobStoreTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreTestCase.java @@ -16,12 +16,13 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.test; +package org.elasticsearch.repositories; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.test.ESTestCase; import org.junit.Test; import java.io.IOException;
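
The three renames above move the blob-store test base classes from org.elasticsearch.test to org.elasticsearch.repositories without changing their contracts. A minimal sketch of a downstream subclass after the move, assuming the base class still exposes an abstract newBlobStore() factory hook and that FsBlobStore keeps a (Settings, Path) constructor (neither signature is shown in this patch):

package org.elasticsearch.repositories;

import java.io.IOException;

import org.elasticsearch.common.blobstore.BlobStore;
import org.elasticsearch.common.blobstore.fs.FsBlobStore;
import org.elasticsearch.common.settings.Settings;

// Hypothetical example, not part of this patch: a filesystem-backed subclass of the
// relocated ESBlobStoreTestCase. Since only the base class's package changed, existing
// subclasses just swap the org.elasticsearch.test import for org.elasticsearch.repositories
// (or move packages, as the framework's own tests do here) and keep their factory override.
public class FsBlobStoreSmokeTests extends ESBlobStoreTestCase {

    @Override
    protected BlobStore newBlobStore() throws IOException {
        // createTempDir() is inherited from ESTestCase; the FsBlobStore constructor
        // signature is assumed from the test framework of this era.
        return new FsBlobStore(Settings.EMPTY, createTempDir());
    }
}

Under those assumptions, the only required change for existing subclasses is the import (or package) of the base class; the inherited tests and the static helpers writeRandomBlob/randomBytes/readBlobFully referenced in the hunks above keep working unmodified.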