Merge branch 'master' into netty4
* master:
  Fix REST test documentation
  [Test] move methods from bwc test to test package for use in plugins (#19738)
  package-info.java should be in src/main only.
  Split regular histograms from date histograms. #19551
  Tighten up concurrent store metadata listing and engine writes (#19684)
  Plugins: Make NamedWriteableRegistry immutable and add extension point for named writeables
  Add documentation for the 'elasticsearch-translog' tool
  [TEST] Increase time waiting for all shards to move off/on to a node
  Fixes the active shard count check in the case of (#19760)
  Fixes cat tasks operation in detailed mode
  ignore some docker craziness in seccomp environment checks
commit e74d02138f
@@ -285,18 +285,18 @@ REST tests use the following command:

---------------------------------------------------------------------------
gradle :distribution:integ-test-zip:integTest \
-  -Dtests.class=org.elasticsearch.test.rest.RestIT
+  -Dtests.class=org.elasticsearch.test.rest.*Yaml*IT
---------------------------------------------------------------------------

A specific test case can be run with

---------------------------------------------------------------------------
gradle :distribution:integ-test-zip:integTest \
-  -Dtests.class=org.elasticsearch.test.rest.RestIT \
+  -Dtests.class=org.elasticsearch.test.rest.*Yaml*IT \
  -Dtests.method="test {p0=cat.shards/10_basic/Help}"
---------------------------------------------------------------------------

-`RestIT` are the executable test classes that runs all the
+`*Yaml*IT` are the executable test classes that runs all the
yaml suites available within the `rest-api-spec` folder.

The REST tests support all the options provided by the randomized runner, plus the following:
@@ -160,12 +160,15 @@ public final class ActiveShardCount implements Writeable {
     * to meet the required shard count represented by this instance.
     */
    public boolean enoughShardsActive(final IndexShardRoutingTable shardRoutingTable) {
+       final int activeShardCount = shardRoutingTable.activeShards().size();
        if (this == ActiveShardCount.ALL) {
-           return shardRoutingTable.allShardsStarted();
+           // adding 1 for the primary in addition to the total number of replicas,
+           // which gives us the total number of shard copies
+           return activeShardCount == shardRoutingTable.replicaShards().size() + 1;
        } else if (this == ActiveShardCount.DEFAULT) {
-           return shardRoutingTable.primaryShard().started();
+           return activeShardCount >= 1;
        } else {
-           return shardRoutingTable.activeShards().size() >= value;
+           return activeShardCount >= value;
        }
    }
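The new check above boils down to simple copy counting. A minimal, self-contained sketch of the same semantics — illustrative Java only, not Elasticsearch code; the class, method, and string-typed parameter are assumptions made for the example:

    public final class ActiveShardCheckSketch {

        /** totalCopies = 1 primary + number of replicas; activeCopies = copies currently started. */
        static boolean enoughShardsActive(int activeCopies, int totalCopies, String waitForActiveShards) {
            if ("all".equals(waitForActiveShards)) {
                // "all" is only satisfied when every copy (the primary plus all replicas) is active
                return activeCopies == totalCopies;
            } else if (waitForActiveShards == null) {
                // default: a single active copy (the primary) is enough
                return activeCopies >= 1;
            } else {
                return activeCopies >= Integer.parseInt(waitForActiveShards);
            }
        }

        public static void main(String[] args) {
            // 1 primary + 2 replicas = 3 copies, of which only 2 are active
            System.out.println(enoughShardsActive(2, 3, "all"));  // false
            System.out.println(enoughShardsActive(2, 3, null));   // true (default)
            System.out.println(enoughShardsActive(2, 3, "2"));    // true
        }
    }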
@@ -200,6 +200,7 @@ final class Seccomp {
    static final int SECCOMP_RET_ALLOW = 0x7FFF0000;

    // some errno constants for error checking/handling
+   static final int EPERM  = 0x01;
    static final int EACCES = 0x0D;
    static final int EFAULT = 0x0E;
    static final int EINVAL = 0x16;

@@ -275,10 +276,23 @@ final class Seccomp {

        // check that unimplemented syscalls actually return ENOSYS
        // you never know (e.g. https://code.google.com/p/chromium/issues/detail?id=439795)
-       if (linux_syscall(999) >= 0 || Native.getLastError() != ENOSYS) {
+       if (linux_syscall(999) >= 0) {
            throw new UnsupportedOperationException("seccomp unavailable: your kernel is buggy and you should upgrade");
        }

+       switch (Native.getLastError()) {
+           case ENOSYS:
+               break; // ok
+           case EPERM:
+               // NOT ok, but likely a docker container
+               if (logger.isDebugEnabled()) {
+                   logger.debug("syscall(BOGUS) bogusly gets EPERM instead of ENOSYS");
+               }
+               break;
+           default:
+               throw new UnsupportedOperationException("seccomp unavailable: your kernel is buggy and you should upgrade");
+       }
+
        // try to check system calls really are who they claim
        // you never know (e.g. https://chromium.googlesource.com/chromium/src.git/+/master/sandbox/linux/seccomp-bpf/sandbox_bpf.cc#57)
        final int bogusArg = 0xf7a46a5c;
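The interesting part of the change above is tolerating EPERM: Docker's default seccomp profile can make the bogus-syscall probe fail with EPERM rather than ENOSYS. A standalone sketch of that decision logic — illustrative only, it does not use the JNA binding from the real code, and the ENOSYS value is an assumption:

    public final class BogusSyscallCheckSketch {

        static final int EPERM = 0x01;
        static final int ENOSYS = 38; // Linux x86-64 value; illustrative

        static void checkBogusSyscallResult(long returnValue, int errno) {
            if (returnValue >= 0) {
                throw new UnsupportedOperationException("seccomp unavailable: bogus syscall unexpectedly succeeded");
            }
            switch (errno) {
                case ENOSYS:
                    break; // expected: the syscall number really is unimplemented
                case EPERM:
                    // tolerated, but worth logging: likely a container seccomp profile
                    System.out.println("bogus syscall returned EPERM instead of ENOSYS");
                    break;
                default:
                    throw new UnsupportedOperationException("seccomp unavailable: unexpected errno " + errno);
            }
        }

        public static void main(String[] args) {
            checkBogusSyscallResult(-1, ENOSYS); // ok
            checkBogusSyscallResult(-1, EPERM);  // tolerated
        }
    }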
@@ -109,7 +109,6 @@ public abstract class TransportClient extends AbstractClient {
        final ThreadPool threadPool = new ThreadPool(settings);
        resourcesToClose.add(() -> ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS));
        final NetworkService networkService = new NetworkService(settings, Collections.emptyList());
-       NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();
        try {
            final List<Setting<?>> additionalSettings = new ArrayList<>();
            final List<String> additionalSettingsFilter = new ArrayList<>();

@@ -120,14 +119,21 @@ public abstract class TransportClient extends AbstractClient {
            }
            SettingsModule settingsModule = new SettingsModule(settings, additionalSettings, additionalSettingsFilter);

+           NetworkModule networkModule = new NetworkModule(networkService, settings, true);
+           SearchModule searchModule = new SearchModule(settings, true, pluginsService.filterPlugins(SearchPlugin.class));
+           List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
+           entries.addAll(networkModule.getNamedWriteables());
+           entries.addAll(searchModule.getNamedWriteables());
+           NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(entries);
+
            ModulesBuilder modules = new ModulesBuilder();
            // plugin modules must be added here, before others or we can get crazy injection errors...
            for (Module pluginModule : pluginsService.createGuiceModules()) {
                modules.add(pluginModule);
            }
-           modules.add(new NetworkModule(networkService, settings, true, namedWriteableRegistry));
+           modules.add(networkModule);
            modules.add(b -> b.bind(ThreadPool.class).toInstance(threadPool));
-           modules.add(new SearchModule(settings, namedWriteableRegistry, true, pluginsService.filterPlugins(SearchPlugin.class)));
+           modules.add(searchModule);
            ActionModule actionModule = new ActionModule(false, true, settings, null, settingsModule.getClusterSettings(),
                pluginsService.filterPlugins(ActionPlugin.class));
            modules.add(actionModule);

@@ -143,6 +149,7 @@ public abstract class TransportClient extends AbstractClient {
                b.bind(BigArrays.class).toInstance(bigArrays);
                b.bind(PluginsService.class).toInstance(pluginsService);
                b.bind(CircuitBreakerService.class).toInstance(circuitBreakerService);
+               b.bind(NamedWriteableRegistry.class).toInstance(namedWriteableRegistry);
            }));

            Injector injector = modules.createInjector();
@@ -19,12 +19,11 @@
package org.elasticsearch.common.geo.builders;

-import com.vividsolutions.jts.geom.Coordinate;
-
-import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import java.util.List;

+import com.vividsolutions.jts.geom.Coordinate;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry;

/**
 * A collection of static methods for creating ShapeBuilders.
 */

@@ -140,15 +139,15 @@ public class ShapeBuilders {
        return new EnvelopeBuilder(topLeft, bottomRight);
    }

-   public static void register(NamedWriteableRegistry namedWriteableRegistry) {
-       namedWriteableRegistry.register(ShapeBuilder.class, PointBuilder.TYPE.shapeName(), PointBuilder::new);
-       namedWriteableRegistry.register(ShapeBuilder.class, CircleBuilder.TYPE.shapeName(), CircleBuilder::new);
-       namedWriteableRegistry.register(ShapeBuilder.class, EnvelopeBuilder.TYPE.shapeName(), EnvelopeBuilder::new);
-       namedWriteableRegistry.register(ShapeBuilder.class, MultiPointBuilder.TYPE.shapeName(), MultiPointBuilder::new);
-       namedWriteableRegistry.register(ShapeBuilder.class, LineStringBuilder.TYPE.shapeName(), LineStringBuilder::new);
-       namedWriteableRegistry.register(ShapeBuilder.class, MultiLineStringBuilder.TYPE.shapeName(), MultiLineStringBuilder::new);
-       namedWriteableRegistry.register(ShapeBuilder.class, PolygonBuilder.TYPE.shapeName(), PolygonBuilder::new);
-       namedWriteableRegistry.register(ShapeBuilder.class, MultiPolygonBuilder.TYPE.shapeName(), MultiPolygonBuilder::new);
-       namedWriteableRegistry.register(ShapeBuilder.class, GeometryCollectionBuilder.TYPE.shapeName(), GeometryCollectionBuilder::new);
+   public static void register(List<Entry> namedWriteables) {
+       namedWriteables.add(new Entry(ShapeBuilder.class, PointBuilder.TYPE.shapeName(), PointBuilder::new));
+       namedWriteables.add(new Entry(ShapeBuilder.class, CircleBuilder.TYPE.shapeName(), CircleBuilder::new));
+       namedWriteables.add(new Entry(ShapeBuilder.class, EnvelopeBuilder.TYPE.shapeName(), EnvelopeBuilder::new));
+       namedWriteables.add(new Entry(ShapeBuilder.class, MultiPointBuilder.TYPE.shapeName(), MultiPointBuilder::new));
+       namedWriteables.add(new Entry(ShapeBuilder.class, LineStringBuilder.TYPE.shapeName(), LineStringBuilder::new));
+       namedWriteables.add(new Entry(ShapeBuilder.class, MultiLineStringBuilder.TYPE.shapeName(), MultiLineStringBuilder::new));
+       namedWriteables.add(new Entry(ShapeBuilder.class, PolygonBuilder.TYPE.shapeName(), PolygonBuilder::new));
+       namedWriteables.add(new Entry(ShapeBuilder.class, MultiPolygonBuilder.TYPE.shapeName(), MultiPolygonBuilder::new));
+       namedWriteables.add(new Entry(ShapeBuilder.class, GeometryCollectionBuilder.TYPE.shapeName(), GeometryCollectionBuilder::new));
    }
}
@@ -19,70 +19,102 @@
package org.elasticsearch.common.io.stream;

+import java.util.ArrayList;
+import java.util.Collections;
import java.util.HashMap;
+import java.util.List;
import java.util.Map;
+import java.util.Objects;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import org.elasticsearch.plugins.Plugin;

/**
- * Registry for {@link NamedWriteable} objects. Allows to register and retrieve prototype instances of writeable objects
- * given their name.
+ * A registry for {@link org.elasticsearch.common.io.stream.Writeable.Reader} readers of {@link NamedWriteable}.
+ *
+ * The registration is keyed by the combination of the category class of {@link NamedWriteable}, and a name unique
+ * to that category.
 */
public class NamedWriteableRegistry {

-   private final Map<Class<?>, InnerRegistry<?>> registry = new HashMap<>();
+   /** An entry in the registry, made up of a category class and name, and a reader for that category class. */
+   public static class Entry {

-   /**
-    * Register a {@link NamedWriteable} given its category, its name, and a function to read it from the stream.
-    *
-    * This method suppresses the rawtypes warning because it intentionally using NamedWriteable instead of {@code NamedWriteable<T>} so it
-    * is easier to use and because we might be able to drop the type parameter from NamedWriteable entirely some day.
-    */
-   public synchronized <T extends NamedWriteable> void register(Class<T> categoryClass, String name,
-           Writeable.Reader<? extends T> reader) {
-       @SuppressWarnings("unchecked")
-       InnerRegistry<T> innerRegistry = (InnerRegistry<T>) registry.get(categoryClass);
-       if (innerRegistry == null) {
-           innerRegistry = new InnerRegistry<>(categoryClass);
-           registry.put(categoryClass, innerRegistry);
+       /** The superclass of a {@link NamedWriteable} which will be read by {@link #reader}. */
+       public final Class<?> categoryClass;
+
+       /** A name for the writeable which is unique to the {@link #categoryClass}. */
+       public final String name;
+
+       /** A reader capability of reading*/
+       public final Writeable.Reader<?> reader;
+
+       /** Creates a new entry which can be stored by the registry. */
+       public <T extends NamedWriteable> Entry(Class<T> categoryClass, String name, Writeable.Reader<? extends T> reader) {
+           this.categoryClass = Objects.requireNonNull(categoryClass);
+           this.name = Objects.requireNonNull(name);
+           this.reader = Objects.requireNonNull(reader);
        }
-       innerRegistry.register(name, reader);
    }

    /**
-    * Returns a prototype of the {@link NamedWriteable} object identified by the name provided as argument and its category
+    * The underlying data of the registry maps from the category to an inner
+    * map of name unique to that category, to the actual reader.
     */
-   public synchronized <T> Writeable.Reader<? extends T> getReader(Class<T> categoryClass, String name) {
-       @SuppressWarnings("unchecked")
-       InnerRegistry<T> innerRegistry = (InnerRegistry<T>)registry.get(categoryClass);
-       if (innerRegistry == null) {
-           throw new IllegalArgumentException("unknown named writeable category [" + categoryClass.getName() + "]");
+   private final Map<Class<?>, Map<String, Writeable.Reader<?>>> registry;
+
+   /**
+    * Constructs a new registry from the given entries.
+    */
+   public NamedWriteableRegistry(List<Entry> entries) {
+       if (entries.isEmpty()) {
+           registry = Collections.emptyMap();
+           return;
        }
-       return innerRegistry.getReader(name);
+       entries = new ArrayList<>(entries);
+       entries.sort((e1, e2) -> e1.categoryClass.getName().compareTo(e2.categoryClass.getName()));
+
+       Map<Class<?>, Map<String, Writeable.Reader<?>>> registry = new HashMap<>();
+       Map<String, Writeable.Reader<?>> readers = null;
+       Class currentCategory = null;
+       for (Entry entry : entries) {
+           if (currentCategory != entry.categoryClass) {
+               if (currentCategory != null) {
+                   // we've seen the last of this category, put it into the big map
+                   registry.put(currentCategory, Collections.unmodifiableMap(readers));
+               }
+               readers = new HashMap<>();
+               currentCategory = entry.categoryClass;
+           }
+
+           Writeable.Reader<?> oldReader = readers.put(entry.name, entry.reader);
+           if (oldReader != null) {
+               throw new IllegalArgumentException("NamedWriteable [" + currentCategory.getName() + "][" + entry.name + "]" +
+                   " is already registered for [" + oldReader.getClass().getName() + "]," +
+                   " cannot register [" + entry.reader.getClass().getName() + "]");
+           }
+       }
+       // handle the last category
+       registry.put(currentCategory, Collections.unmodifiableMap(readers));
+
+       this.registry = Collections.unmodifiableMap(registry);
    }

-   private static class InnerRegistry<T> {
-
-       private final Map<String, Writeable.Reader<? extends T>> registry = new HashMap<>();
-       private final Class<T> categoryClass;
-
-       private InnerRegistry(Class<T> categoryClass) {
-           this.categoryClass = categoryClass;
+   /**
+    * Returns a reader for a {@link NamedWriteable} object identified by the
+    * name provided as argument and its category.
+    */
+   public <T> Writeable.Reader<? extends T> getReader(Class<T> categoryClass, String name) {
+       Map<String, Writeable.Reader<?>> readers = registry.get(categoryClass);
+       if (readers == null) {
+           throw new IllegalArgumentException("Unknown NamedWriteable category [" + categoryClass.getName() + "]");
        }
-
-       private void register(String name, Writeable.Reader<? extends T> reader) {
-           Writeable.Reader<? extends T> existingReader = registry.get(name);
-           if (existingReader != null) {
-               throw new IllegalArgumentException(
-                       "named writeable [" + categoryClass.getName() + "][" + name + "] is already registered by [" + reader + "]");
-           }
-           registry.put(name, reader);
-       }
-
-       private Writeable.Reader<? extends T> getReader(String name) {
-           Writeable.Reader<? extends T> reader = registry.get(name);
-           if (reader == null) {
-               throw new IllegalArgumentException("unknown named writeable [" + categoryClass.getName() + "][" + name + "]");
-           }
-           return reader;
+       @SuppressWarnings("unchecked")
+       Writeable.Reader<? extends T> reader = (Writeable.Reader<? extends T>)readers.get(name);
+       if (reader == null) {
+           throw new IllegalArgumentException("Unknown NamedWriteable [" + categoryClass.getName() + "][" + name + "]");
        }
+       return reader;
    }
}
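The rewrite above replaces mutable per-category inner registries with a registry that is built once from a list of entries and then frozen. A simplified, self-contained sketch of that pattern — the types below are stand-ins for illustration, not the Elasticsearch classes; the real class additionally sorts entries by category and keeps richer error messages:

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public final class ImmutableReaderRegistrySketch {

        @FunctionalInterface
        interface Reader<T> {
            T read(byte[] bytes);
        }

        static final class Entry {
            final Class<?> categoryClass;
            final String name;
            final Reader<?> reader;

            Entry(Class<?> categoryClass, String name, Reader<?> reader) {
                this.categoryClass = categoryClass;
                this.name = name;
                this.reader = reader;
            }
        }

        // category class -> (name -> reader), populated once in the constructor
        private final Map<Class<?>, Map<String, Reader<?>>> registry = new HashMap<>();

        ImmutableReaderRegistrySketch(List<Entry> entries) {
            for (Entry entry : entries) {
                Map<String, Reader<?>> readers = registry.computeIfAbsent(entry.categoryClass, c -> new HashMap<>());
                if (readers.putIfAbsent(entry.name, entry.reader) != null) {
                    throw new IllegalArgumentException(
                        "[" + entry.categoryClass.getName() + "][" + entry.name + "] is already registered");
                }
            }
        }

        @SuppressWarnings("unchecked")
        <T> Reader<? extends T> getReader(Class<T> categoryClass, String name) {
            Map<String, Reader<?>> readers = registry.get(categoryClass);
            if (readers == null || readers.get(name) == null) {
                throw new IllegalArgumentException("unknown [" + categoryClass.getName() + "][" + name + "]");
            }
            return (Reader<? extends T>) readers.get(name);
        }

        public static void main(String[] args) {
            ImmutableReaderRegistrySketch registry = new ImmutableReaderRegistrySketch(Arrays.asList(
                new Entry(CharSequence.class, "upper", bytes -> new String(bytes).toUpperCase())));
            System.out.println(registry.getReader(CharSequence.class, "upper").read("hello".getBytes()));
        }
    }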
@@ -19,6 +19,9 @@
package org.elasticsearch.common.network;

+import java.util.ArrayList;
+import java.util.List;
+
import org.elasticsearch.action.support.replication.ReplicationTask;
import org.elasticsearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand;
import org.elasticsearch.cluster.routing.allocation.command.AllocateReplicaAllocationCommand;

@@ -31,6 +34,7 @@ import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.util.Providers;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;

@@ -70,21 +74,18 @@ public class NetworkModule extends AbstractModule {
    private final ExtensionPoint.SelectedType<TransportService> transportServiceTypes = new ExtensionPoint.SelectedType<>("transport_service", TransportService.class);
    private final ExtensionPoint.SelectedType<Transport> transportTypes = new ExtensionPoint.SelectedType<>("transport", Transport.class);
    private final ExtensionPoint.SelectedType<HttpServerTransport> httpTransportTypes = new ExtensionPoint.SelectedType<>("http_transport", HttpServerTransport.class);
-   private final NamedWriteableRegistry namedWriteableRegistry;
+   private final List<Entry> namedWriteables = new ArrayList<>();

    /**
     * Creates a network module that custom networking classes can be plugged into.
     * @param networkService A constructed network service object to bind.
     * @param settings The settings for the node
     * @param transportClient True if only transport classes should be allowed to be registered, false otherwise.
-    * @param namedWriteableRegistry registry for named writeables for use during streaming
     */
-   public NetworkModule(NetworkService networkService, Settings settings, boolean transportClient,
-           NamedWriteableRegistry namedWriteableRegistry) {
+   public NetworkModule(NetworkService networkService, Settings settings, boolean transportClient) {
        this.networkService = networkService;
        this.settings = settings;
        this.transportClient = transportClient;
-       this.namedWriteableRegistry = namedWriteableRegistry;
        registerTransportService("default", TransportService.class);
        registerTransport(LOCAL_TRANSPORT, LocalTransport.class);
        registerTaskStatus(ReplicationTask.Status.NAME, ReplicationTask.Status::new);

@@ -116,7 +117,7 @@ public class NetworkModule extends AbstractModule {
    }

    public void registerTaskStatus(String name, Writeable.Reader<? extends Task.Status> reader) {
-       namedWriteableRegistry.register(Task.Status.class, name, reader);
+       namedWriteables.add(new Entry(Task.Status.class, name, reader));
    }

    /**

@@ -132,7 +133,7 @@ public class NetworkModule extends AbstractModule {
    private <T extends AllocationCommand> void registerAllocationCommand(Writeable.Reader<T> reader, AllocationCommand.Parser<T> parser,
            ParseField commandName) {
        allocationCommandRegistry.register(parser, commandName);
-       namedWriteableRegistry.register(AllocationCommand.class, commandName.getPreferredName(), reader);
+       namedWriteables.add(new Entry(AllocationCommand.class, commandName.getPreferredName(), reader));
    }

    /**

@@ -142,10 +143,13 @@ public class NetworkModule extends AbstractModule {
        return allocationCommandRegistry;
    }

+   public List<Entry> getNamedWriteables() {
+       return namedWriteables;
+   }
+
    @Override
    protected void configure() {
        bind(NetworkService.class).toInstance(networkService);
-       bind(NamedWriteableRegistry.class).toInstance(namedWriteableRegistry);
        transportServiceTypes.bindType(binder(), settings, TRANSPORT_SERVICE_TYPE_KEY, "default");
        transportTypes.bindType(binder(), settings, TRANSPORT_TYPE_KEY, TRANSPORT_DEFAULT_TYPE_SETTING.get(settings));
@@ -35,24 +35,9 @@ public abstract class Rounding implements Streamable {
    public abstract byte id();

    /**
-    * Given a value, compute a key that uniquely identifies the rounded value although it is not necessarily equal to the rounding value itself.
+    * Rounds the given value.
     */
-   public abstract long roundKey(long value);
-
-   /**
-    * Compute the rounded value given the key that identifies it.
-    */
-   public abstract long valueForKey(long key);
-
-   /**
-    * Rounds the given value, equivalent to calling <code>roundValue(roundKey(value))</code>.
-    *
-    * @param value The value to round.
-    * @return The rounded value.
-    */
-   public final long round(long value) {
-       return valueForKey(roundKey(value));
-   }
+   public abstract long round(long value);

    /**
     * Given the rounded value (which was potentially generated by {@link #round(long)}, returns the next rounding value. For example, with

@@ -112,13 +97,8 @@ public abstract class Rounding implements Streamable {
        }

        @Override
-       public long roundKey(long value) {
-           return roundKey(value, interval);
-       }
-
-       @Override
-       public long valueForKey(long key) {
-           return key * interval;
+       public long round(long value) {
+           return roundKey(value, interval) * interval;
        }

        @Override

@@ -179,13 +159,8 @@ public abstract class Rounding implements Streamable {
        }

        @Override
-       public long roundKey(long utcMillis) {
-           return rounding.roundKey((long) (factor * utcMillis));
-       }
-
-       @Override
-       public long valueForKey(long key) {
-           return rounding.valueForKey(key);
+       public long round(long utcMillis) {
+           return rounding.round((long) (factor * utcMillis));
        }

        @Override

@@ -248,13 +223,8 @@ public abstract class Rounding implements Streamable {
        }

        @Override
-       public long roundKey(long value) {
-           return rounding.roundKey(value - offset);
-       }
-
-       @Override
-       public long valueForKey(long key) {
-           return offset + rounding.valueForKey(key);
+       public long round(long value) {
+           return rounding.round(value - offset) + offset;
        }

        @Override
|
|||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* A rounding strategy for dates. It is typically used to group together dates
|
||||
* that are part of the same hour/day/month, taking into account time zones and
|
||||
* daylight saving times.
|
||||
*/
|
||||
public abstract class TimeZoneRounding extends Rounding {
|
||||
public static final ParseField INTERVAL_FIELD = new ParseField("interval");
|
||||
|
@ -125,7 +128,7 @@ public abstract class TimeZoneRounding extends Rounding {
|
|||
}
|
||||
|
||||
@Override
|
||||
public long roundKey(long utcMillis) {
|
||||
public long round(long utcMillis) {
|
||||
long rounded = field.roundFloor(utcMillis);
|
||||
if (timeZone.isFixed() == false && timeZone.getOffset(utcMillis) != timeZone.getOffset(rounded)) {
|
||||
// in this case, we crossed a time zone transition. In some edge cases this will
|
||||
|
@ -138,20 +141,14 @@ public abstract class TimeZoneRounding extends Rounding {
|
|||
return rounded;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long valueForKey(long time) {
|
||||
assert roundKey(time) == time;
|
||||
return time;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long nextRoundingValue(long utcMillis) {
|
||||
long floor = roundKey(utcMillis);
|
||||
long floor = round(utcMillis);
|
||||
// add one unit and round to get to next rounded value
|
||||
long next = roundKey(field.add(floor, 1));
|
||||
long next = round(field.add(floor, 1));
|
||||
if (next == floor) {
|
||||
// in rare case we need to add more than one unit
|
||||
next = roundKey(field.add(floor, 2));
|
||||
next = round(field.add(floor, 2));
|
||||
}
|
||||
return next;
|
||||
}
|
||||
|
@ -216,7 +213,7 @@ public abstract class TimeZoneRounding extends Rounding {
|
|||
}
|
||||
|
||||
@Override
|
||||
public long roundKey(long utcMillis) {
|
||||
public long round(long utcMillis) {
|
||||
long timeLocal = timeZone.convertUTCToLocal(utcMillis);
|
||||
long rounded = Rounding.Interval.roundValue(Rounding.Interval.roundKey(timeLocal, interval), interval);
|
||||
long roundedUTC;
|
||||
|
@ -225,7 +222,7 @@ public abstract class TimeZoneRounding extends Rounding {
|
|||
// check if we crossed DST transition, in this case we want the last rounded value before the transition
|
||||
long transition = timeZone.previousTransition(utcMillis);
|
||||
if (transition != utcMillis && transition > roundedUTC) {
|
||||
roundedUTC = roundKey(transition - 1);
|
||||
roundedUTC = round(transition - 1);
|
||||
}
|
||||
} else {
|
||||
/*
|
||||
|
@ -276,12 +273,6 @@ public abstract class TimeZoneRounding extends Rounding {
|
|||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long valueForKey(long time) {
|
||||
assert roundKey(time) == time;
|
||||
return time;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long nextRoundingValue(long time) {
|
||||
long timeLocal = time;
|
||||
|
|
|
@@ -654,7 +654,7 @@ public abstract class Engine implements Closeable {
     *
     * @param flushFirst indicates whether the engine should flush before returning the snapshot
     */
-   public abstract IndexCommit snapshotIndex(boolean flushFirst) throws EngineException;
+   public abstract IndexCommit acquireIndexCommit(boolean flushFirst) throws EngineException;

    /**
     * fail engine due to some error. the engine will also be closed.

@@ -852,7 +852,7 @@ public class InternalEngine extends Engine {
    }

    @Override
-   public IndexCommit snapshotIndex(final boolean flushFirst) throws EngineException {
+   public IndexCommit acquireIndexCommit(final boolean flushFirst) throws EngineException {
        // we have to flush outside of the readlock otherwise we might have a problem upgrading
        // the to a write lock when we fail the engine in this operation
        if (flushFirst) {

@@ -205,7 +205,7 @@ public class ShadowEngine extends Engine {
    }

    @Override
-   public IndexCommit snapshotIndex(boolean flushFirst) throws EngineException {
+   public IndexCommit acquireIndexCommit(boolean flushFirst) throws EngineException {
        throw new UnsupportedOperationException("Can not take snapshot from a shadow engine");
    }
@@ -21,7 +21,11 @@ package org.elasticsearch.index.shard;

import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.index.CheckIndex;
+import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexCommit;
+import org.apache.lucene.index.IndexFormatTooNewException;
+import org.apache.lucene.index.IndexFormatTooOldException;
+import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;
import org.apache.lucene.index.SnapshotDeletionPolicy;
import org.apache.lucene.index.Term;

@@ -29,6 +33,7 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryCachingPolicy;
import org.apache.lucene.search.UsageTrackingQueryCachingPolicy;
import org.apache.lucene.store.AlreadyClosedException;
+import org.apache.lucene.store.Lock;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.ThreadInterruptedException;
import org.elasticsearch.ElasticsearchException;

@@ -116,10 +121,12 @@ import org.elasticsearch.search.suggest.completion.CompletionStats;
import org.elasticsearch.search.suggest.completion2x.Completion090PostingsFormat;
import org.elasticsearch.threadpool.ThreadPool;

+import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintStream;
import java.nio.channels.ClosedByInterruptException;
import java.nio.charset.StandardCharsets;
+import java.nio.file.NoSuchFileException;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;

@@ -789,15 +796,15 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl

    /**
     * Creates a new {@link IndexCommit} snapshot form the currently running engine. All resources referenced by this
-    * commit won't be freed until the commit / snapshot is released via {@link #releaseSnapshot(IndexCommit)}.
+    * commit won't be freed until the commit / snapshot is released via {@link #releaseIndexCommit(IndexCommit)}.
     *
     * @param flushFirst <code>true</code> if the index should first be flushed to disk / a low level lucene commit should be executed
     */
-   public IndexCommit snapshotIndex(boolean flushFirst) throws EngineException {
+   public IndexCommit acquireIndexCommit(boolean flushFirst) throws EngineException {
        IndexShardState state = this.state; // one time volatile read
        // we allow snapshot on closed index shard, since we want to do one after we close the shard and before we close the engine
        if (state == IndexShardState.STARTED || state == IndexShardState.RELOCATED || state == IndexShardState.CLOSED) {
-           return getEngine().snapshotIndex(flushFirst);
+           return getEngine().acquireIndexCommit(flushFirst);
        } else {
            throw new IllegalIndexShardStateException(shardId, state, "snapshot is not allowed");
        }

@@ -805,13 +812,50 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl

    /**
-    * Releases a snapshot taken from {@link #snapshotIndex(boolean)} this must be called to release the resources
+    * Releases a snapshot taken from {@link #acquireIndexCommit(boolean)} this must be called to release the resources
     * referenced by the given snapshot {@link IndexCommit}.
     */
-   public void releaseSnapshot(IndexCommit snapshot) throws IOException {
+   public void releaseIndexCommit(IndexCommit snapshot) throws IOException {
        deletionPolicy.release(snapshot);
    }

+   /**
+    * gets a {@link Store.MetadataSnapshot} for the current directory. This method is safe to call in all lifecycle of the index shard,
+    * without having to worry about the current state of the engine and concurrent flushes.
+    *
+    * @throws org.apache.lucene.index.IndexNotFoundException if no index is found in the current directory
+    * @throws CorruptIndexException if the lucene index is corrupted. This can be caused by a checksum mismatch or an
+    *                               unexpected exception when opening the index reading the segments file.
+    * @throws IndexFormatTooOldException if the lucene index is too old to be opened.
+    * @throws IndexFormatTooNewException if the lucene index is too new to be opened.
+    * @throws FileNotFoundException if one or more files referenced by a commit are not present.
+    * @throws NoSuchFileException if one or more files referenced by a commit are not present.
+    */
+   public Store.MetadataSnapshot snapshotStoreMetadata() throws IOException {
+       IndexCommit indexCommit = null;
+       store.incRef();
+       try {
+           synchronized (mutex) {
+               // if the engine is not running, we can access the store directly, but we need to make sure no one starts
+               // the engine on us. If the engine is running, we can get a snapshot via the deletion policy which is initialized.
+               // That can be done out of mutex, since the engine can be closed half way.
+               Engine engine = getEngineOrNull();
+               if (engine == null) {
+                   try (Lock ignored = store.directory().obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
+                       return store.getMetadata(null);
+                   }
+               }
+           }
+           indexCommit = deletionPolicy.snapshot();
+           return store.getMetadata(indexCommit);
+       } finally {
+           store.decRef();
+           if (indexCommit != null) {
+               deletionPolicy.release(indexCommit);
+           }
+       }
+   }
+
    /**
     * Fails the shard and marks the shard store as corrupted if
     * <code>e</code> is caused by index corruption

@@ -1310,7 +1354,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
        if ("checksum".equals(checkIndexOnStartup)) {
            // physical verification only: verify all checksums for the latest commit
            IOException corrupt = null;
-           MetadataSnapshot metadata = store.getMetadata();
+           MetadataSnapshot metadata = snapshotStoreMetadata();
            for (Map.Entry<String, StoreFileMetaData> entry : metadata.asMap().entrySet()) {
                try {
                    Store.checkIntegrity(entry.getValue(), store.directory());
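The rename from snapshotIndex/releaseSnapshot to acquireIndexCommit/releaseIndexCommit makes the acquire/release pairing explicit; the callers updated later in this commit (LocalShardSnapshot, RecoverySourceHandler) all follow a try/finally pattern. A hedged usage sketch against the signatures shown above — the helper method and what is done with the commit are illustrative assumptions, not code from this commit:

    import java.io.IOException;

    import org.apache.lucene.index.IndexCommit;
    import org.elasticsearch.index.shard.IndexShard;

    final class IndexCommitUsageSketch {

        static void withIndexCommit(IndexShard shard) throws IOException {
            // flushFirst = false: use the last commit point without forcing a flush
            IndexCommit commit = shard.acquireIndexCommit(false);
            try {
                // ... read the files referenced by the commit, e.g. for a snapshot or recovery ...
                System.out.println("commit generation: " + commit.getGeneration());
            } finally {
                // the commit pins index files on disk until it is released
                shard.releaseIndexCommit(commit);
            }
        }
    }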
@@ -23,7 +23,6 @@ import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FilterDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.NoLockFactory;

@@ -31,7 +30,6 @@ import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.indices.recovery.RecoveryState;

import java.io.Closeable;
import java.io.IOException;

@@ -52,7 +50,7 @@ final class LocalShardSnapshot implements Closeable {
        store.incRef();
        boolean success = false;
        try {
-           indexCommit = shard.snapshotIndex(true);
+           indexCommit = shard.acquireIndexCommit(true);
            success = true;
        } finally {
            if (success == false) {

@@ -120,7 +118,7 @@ final class LocalShardSnapshot implements Closeable {
    public void close() throws IOException {
        if (closed.compareAndSet(false, true)) {
            try {
-               shard.releaseSnapshot(indexCommit);
+               shard.releaseIndexCommit(indexCommit);
            } finally {
                store.decRef();
            }
@@ -109,4 +109,9 @@ public final class ShadowIndexShard extends IndexShard {
    public void addRefreshListener(Translog.Location location, Consumer<Boolean> listener) {
        throw new UnsupportedOperationException("Can't listen for a refresh on a shadow engine because it doesn't have a translog");
    }
+
+   @Override
+   public Store.MetadataSnapshot snapshotStoreMetadata() throws IOException {
+       throw new UnsupportedOperationException("can't snapshot the directory as the primary may change it underneath us");
+   }
}
@@ -73,6 +73,7 @@ import org.elasticsearch.env.ShardLock;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.shard.AbstractIndexShardComponent;
+import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;

import java.io.Closeable;

@@ -208,45 +209,17 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
        }
    }

-   /**
-    * Returns a new MetadataSnapshot for the latest commit in this store or
-    * an empty snapshot if no index exists or can not be opened.
-    *
-    * @throws CorruptIndexException if the lucene index is corrupted. This can be caused by a checksum mismatch or an
-    *                               unexpected exception when opening the index reading the segments file.
-    * @throws IndexFormatTooOldException if the lucene index is too old to be opened.
-    * @throws IndexFormatTooNewException if the lucene index is too new to be opened.
-    */
-   public MetadataSnapshot getMetadataOrEmpty() throws IOException {
-       try {
-           return getMetadata(null);
-       } catch (IndexNotFoundException ex) {
-           // that's fine - happens all the time no need to log
-       } catch (FileNotFoundException | NoSuchFileException ex) {
-           logger.info("Failed to open / find files while reading metadata snapshot");
-       }
-       return MetadataSnapshot.EMPTY;
-   }
-
-   /**
-    * Returns a new MetadataSnapshot for the latest commit in this store.
-    *
-    * @throws CorruptIndexException if the lucene index is corrupted. This can be caused by a checksum mismatch or an
-    *                               unexpected exception when opening the index reading the segments file.
-    * @throws IndexFormatTooOldException if the lucene index is too old to be opened.
-    * @throws IndexFormatTooNewException if the lucene index is too new to be opened.
-    * @throws FileNotFoundException if one or more files referenced by a commit are not present.
-    * @throws NoSuchFileException if one or more files referenced by a commit are not present.
-    * @throws IndexNotFoundException if no index / valid commit-point can be found in this store
-    */
-   public MetadataSnapshot getMetadata() throws IOException {
-       return getMetadata(null);
-   }
-
    /**
     * Returns a new MetadataSnapshot for the given commit. If the given commit is <code>null</code>
     * the latest commit point is used.
     *
+    * Note that this method requires the caller verify it has the right to access the store and
+    * no concurrent file changes are happening. If in doubt, you probably want to use one of the following:
+    *
+    * {@link #readMetadataSnapshot(Path, ShardId, NodeEnvironment.ShardLocker, ESLogger)} to read a meta data while locking
+    * {@link IndexShard#snapshotStoreMetadata()} to safely read from an existing shard
+    * {@link IndexShard#acquireIndexCommit(boolean)} to get an {@link IndexCommit} which is safe to use but has to be freed
+    *
     * @throws CorruptIndexException if the lucene index is corrupted. This can be caused by a checksum mismatch or an
     *                               unexpected exception when opening the index reading the segments file.
     * @throws IndexFormatTooOldException if the lucene index is too old to be opened.

@@ -634,7 +607,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
                    // ignore, we don't really care, will get deleted later on
                }
            }
-           final Store.MetadataSnapshot metadataOrEmpty = getMetadata();
+           final Store.MetadataSnapshot metadataOrEmpty = getMetadata(null);
            verifyAfterCleanup(sourceMetaData, metadataOrEmpty);
        } finally {
            metadataLock.writeLock().unlock();
@@ -27,6 +27,7 @@ import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService;
import org.elasticsearch.common.geo.ShapesAvailability;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry;
import org.elasticsearch.index.NodeServicesProvider;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MetadataFieldMapper;

@@ -67,6 +68,7 @@ import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData;
import org.elasticsearch.indices.ttl.IndicesTTLService;
import org.elasticsearch.plugins.MapperPlugin;

+import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;

@@ -80,19 +82,22 @@ public class IndicesModule extends AbstractModule {
    private final Map<String, Mapper.TypeParser> mapperParsers;
    private final Map<String, MetadataFieldMapper.TypeParser> metadataMapperParsers;
    private final MapperRegistry mapperRegistry;
-   private final NamedWriteableRegistry namedWritableRegistry;
+   private final List<Entry> namedWritables = new ArrayList<>();

-   public IndicesModule(NamedWriteableRegistry namedWriteableRegistry, List<MapperPlugin> mapperPlugins) {
-       this.namedWritableRegistry = namedWriteableRegistry;
+   public IndicesModule(List<MapperPlugin> mapperPlugins) {
        this.mapperParsers = getMappers(mapperPlugins);
        this.metadataMapperParsers = getMetadataMappers(mapperPlugins);
        this.mapperRegistry = new MapperRegistry(mapperParsers, metadataMapperParsers);
-       registerBuildInWritables();
+       registerBuiltinWritables();
    }

-   private void registerBuildInWritables() {
-       namedWritableRegistry.register(Condition.class, MaxAgeCondition.NAME, MaxAgeCondition::new);
-       namedWritableRegistry.register(Condition.class, MaxDocsCondition.NAME, MaxDocsCondition::new);
+   private void registerBuiltinWritables() {
+       namedWritables.add(new Entry(Condition.class, MaxAgeCondition.NAME, MaxAgeCondition::new));
+       namedWritables.add(new Entry(Condition.class, MaxDocsCondition.NAME, MaxDocsCondition::new));
    }

+   public List<Entry> getNamedWriteables() {
+       return namedWritables;
+   }
+
    private Map<String, Mapper.TypeParser> getMappers(List<MapperPlugin> mapperPlugins) {
@@ -127,7 +127,7 @@ public class RecoverySourceHandler {
        logger.trace("captured translog id [{}] for recovery", translogView.minTranslogGeneration());
        final IndexCommit phase1Snapshot;
        try {
-           phase1Snapshot = shard.snapshotIndex(false);
+           phase1Snapshot = shard.acquireIndexCommit(false);
        } catch (Exception e) {
            IOUtils.closeWhileHandlingException(translogView);
            throw new RecoveryEngineException(shard.shardId(), 1, "Snapshot failed", e);

@@ -139,7 +139,7 @@ public class RecoverySourceHandler {
            throw new RecoveryEngineException(shard.shardId(), 1, "phase1 failed", e);
        } finally {
            try {
-               shard.releaseSnapshot(phase1Snapshot);
+               shard.releaseIndexCommit(phase1Snapshot);
            } catch (IOException ex) {
                logger.warn("releasing snapshot caused exception", ex);
            }
@@ -167,7 +167,13 @@ public class RecoveryTargetService extends AbstractComponent implements IndexEve
        logger.trace("collecting local files for {}", recoveryTarget);
        Store.MetadataSnapshot metadataSnapshot = null;
        try {
-           metadataSnapshot = recoveryTarget.store().getMetadataOrEmpty();
+           if (recoveryTarget.indexShard().indexSettings().isOnSharedFilesystem()) {
+               // we are not going to copy any files, so don't bother listing files, potentially running
+               // into concurrency issues with the primary changing files underneath us.
+               metadataSnapshot = Store.MetadataSnapshot.EMPTY;
+           } else {
+               metadataSnapshot = recoveryTarget.indexShard().snapshotStoreMetadata();
+           }
        } catch (IOException e) {
            logger.warn("error while listing local files, recover as if there are none", e);
            metadataSnapshot = Store.MetadataSnapshot.EMPTY;

@@ -178,6 +184,7 @@ public class RecoveryTargetService extends AbstractComponent implements IndexEve
                new RecoveryFailedException(recoveryTarget.state(), "failed to list local files", e), true);
            return;
        }
+       logger.trace("{} local file count: [{}]", recoveryTarget, metadataSnapshot.size());
        final StartRecoveryRequest request = new StartRecoveryRequest(recoveryTarget.shardId(), recoveryTarget.sourceNode(),
            clusterService.localNode(),
            metadataSnapshot, recoveryTarget.state().getType(), recoveryTarget.recoveryId());
@@ -123,14 +123,8 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesAction<T
        if (indexService != null) {
            IndexShard indexShard = indexService.getShardOrNull(shardId.id());
            if (indexShard != null) {
-               final Store store = indexShard.store();
-               store.incRef();
-               try {
-                   exists = true;
-                   return new StoreFilesMetaData(shardId, store.getMetadataOrEmpty());
-               } finally {
-                   store.decRef();
-               }
+               exists = true;
+               return new StoreFilesMetaData(shardId, indexShard.snapshotStoreMetadata());
            }
        }
        // try and see if we an list unallocated
@@ -133,7 +133,9 @@ import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
import java.util.stream.Collectors;
+import java.util.stream.Stream;

/**
 * A node represent a node within a cluster (<tt>cluster.name</tt>). The {@link #client()} can be used

@@ -302,7 +304,6 @@ public class Node implements Closeable {
            resourcesToClose.add(clusterService);
            final TribeService tribeService = new TribeService(settings, clusterService, nodeEnvironment.nodeId());
            resourcesToClose.add(tribeService);
-           NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();
            final IngestService ingestService = new IngestService(settings, threadPool, this.environment,
                scriptModule.getScriptService(), pluginsService.filterPlugins(IngestPlugin.class));

@@ -313,12 +314,15 @@ public class Node implements Closeable {
            }
            final MonitorService monitorService = new MonitorService(settings, nodeEnvironment, threadPool);
            modules.add(new NodeModule(this, monitorService));
-           modules.add(new NetworkModule(networkService, settings, false, namedWriteableRegistry));
+           NetworkModule networkModule = new NetworkModule(networkService, settings, false);
+           modules.add(networkModule);
            modules.add(new DiscoveryModule(this.settings));
            ClusterModule clusterModule = new ClusterModule(settings, clusterService);
            modules.add(clusterModule);
-           modules.add(new IndicesModule(namedWriteableRegistry, pluginsService.filterPlugins(MapperPlugin.class)));
-           modules.add(new SearchModule(settings, namedWriteableRegistry, false, pluginsService.filterPlugins(SearchPlugin.class)));
+           IndicesModule indicesModule = new IndicesModule(pluginsService.filterPlugins(MapperPlugin.class));
+           modules.add(indicesModule);
+           SearchModule searchModule = new SearchModule(settings, false, pluginsService.filterPlugins(SearchPlugin.class));
+           modules.add(searchModule);
            modules.add(new ActionModule(DiscoveryNode.isIngestNode(settings), false, settings,
                clusterModule.getIndexNameExpressionResolver(), settingsModule.getClusterSettings(),
                pluginsService.filterPlugins(ActionPlugin.class)));

@@ -331,6 +335,14 @@ public class Node implements Closeable {
            BigArrays bigArrays = createBigArrays(settings, circuitBreakerService);
            resourcesToClose.add(bigArrays);
            modules.add(settingsModule);
+           List<NamedWriteableRegistry.Entry> namedWriteables = Stream.of(
+               networkModule.getNamedWriteables().stream(),
+               indicesModule.getNamedWriteables().stream(),
+               searchModule.getNamedWriteables().stream(),
+               pluginsService.filterPlugins(Plugin.class).stream()
+                   .flatMap(p -> p.getNamedWriteables().stream()))
+               .flatMap(Function.identity()).collect(Collectors.toList());
+           final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(namedWriteables);
            client = new NodeClient(settings, threadPool);
            Collection<Object> pluginComponents = pluginsService.filterPlugins(Plugin.class).stream()
                .flatMap(p -> p.createComponents(client, clusterService, threadPool, resourceWatcherService).stream())

@@ -349,6 +361,7 @@ public class Node implements Closeable {
                b.bind(ScriptService.class).toInstance(scriptModule.getScriptService());
                b.bind(AnalysisRegistry.class).toInstance(analysisModule.getAnalysisRegistry());
                b.bind(IngestService.class).toInstance(ingestService);
+               b.bind(NamedWriteableRegistry.class).toInstance(namedWriteableRegistry);
                pluginComponents.stream().forEach(p -> b.bind((Class) p.getClass()).toInstance(p));
            }
        );
@@ -22,12 +22,16 @@ package org.elasticsearch.plugins;

import java.util.Collection;
import java.util.Collections;
+import java.util.List;
import java.util.Map;

import org.elasticsearch.action.ActionModule;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.io.stream.NamedWriteable;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsModule;

@@ -92,6 +96,14 @@ public abstract class Plugin {
        return Settings.Builder.EMPTY_SETTINGS;
    }

+   /**
+    * Returns parsers for {@link NamedWriteable} this plugin will use over the transport protocol.
+    * @see NamedWriteableRegistry
+    */
+   public List<NamedWriteableRegistry.Entry> getNamedWriteables() {
+       return Collections.emptyList();
+   }
+
    /**
     * Called before a new index is created on a node. The given module can be used to register index-level
     * extensions.
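With the new extension point above, a plugin contributes readers declaratively instead of mutating a shared registry. A hedged sketch of such an override — the plugin and writeable names are illustrative assumptions; ExampleWriteable is assumed to be a NamedWriteable defined by the plugin with a NAME constant and a StreamInput constructor:

    import java.util.Collections;
    import java.util.List;

    import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
    import org.elasticsearch.plugins.Plugin;

    public class ExamplePlugin extends Plugin {

        @Override
        public List<NamedWriteableRegistry.Entry> getNamedWriteables() {
            // category class + name unique within that category + reader, the same shape
            // used by NetworkModule, SearchModule and IndicesModule elsewhere in this commit
            return Collections.singletonList(
                new NamedWriteableRegistry.Entry(ExampleWriteable.class, ExampleWriteable.NAME, ExampleWriteable::new));
        }
    }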
@@ -98,7 +98,7 @@ public interface SearchPlugin {
    /**
     * Specification of custom {@link ScoreFunction}.
     */
-   public class ScoreFunctionSpec<T extends ScoreFunctionBuilder<T>> extends SearchExtensionSpec<T, ScoreFunctionParser<T>> {
+   class ScoreFunctionSpec<T extends ScoreFunctionBuilder<T>> extends SearchExtensionSpec<T, ScoreFunctionParser<T>> {
        public ScoreFunctionSpec(ParseField name, Reader<T> reader, ScoreFunctionParser<T> parser) {
            super(name, reader, parser);
        }

@@ -111,7 +111,7 @@ public interface SearchPlugin {
    /**
     * Specification of custom {@link Query}.
     */
-   public class QuerySpec<T extends QueryBuilder> extends SearchExtensionSpec<T, QueryParser<T>> {
+   class QuerySpec<T extends QueryBuilder> extends SearchExtensionSpec<T, QueryParser<T>> {
        /**
         * Specification of custom {@link Query}.
         *

@@ -148,7 +148,7 @@ public interface SearchPlugin {
     * @param P the type of the parser for this spec. The parser runs on the coordinating node, converting {@link XContent} into the
     *        behavior to execute
     */
-   public class SearchExtensionSpec<W extends NamedWriteable, P> {
+   class SearchExtensionSpec<W extends NamedWriteable, P> {
        private final ParseField name;
        private final Writeable.Reader<W> reader;
        private final P parser;

@@ -205,7 +205,7 @@ public interface SearchPlugin {
    /**
     * Context available during fetch phase construction.
     */
-   public class FetchPhaseConstructionContext {
+   class FetchPhaseConstructionContext {
        private final Map<String, Highlighter> highlighters;

        public FetchPhaseConstructionContext(Map<String, Highlighter> highlighters) {
@@ -23,14 +23,12 @@ import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.env.Environment;
-import org.elasticsearch.index.shard.IndexShard;
-import org.elasticsearch.indices.recovery.RecoveryState;
-import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.common.component.LifecycleComponent;
+import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus;
+import org.elasticsearch.indices.recovery.RecoveryState;
+import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.snapshots.SnapshotInfo;
import org.elasticsearch.snapshots.SnapshotShardFailure;

@@ -174,7 +172,7 @@ public interface Repository extends LifecycleComponent {
    /**
     * Creates a snapshot of the shard based on the index commit point.
     * <p>
-    * The index commit point can be obtained by using {@link org.elasticsearch.index.engine.Engine#snapshotIndex} method.
+    * The index commit point can be obtained by using {@link org.elasticsearch.index.engine.Engine#acquireIndexCommit} method.
     * Repository implementations shouldn't release the snapshot index commit point. It is done by the method caller.
     * <p>
     * As snapshot process progresses, implementation of this method should update {@link IndexShardSnapshotStatus} object and check
@ -23,6 +23,7 @@ import org.apache.lucene.index.CorruptIndexException;
|
|||
import org.apache.lucene.index.IndexCommit;
|
||||
import org.apache.lucene.index.IndexFormatTooNewException;
|
||||
import org.apache.lucene.index.IndexFormatTooOldException;
|
||||
import org.apache.lucene.index.IndexNotFoundException;
|
||||
import org.apache.lucene.index.IndexWriter;
|
||||
import org.apache.lucene.index.IndexWriterConfig;
|
||||
import org.apache.lucene.index.SegmentInfos;
|
||||
|
@ -40,14 +41,38 @@ import org.elasticsearch.cluster.metadata.MetaData;
|
|||
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.Numbers;
|
||||
import org.elasticsearch.common.ParseFieldMatcher;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.UUIDs;
|
||||
import org.elasticsearch.common.blobstore.BlobContainer;
|
||||
import org.elasticsearch.common.blobstore.BlobMetaData;
|
||||
import org.elasticsearch.common.blobstore.BlobPath;
|
||||
import org.elasticsearch.common.blobstore.BlobStore;
|
||||
import org.elasticsearch.common.bytes.BytesArray;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.collect.Tuple;
|
||||
import org.elasticsearch.common.component.AbstractLifecycleComponent;
|
||||
import org.elasticsearch.common.compress.NotXContentException;
|
||||
import org.elasticsearch.common.io.Streams;
|
||||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||
import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.lucene.Lucene;
|
||||
import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
|
||||
import org.elasticsearch.common.metrics.CounterMetric;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.ByteSizeUnit;
|
||||
import org.elasticsearch.common.unit.ByteSizeValue;
|
||||
import org.elasticsearch.common.util.iterable.Iterables;
|
||||
import org.elasticsearch.common.util.set.Sets;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.common.xcontent.XContentHelper;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.index.shard.IndexShard;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.index.snapshots.IndexShardRestoreFailedException;
|
||||
import org.elasticsearch.index.snapshots.IndexShardSnapshotException;
|
||||
import org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException;
|
||||
|
@ -61,37 +86,13 @@ import org.elasticsearch.index.store.Store;
|
|||
import org.elasticsearch.index.store.StoreFileMetaData;
|
||||
import org.elasticsearch.indices.recovery.RecoveryState;
|
||||
import org.elasticsearch.repositories.IndexId;
|
||||
import org.elasticsearch.repositories.RepositoryData;
|
||||
import org.elasticsearch.snapshots.SnapshotId;
|
||||
import org.elasticsearch.common.ParseFieldMatcher;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.UUIDs;
|
||||
import org.elasticsearch.common.blobstore.BlobContainer;
|
||||
import org.elasticsearch.common.blobstore.BlobMetaData;
|
||||
import org.elasticsearch.common.blobstore.BlobPath;
|
||||
import org.elasticsearch.common.blobstore.BlobStore;
|
||||
import org.elasticsearch.common.bytes.BytesArray;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.component.AbstractLifecycleComponent;
|
||||
import org.elasticsearch.common.compress.NotXContentException;
|
||||
import org.elasticsearch.common.io.Streams;
|
||||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||
import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.metrics.CounterMetric;
|
||||
import org.elasticsearch.common.unit.ByteSizeUnit;
|
||||
import org.elasticsearch.common.unit.ByteSizeValue;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.common.xcontent.XContentHelper;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.repositories.Repository;
|
||||
import org.elasticsearch.repositories.RepositoryData;
|
||||
import org.elasticsearch.repositories.RepositoryException;
|
||||
import org.elasticsearch.repositories.RepositoryVerificationException;
|
||||
import org.elasticsearch.snapshots.SnapshotCreationException;
|
||||
import org.elasticsearch.snapshots.SnapshotException;
|
||||
import org.elasticsearch.snapshots.SnapshotId;
|
||||
import org.elasticsearch.snapshots.SnapshotInfo;
|
||||
import org.elasticsearch.snapshots.SnapshotMissingException;
|
||||
import org.elasticsearch.snapshots.SnapshotShardFailure;
|
||||
|
@ -1444,7 +1445,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
     */
    private class RestoreContext extends Context {

        private final Store store;
        private final IndexShard targetShard;

        private final RecoveryState recoveryState;

@ -1460,13 +1461,14 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
        public RestoreContext(IndexShard shard, SnapshotId snapshotId, Version version, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) {
            super(snapshotId, version, indexId, shard.shardId(), snapshotShardId);
            this.recoveryState = recoveryState;
            store = shard.store();
            this.targetShard = shard;
        }

        /**
         * Performs restore operation
         */
        public void restore() throws IOException {
            final Store store = targetShard.store();
            store.incRef();
            try {
                logger.debug("[{}] [{}] restoring to [{}] ...", snapshotId, metadata.name(), shardId);

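The constructor now keeps the whole IndexShard instead of caching only its Store, and restore() pins the store with incRef() before touching it. A minimal sketch of that reference-counting pattern, assuming the matching decRef() sits in a finally block further down (outside the lines shown in this hunk):

---------------------------------------------------------------------------
// Sketch of the Store ref-counting pattern; the decRef() placement is assumed.
final Store store = targetShard.store();
store.incRef();            // keep the store open for the duration of the restore
try {
    // ... resolve the snapshot, compute files to recover, copy them in ...
} finally {
    store.decRef();        // release our reference once the restore attempt is done
}
---------------------------------------------------------------------------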
@ -1491,12 +1493,16 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
                }

                SnapshotFiles snapshotFiles = new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles());
                final Store.MetadataSnapshot recoveryTargetMetadata;
                Store.MetadataSnapshot recoveryTargetMetadata;
                try {
                    recoveryTargetMetadata = store.getMetadataOrEmpty();
                } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException e) {
                    logger.warn("{} Can't read metadata from store", e, shardId);
                    throw new IndexShardRestoreFailedException(shardId, "Can't restore corrupted shard", e);
                    recoveryTargetMetadata = targetShard.snapshotStoreMetadata();
                } catch (IndexNotFoundException e) {
                    // happens when restore to an empty shard, not a big deal
                    logger.trace("[{}] [{}] restoring from to an empty shard", shardId, snapshotId);
                    recoveryTargetMetadata = Store.MetadataSnapshot.EMPTY;
                } catch (IOException e) {
                    logger.warn("{} Can't read metadata from store, will not reuse any local file while restoring", e, shardId);
                    recoveryTargetMetadata = Store.MetadataSnapshot.EMPTY;
                }

                final List<BlobStoreIndexShardSnapshot.FileInfo> filesToRecover = new ArrayList<>();

@ -1550,7 +1556,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
                try {
                    for (final BlobStoreIndexShardSnapshot.FileInfo fileToRecover : filesToRecover) {
                        logger.trace("[{}] [{}] restoring file [{}]", shardId, snapshotId, fileToRecover.name());
                        restoreFile(fileToRecover);
                        restoreFile(fileToRecover, store);
                    }
                } catch (IOException ex) {
                    throw new IndexShardRestoreFailedException(shardId, "Failed to recover index", ex);

@ -1597,7 +1603,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
         *
         * @param fileInfo file to be restored
         */
        private void restoreFile(final BlobStoreIndexShardSnapshot.FileInfo fileInfo) throws IOException {
        private void restoreFile(final BlobStoreIndexShardSnapshot.FileInfo fileInfo, final Store store) throws IOException {
            boolean success = false;

            try (InputStream partSliceStream = new PartSliceStream(blobContainer, fileInfo)) {

@ -98,7 +98,7 @@ public class RestTasksAction extends AbstractCatAction {

        // Task detailed info
        if (detailed) {
            table.addCell("description", "default:false;alias:desc;desc:task action");
            table.addCell("description", "default:true;alias:desc;desc:task action");
        }
        table.endHeaders();
        return table;

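The cat tasks fix in detailed mode is simply flipping the description column to default:true so it is actually rendered. For readers unfamiliar with the cat Table cell attributes, a hedged reading of that string (the attribute keys come from the cat API's Table helper; the explanations are an interpretation, not text from this commit):

---------------------------------------------------------------------------
// Same call as in the hunk above, with the attribute string unpacked:
//   default:true - include the column even when no explicit ?h= list is given
//   alias:desc   - allow "desc" as a short column name in ?h=
//   desc:...     - help text shown by GET _cat/tasks?help
table.addCell("description", "default:true;alias:desc;desc:task action");
---------------------------------------------------------------------------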
@ -142,7 +142,7 @@ public class RestTasksAction extends AbstractCatAction {
        table.endRow();
    }

    private void buildGroups(Table table, boolean detailed, boolean fullId, List<TaskGroup> taskGroups) {
    private void buildGroups(Table table, boolean fullId, boolean detailed, List<TaskGroup> taskGroups) {
        DiscoveryNodes discoveryNodes = clusterService.state().nodes();
        List<TaskGroup> sortedGroups = new ArrayList<>(taskGroups);
        sortedGroups.sort((o1, o2) -> Long.compare(o1.getTaskInfo().getStartTime(), o2.getTaskInfo().getStartTime()));

@ -19,6 +19,13 @@
|
|||
|
||||
package org.elasticsearch.search;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.TreeMap;
|
||||
import java.util.function.Consumer;
|
||||
import java.util.function.Function;
|
||||
|
||||
import org.apache.lucene.search.BooleanQuery;
|
||||
import org.elasticsearch.common.NamedRegistry;
|
||||
import org.elasticsearch.common.ParseField;
|
||||
|
@ -26,7 +33,7 @@ import org.elasticsearch.common.geo.ShapesAvailability;
|
|||
import org.elasticsearch.common.geo.builders.ShapeBuilders;
|
||||
import org.elasticsearch.common.inject.AbstractModule;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteable;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.io.stream.Writeable.Reader;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
|
@ -116,6 +123,7 @@ import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggre
|
|||
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramParser;
|
||||
import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder;
|
||||
import org.elasticsearch.search.aggregations.bucket.histogram.HistogramParser;
|
||||
import org.elasticsearch.search.aggregations.bucket.histogram.InternalDateHistogram;
|
||||
import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram;
|
||||
import org.elasticsearch.search.aggregations.bucket.missing.InternalMissing;
|
||||
import org.elasticsearch.search.aggregations.bucket.missing.MissingAggregationBuilder;
|
||||
|
@ -245,8 +253,8 @@ import org.elasticsearch.search.aggregations.pipeline.serialdiff.SerialDiffPipel
|
|||
import org.elasticsearch.search.controller.SearchPhaseController;
|
||||
import org.elasticsearch.search.fetch.FetchPhase;
|
||||
import org.elasticsearch.search.fetch.FetchSubPhase;
|
||||
import org.elasticsearch.search.fetch.explain.ExplainFetchSubPhase;
|
||||
import org.elasticsearch.search.fetch.docvalues.DocValueFieldsFetchSubPhase;
|
||||
import org.elasticsearch.search.fetch.explain.ExplainFetchSubPhase;
|
||||
import org.elasticsearch.search.fetch.matchedqueries.MatchedQueriesFetchSubPhase;
|
||||
import org.elasticsearch.search.fetch.parent.ParentFieldSubFetchPhase;
|
||||
import org.elasticsearch.search.fetch.script.ScriptFieldsFetchSubPhase;
|
||||
|
@ -275,13 +283,6 @@ import org.elasticsearch.search.suggest.phrase.SmoothingModel;
|
|||
import org.elasticsearch.search.suggest.phrase.StupidBackoff;
|
||||
import org.elasticsearch.search.suggest.term.TermSuggester;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.TreeMap;
|
||||
import java.util.function.Consumer;
|
||||
import java.util.function.Function;
|
||||
|
||||
import static java.util.Collections.unmodifiableMap;
|
||||
import static java.util.Objects.requireNonNull;
|
||||
|
||||
|
@ -307,17 +308,15 @@ public class SearchModule extends AbstractModule {
    private final List<FetchSubPhase> fetchSubPhases = new ArrayList<>();

    private final Settings settings;
    private final NamedWriteableRegistry namedWriteableRegistry;
    private final List<Entry> namedWriteables = new ArrayList<>();
    public static final Setting<Integer> INDICES_MAX_CLAUSE_COUNT_SETTING = Setting.intSetting("indices.query.bool.max_clause_count",
            1024, 1, Integer.MAX_VALUE, Setting.Property.NodeScope);

    // pkg private so tests can mock
    Class<? extends SearchService> searchServiceImpl = SearchService.class;

    public SearchModule(Settings settings, NamedWriteableRegistry namedWriteableRegistry, boolean transportClient,
            List<SearchPlugin> plugins) {
    public SearchModule(Settings settings, boolean transportClient, List<SearchPlugin> plugins) {
        this.settings = settings;
        this.namedWriteableRegistry = namedWriteableRegistry;
        this.transportClient = transportClient;
        suggesters = setupSuggesters(plugins);
        highlighters = setupHighlighters(settings, plugins);

@ -330,6 +329,11 @@ public class SearchModule extends AbstractModule {
        registerMovingAverageModels(plugins);
        registerBuiltinAggregations();
        registerFetchSubPhases(plugins);
        registerShapes();
    }

    public List<Entry> getNamedWriteables() {
        return namedWriteables;
    }

    public Suggesters getSuggesters() {

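SearchModule no longer writes into a shared NamedWriteableRegistry; it accumulates Entry objects and exposes them through getNamedWriteables(). A minimal sketch of how a caller might assemble the immutable registry from those entries; the entries-based NamedWriteableRegistry constructor is assumed from the "immutable registry" change merged from master and is not shown in this hunk:

---------------------------------------------------------------------------
// Sketch only; the NamedWriteableRegistry constructor shape is an assumption.
SearchModule searchModule = new SearchModule(settings, false, searchPlugins);
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
entries.addAll(searchModule.getNamedWriteables());
// ... other modules or plugins can contribute their own entries here ...
NamedWriteableRegistry registry = new NamedWriteableRegistry(entries);
---------------------------------------------------------------------------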
@ -368,11 +372,11 @@ public class SearchModule extends AbstractModule {
        if (false == transportClient) {
            aggregationParserRegistry.register(spec.parser, spec.name);
        }
        namedWriteableRegistry.register(AggregationBuilder.class, spec.name.getPreferredName(), spec.builderReader);
        namedWriteables.add(new Entry(AggregationBuilder.class, spec.name.getPreferredName(), spec.builderReader));
        for (Map.Entry<String, Writeable.Reader<? extends InternalAggregation>> t : spec.resultReaders.entrySet()) {
            String writeableName = t.getKey();
            Writeable.Reader<? extends InternalAggregation> internalReader = t.getValue();
            namedWriteableRegistry.register(InternalAggregation.class, writeableName, internalReader);
            namedWriteables.add(new Entry(InternalAggregation.class, writeableName, internalReader));
        }
    }

@ -421,10 +425,10 @@ public class SearchModule extends AbstractModule {
        if (false == transportClient) {
            pipelineAggregationParserRegistry.register(spec.parser, spec.name);
        }
        namedWriteableRegistry.register(PipelineAggregationBuilder.class, spec.name.getPreferredName(), spec.builderReader);
        namedWriteableRegistry.register(PipelineAggregator.class, spec.name.getPreferredName(), spec.aggregatorReader);
        namedWriteables.add(new Entry(PipelineAggregationBuilder.class, spec.name.getPreferredName(), spec.builderReader));
        namedWriteables.add(new Entry(PipelineAggregator.class, spec.name.getPreferredName(), spec.aggregatorReader));
        for (Map.Entry<String, Writeable.Reader<? extends InternalAggregation>> resultReader : spec.resultReaders.entrySet()) {
            namedWriteableRegistry.register(InternalAggregation.class, resultReader.getKey(), resultReader.getValue());
            namedWriteables.add(new Entry(InternalAggregation.class, resultReader.getKey(), resultReader.getValue()));
        }
    }

@ -482,7 +486,6 @@ public class SearchModule extends AbstractModule {
            configureSearch();
            bind(AggregatorParsers.class).toInstance(aggregatorParsers);
        }
        configureShapes();
    }

    private void registerBuiltinAggregations() {

@ -546,7 +549,7 @@ public class SearchModule extends AbstractModule {
        registerAggregation(new AggregationSpec(HistogramAggregationBuilder::new, new HistogramParser(),
                HistogramAggregationBuilder.AGGREGATION_NAME_FIELD).addResultReader(InternalHistogram::new));
        registerAggregation(new AggregationSpec(DateHistogramAggregationBuilder::new, new DateHistogramParser(),
                DateHistogramAggregationBuilder.AGGREGATION_NAME_FIELD));
                DateHistogramAggregationBuilder.AGGREGATION_NAME_FIELD).addResultReader(InternalDateHistogram::new));
        registerAggregation(new AggregationSpec(GeoDistanceAggregationBuilder::new, new GeoDistanceParser(),
                GeoDistanceAggregationBuilder.AGGREGATION_NAME_FIELD).addResultReader(InternalGeoDistance::new));
        registerAggregation(new AggregationSpec(GeoGridAggregationBuilder::new, new GeoHashGridParser(),

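With date histograms split from regular histograms, the date variant now registers its own result reader (InternalDateHistogram) instead of reusing InternalHistogram. For illustration, a hypothetical custom aggregation would follow the same spec pattern; the My* names below are invented for the example:

---------------------------------------------------------------------------
// Hypothetical registration following the AggregationSpec pattern above.
registerAggregation(new AggregationSpec(MyAggregationBuilder::new, new MyAggregationParser(),
        MyAggregationBuilder.AGGREGATION_NAME_FIELD)
    .addResultReader(InternalMyAggregation::new));   // reader used to deserialize shard-level results
---------------------------------------------------------------------------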
@ -656,21 +659,21 @@ public class SearchModule extends AbstractModule {
        }
    }

    private void configureShapes() {
    private void registerShapes() {
        if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) {
            ShapeBuilders.register(namedWriteableRegistry);
            ShapeBuilders.register(namedWriteables);
        }
    }

    private void registerRescorers() {
        namedWriteableRegistry.register(RescoreBuilder.class, QueryRescorerBuilder.NAME, QueryRescorerBuilder::new);
        namedWriteables.add(new Entry(RescoreBuilder.class, QueryRescorerBuilder.NAME, QueryRescorerBuilder::new));
    }

    private void registerSorts() {
        namedWriteableRegistry.register(SortBuilder.class, GeoDistanceSortBuilder.NAME, GeoDistanceSortBuilder::new);
        namedWriteableRegistry.register(SortBuilder.class, ScoreSortBuilder.NAME, ScoreSortBuilder::new);
        namedWriteableRegistry.register(SortBuilder.class, ScriptSortBuilder.NAME, ScriptSortBuilder::new);
        namedWriteableRegistry.register(SortBuilder.class, FieldSortBuilder.NAME, FieldSortBuilder::new);
        namedWriteables.add(new Entry(SortBuilder.class, GeoDistanceSortBuilder.NAME, GeoDistanceSortBuilder::new));
        namedWriteables.add(new Entry(SortBuilder.class, ScoreSortBuilder.NAME, ScoreSortBuilder::new));
        namedWriteables.add(new Entry(SortBuilder.class, ScriptSortBuilder.NAME, ScriptSortBuilder::new));
        namedWriteables.add(new Entry(SortBuilder.class, FieldSortBuilder.NAME, FieldSortBuilder::new));
    }

    private <T> void registerFromPlugin(List<SearchPlugin> plugins, Function<SearchPlugin, List<T>> producer, Consumer<T> consumer) {

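Every one of these registrations now reduces to an Entry of (category class, wire name, reader). The line below shows the same pattern for a hypothetical plugin-provided sort; only the My* names are invented:

---------------------------------------------------------------------------
// Entry ties together the category (SortBuilder.class), the name written to the
// wire, and the reader used to recreate the builder from a StreamInput.
namedWriteables.add(new Entry(SortBuilder.class, MyDistanceSortBuilder.NAME, MyDistanceSortBuilder::new));
---------------------------------------------------------------------------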
@ -681,21 +684,21 @@ public class SearchModule extends AbstractModule {
        }
    }

    public static void registerSmoothingModels(NamedWriteableRegistry namedWriteableRegistry) {
        namedWriteableRegistry.register(SmoothingModel.class, Laplace.NAME, Laplace::new);
        namedWriteableRegistry.register(SmoothingModel.class, LinearInterpolation.NAME, LinearInterpolation::new);
        namedWriteableRegistry.register(SmoothingModel.class, StupidBackoff.NAME, StupidBackoff::new);
    public static void registerSmoothingModels(List<Entry> namedWriteables) {
        namedWriteables.add(new Entry(SmoothingModel.class, Laplace.NAME, Laplace::new));
        namedWriteables.add(new Entry(SmoothingModel.class, LinearInterpolation.NAME, LinearInterpolation::new));
        namedWriteables.add(new Entry(SmoothingModel.class, StupidBackoff.NAME, StupidBackoff::new));
    }

    private Map<String, Suggester<?>> setupSuggesters(List<SearchPlugin> plugins) {
        registerSmoothingModels(namedWriteableRegistry);
        registerSmoothingModels(namedWriteables);

        // Suggester<?> is weird - it is both a Parser and a reader....
        NamedRegistry<Suggester<?>> suggesters = new NamedRegistry<Suggester<?>>("suggester") {
            @Override
            public void register(String name, Suggester<?> t) {
                super.register(name, t);
                namedWriteableRegistry.register(SuggestionBuilder.class, name, t);
                namedWriteables.add(new Entry(SuggestionBuilder.class, name, t));
            }
        };
        suggesters.register("phrase", PhraseSuggester.INSTANCE);

@ -733,14 +736,14 @@ public class SearchModule extends AbstractModule {

        //weight doesn't have its own parser, so every function supports it out of the box.
        //Can be a single function too when not associated to any other function, which is why it needs to be registered manually here.
        namedWriteableRegistry.register(ScoreFunctionBuilder.class, WeightBuilder.NAME, WeightBuilder::new);
        namedWriteables.add(new Entry(ScoreFunctionBuilder.class, WeightBuilder.NAME, WeightBuilder::new));

        registerFromPlugin(plugins, SearchPlugin::getScoreFunctions, this::registerScoreFunction);
    }

    private void registerScoreFunction(ScoreFunctionSpec<?> scoreFunction) {
        scoreFunctionParserRegistry.register(scoreFunction.getParser(), scoreFunction.getName());
        namedWriteableRegistry.register(ScoreFunctionBuilder.class, scoreFunction.getName().getPreferredName(), scoreFunction.getReader());
        namedWriteables.add(new Entry(ScoreFunctionBuilder.class, scoreFunction.getName().getPreferredName(), scoreFunction.getReader()));
    }

    private void registerValueFormats() {

@ -756,7 +759,7 @@ public class SearchModule extends AbstractModule {
     * Register a new ValueFormat.
     */
    private void registerValueFormat(String name, Writeable.Reader<? extends DocValueFormat> reader) {
        namedWriteableRegistry.register(DocValueFormat.class, name, reader);
        namedWriteables.add(new Entry(DocValueFormat.class, name, reader));
    }

    private void registerSignificanceHeuristics(List<SearchPlugin> plugins) {

@ -772,7 +775,7 @@ public class SearchModule extends AbstractModule {

    private void registerSignificanceHeuristic(SearchExtensionSpec<SignificanceHeuristic, SignificanceHeuristicParser> heuristic) {
        significanceHeuristicParserRegistry.register(heuristic.getParser(), heuristic.getName());
        namedWriteableRegistry.register(SignificanceHeuristic.class, heuristic.getName().getPreferredName(), heuristic.getReader());
        namedWriteables.add(new Entry(SignificanceHeuristic.class, heuristic.getName().getPreferredName(), heuristic.getReader()));
    }

    private void registerMovingAverageModels(List<SearchPlugin> plugins) {

@ -787,7 +790,7 @@ public class SearchModule extends AbstractModule {

    private void registerMovingAverageModel(SearchExtensionSpec<MovAvgModel, MovAvgModel.AbstractModelParser> movAvgModel) {
        movingAverageModelParserRegistry.register(movAvgModel.getParser(), movAvgModel.getName());
        namedWriteableRegistry.register(MovAvgModel.class, movAvgModel.getName().getPreferredName(), movAvgModel.getReader());
        namedWriteables.add(new Entry(MovAvgModel.class, movAvgModel.getName().getPreferredName(), movAvgModel.getReader()));
    }

    private void registerFetchSubPhases(List<SearchPlugin> plugins) {

@ -881,6 +884,6 @@ public class SearchModule extends AbstractModule {

    private void registerQuery(QuerySpec<?> spec) {
        queryParserRegistry.register(spec.getParser(), spec.getName());
        namedWriteableRegistry.register(QueryBuilder.class, spec.getName().getPreferredName(), spec.getReader());
        namedWriteables.add(new Entry(QueryBuilder.class, spec.getName().getPreferredName(), spec.getReader()));
    }
}

@ -1,113 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.search.aggregations.bucket.histogram;
|
||||
|
||||
import org.elasticsearch.common.ParsingException;
|
||||
import org.elasticsearch.common.rounding.Rounding;
|
||||
import org.elasticsearch.search.aggregations.Aggregator;
|
||||
import org.elasticsearch.search.aggregations.AggregatorFactories;
|
||||
import org.elasticsearch.search.aggregations.AggregatorFactory;
|
||||
import org.elasticsearch.search.aggregations.InternalAggregation.Type;
|
||||
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
|
||||
import org.elasticsearch.search.aggregations.support.AggregationContext;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSource;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
|
||||
|
||||
public abstract class AbstractHistogramAggregatorFactory<AF extends AbstractHistogramAggregatorFactory<AF>>
|
||||
extends ValuesSourceAggregatorFactory<ValuesSource.Numeric, AF> {
|
||||
|
||||
protected final long interval;
|
||||
protected final long offset;
|
||||
protected final InternalOrder order;
|
||||
protected final boolean keyed;
|
||||
protected final long minDocCount;
|
||||
protected final ExtendedBounds extendedBounds;
|
||||
private final InternalHistogram.Factory<?> histogramFactory;
|
||||
|
||||
public AbstractHistogramAggregatorFactory(String name, Type type, ValuesSourceConfig<Numeric> config, long interval, long offset,
|
||||
InternalOrder order, boolean keyed, long minDocCount, ExtendedBounds extendedBounds,
|
||||
InternalHistogram.Factory<?> histogramFactory, AggregationContext context, AggregatorFactory<?> parent,
|
||||
AggregatorFactories.Builder subFactoriesBuilder, Map<String, Object> metaData) throws IOException {
|
||||
super(name, type, config, context, parent, subFactoriesBuilder, metaData);
|
||||
this.interval = interval;
|
||||
this.offset = offset;
|
||||
this.order = order;
|
||||
this.keyed = keyed;
|
||||
this.minDocCount = minDocCount;
|
||||
this.extendedBounds = extendedBounds;
|
||||
this.histogramFactory = histogramFactory;
|
||||
}
|
||||
|
||||
public long minDocCount() {
|
||||
return minDocCount;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Aggregator createUnmapped(Aggregator parent, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData)
|
||||
throws IOException {
|
||||
return createAggregator(null, parent, pipelineAggregators, metaData);
|
||||
}
|
||||
|
||||
protected Rounding createRounding() {
|
||||
if (interval < 1) {
|
||||
throw new ParsingException(null, "[interval] must be 1 or greater for histogram aggregation [" + name() + "]: " + interval);
|
||||
}
|
||||
|
||||
Rounding rounding = new Rounding.Interval(interval);
|
||||
if (offset != 0) {
|
||||
rounding = new Rounding.OffsetRounding(rounding, offset);
|
||||
}
|
||||
return rounding;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, Aggregator parent, boolean collectsFromSingleBucket,
|
||||
List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
|
||||
if (collectsFromSingleBucket == false) {
|
||||
return asMultiBucketAggregator(this, context, parent);
|
||||
}
|
||||
return createAggregator(valuesSource, parent, pipelineAggregators, metaData);
|
||||
}
|
||||
|
||||
private Aggregator createAggregator(ValuesSource.Numeric valuesSource, Aggregator parent, List<PipelineAggregator> pipelineAggregators,
|
||||
Map<String, Object> metaData) throws IOException {
|
||||
Rounding rounding = createRounding();
|
||||
// we need to round the bounds given by the user and we have to do it
|
||||
// for every aggregator we create
|
||||
// as the rounding is not necessarily an idempotent operation.
|
||||
// todo we need to think of a better structure to the factory/agtor
|
||||
// code so we won't need to do that
|
||||
ExtendedBounds roundedBounds = null;
|
||||
if (extendedBounds != null) {
|
||||
// parse any string bounds to longs and round them
|
||||
roundedBounds = extendedBounds.parseAndValidate(name, context.searchContext(), config.format()).round(rounding);
|
||||
}
|
||||
return new HistogramAggregator(name, factories, rounding, order, keyed, minDocCount, roundedBounds, valuesSource,
|
||||
config.format(), histogramFactory, context, parent, pipelineAggregators, metaData);
|
||||
}
|
||||
|
||||
}
|
|
@ -1,203 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.search.aggregations.bucket.histogram;
|
||||
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.rounding.Rounding;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSource;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSourceType;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Objects;
|
||||
|
||||
public abstract class AbstractHistogramBuilder<AB extends AbstractHistogramBuilder<AB>>
|
||||
extends ValuesSourceAggregationBuilder<ValuesSource.Numeric, AB> {
|
||||
|
||||
protected long interval;
|
||||
protected long offset = 0;
|
||||
protected InternalOrder order = (InternalOrder) Histogram.Order.KEY_ASC;
|
||||
protected boolean keyed = false;
|
||||
protected long minDocCount = 0;
|
||||
protected ExtendedBounds extendedBounds;
|
||||
|
||||
protected AbstractHistogramBuilder(String name, InternalHistogram.Factory<?> histogramFactory) {
|
||||
super(name, histogramFactory.type(), ValuesSourceType.NUMERIC, histogramFactory.valueType());
|
||||
}
|
||||
|
||||
/**
|
||||
* Read from a stream.
|
||||
*/
|
||||
protected AbstractHistogramBuilder(StreamInput in, InternalHistogram.Factory<?> histogramFactory) throws IOException {
|
||||
super(in, histogramFactory.type(), ValuesSourceType.NUMERIC, histogramFactory.valueType());
|
||||
interval = in.readVLong();
|
||||
offset = in.readLong();
|
||||
if (in.readBoolean()) {
|
||||
order = InternalOrder.Streams.readOrder(in);
|
||||
}
|
||||
keyed = in.readBoolean();
|
||||
minDocCount = in.readVLong();
|
||||
if (in.readBoolean()) {
|
||||
extendedBounds = new ExtendedBounds(in);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void innerWriteTo(StreamOutput out) throws IOException {
|
||||
out.writeVLong(interval);
|
||||
out.writeLong(offset);
|
||||
boolean hasOrder = order != null;
|
||||
out.writeBoolean(hasOrder);
|
||||
if (hasOrder) {
|
||||
InternalOrder.Streams.writeOrder(order, out);
|
||||
}
|
||||
out.writeBoolean(keyed);
|
||||
out.writeVLong(minDocCount);
|
||||
boolean hasExtendedBounds = extendedBounds != null;
|
||||
out.writeBoolean(hasExtendedBounds);
|
||||
if (hasExtendedBounds) {
|
||||
extendedBounds.writeTo(out);
|
||||
}
|
||||
}
|
||||
|
||||
public long interval() {
|
||||
return interval;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
public AB interval(long interval) {
|
||||
if (interval < 1) {
|
||||
throw new IllegalArgumentException("[interval] must be 1 or greater for histogram aggregation [" + name + "]");
|
||||
}
|
||||
this.interval = interval;
|
||||
return (AB) this;
|
||||
}
|
||||
|
||||
public long offset() {
|
||||
return offset;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
public AB offset(long offset) {
|
||||
this.offset = offset;
|
||||
return (AB) this;
|
||||
}
|
||||
|
||||
public Histogram.Order order() {
|
||||
return order;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
public AB order(Histogram.Order order) {
|
||||
if (order == null) {
|
||||
throw new IllegalArgumentException("[order] must not be null: [" + name + "]");
|
||||
}
|
||||
this.order = (InternalOrder) order;
|
||||
return (AB) this;
|
||||
}
|
||||
|
||||
public boolean keyed() {
|
||||
return keyed;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
public AB keyed(boolean keyed) {
|
||||
this.keyed = keyed;
|
||||
return (AB) this;
|
||||
}
|
||||
|
||||
public long minDocCount() {
|
||||
return minDocCount;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
public AB minDocCount(long minDocCount) {
|
||||
if (minDocCount < 0) {
|
||||
throw new IllegalArgumentException(
|
||||
"[minDocCount] must be greater than or equal to 0. Found [" + minDocCount + "] in [" + name + "]");
|
||||
}
|
||||
this.minDocCount = minDocCount;
|
||||
return (AB) this;
|
||||
}
|
||||
|
||||
public ExtendedBounds extendedBounds() {
|
||||
return extendedBounds;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
public AB extendedBounds(ExtendedBounds extendedBounds) {
|
||||
if (extendedBounds == null) {
|
||||
throw new IllegalArgumentException("[extendedBounds] must not be null: [" + name + "]");
|
||||
}
|
||||
this.extendedBounds = extendedBounds;
|
||||
return (AB) this;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
|
||||
|
||||
builder.field(Rounding.Interval.INTERVAL_FIELD.getPreferredName());
|
||||
doXContentInterval(builder, params);
|
||||
builder.field(Rounding.OffsetRounding.OFFSET_FIELD.getPreferredName(), offset);
|
||||
|
||||
if (order != null) {
|
||||
builder.field(HistogramAggregator.ORDER_FIELD.getPreferredName());
|
||||
order.toXContent(builder, params);
|
||||
}
|
||||
|
||||
builder.field(HistogramAggregator.KEYED_FIELD.getPreferredName(), keyed);
|
||||
|
||||
builder.field(HistogramAggregator.MIN_DOC_COUNT_FIELD.getPreferredName(), minDocCount);
|
||||
|
||||
if (extendedBounds != null) {
|
||||
extendedBounds.toXContent(builder, params);
|
||||
}
|
||||
|
||||
return builder;
|
||||
}
|
||||
|
||||
protected XContentBuilder doXContentInterval(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.value(interval);
|
||||
return builder;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getWriteableName() {
|
||||
return InternalHistogram.TYPE.name();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected int innerHashCode() {
|
||||
return Objects.hash(interval, offset, order, keyed, minDocCount, extendedBounds);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean innerEquals(Object obj) {
|
||||
AbstractHistogramBuilder<?> other = (AbstractHistogramBuilder<?>) obj;
|
||||
return Objects.equals(interval, other.interval)
|
||||
&& Objects.equals(offset, other.offset)
|
||||
&& Objects.equals(order, other.order)
|
||||
&& Objects.equals(keyed, other.keyed)
|
||||
&& Objects.equals(minDocCount, other.minDocCount)
|
||||
&& Objects.equals(extendedBounds, other.extendedBounds);
|
||||
}
|
||||
}
|
|
@ -27,40 +27,91 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
|
|||
import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;
|
||||
import org.elasticsearch.search.aggregations.AggregatorFactory;
|
||||
import org.elasticsearch.search.aggregations.support.AggregationContext;
|
||||
import org.elasticsearch.search.aggregations.support.ValueType;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSource;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSourceType;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Objects;
|
||||
|
||||
public class DateHistogramAggregationBuilder extends AbstractHistogramBuilder<DateHistogramAggregationBuilder> {
|
||||
|
||||
/**
|
||||
* A builder for histograms on date fields.
|
||||
*/
|
||||
public class DateHistogramAggregationBuilder
|
||||
extends ValuesSourceAggregationBuilder<ValuesSource.Numeric, DateHistogramAggregationBuilder> {
|
||||
public static final String NAME = InternalDateHistogram.TYPE.name();
|
||||
public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME);
|
||||
|
||||
private long interval;
|
||||
private DateHistogramInterval dateHistogramInterval;
|
||||
private long offset = 0;
|
||||
private ExtendedBounds extendedBounds;
|
||||
private InternalOrder order = (InternalOrder) Histogram.Order.KEY_ASC;
|
||||
private boolean keyed = false;
|
||||
private long minDocCount = 0;
|
||||
|
||||
/** Create a new builder with the given name. */
|
||||
public DateHistogramAggregationBuilder(String name) {
|
||||
super(name, InternalDateHistogram.HISTOGRAM_FACTORY);
|
||||
super(name, InternalDateHistogram.TYPE, ValuesSourceType.NUMERIC, ValueType.DATE);
|
||||
}
|
||||
|
||||
/**
|
||||
* Read from a stream.
|
||||
*/
|
||||
/** Read from a stream, for internal use only. */
|
||||
public DateHistogramAggregationBuilder(StreamInput in) throws IOException {
|
||||
super(in, InternalDateHistogram.HISTOGRAM_FACTORY);
|
||||
super(in, InternalDateHistogram.TYPE, ValuesSourceType.NUMERIC, ValueType.DATE);
|
||||
if (in.readBoolean()) {
|
||||
order = InternalOrder.Streams.readOrder(in);
|
||||
}
|
||||
keyed = in.readBoolean();
|
||||
minDocCount = in.readVLong();
|
||||
interval = in.readLong();
|
||||
dateHistogramInterval = in.readOptionalWriteable(DateHistogramInterval::new);
|
||||
offset = in.readLong();
|
||||
extendedBounds = in.readOptionalWriteable(ExtendedBounds::new);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void innerWriteTo(StreamOutput out) throws IOException {
|
||||
super.innerWriteTo(out);
|
||||
boolean hasOrder = order != null;
|
||||
out.writeBoolean(hasOrder);
|
||||
if (hasOrder) {
|
||||
InternalOrder.Streams.writeOrder(order, out);
|
||||
}
|
||||
out.writeBoolean(keyed);
|
||||
out.writeVLong(minDocCount);
|
||||
out.writeLong(interval);
|
||||
out.writeOptionalWriteable(dateHistogramInterval);
|
||||
out.writeLong(offset);
|
||||
out.writeOptionalWriteable(extendedBounds);
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the interval.
|
||||
*/
|
||||
/** Get the current interval in milliseconds that is set on this builder. */
|
||||
public double interval() {
|
||||
return interval;
|
||||
}
|
||||
|
||||
/** Set the interval on this builder, and return the builder so that calls can be chained.
|
||||
* If both {@link #interval()} and {@link #dateHistogramInterval()} are set, then the
|
||||
* {@link #dateHistogramInterval()} wins. */
|
||||
public DateHistogramAggregationBuilder interval(long interval) {
|
||||
if (interval < 1) {
|
||||
throw new IllegalArgumentException("[interval] must be 1 or greater for histogram aggregation [" + name + "]");
|
||||
}
|
||||
this.interval = interval;
|
||||
return this;
|
||||
}
|
||||
|
||||
/** Get the current date interval that is set on this builder. */
|
||||
public DateHistogramInterval dateHistogramInterval() {
|
||||
return dateHistogramInterval;
|
||||
}
|
||||
|
||||
/** Set the interval on this builder, and return the builder so that calls can be chained.
|
||||
* If both {@link #interval()} and {@link #dateHistogramInterval()} are set, then the
|
||||
* {@link #dateHistogramInterval()} wins. */
|
||||
public DateHistogramAggregationBuilder dateHistogramInterval(DateHistogramInterval dateHistogramInterval) {
|
||||
if (dateHistogramInterval == null) {
|
||||
throw new IllegalArgumentException("[dateHistogramInterval] must not be null: [" + name + "]");
|
||||
|
@ -69,6 +120,20 @@ public class DateHistogramAggregationBuilder extends AbstractHistogramBuilder<Da
|
|||
return this;
|
||||
}
|
||||
|
||||
/** Get the offset to use when rounding, which is a number of milliseconds. */
|
||||
public double offset() {
|
||||
return offset;
|
||||
}
|
||||
|
||||
/** Set the offset on this builder, which is a number of milliseconds, and
|
||||
* return the builder so that calls can be chained. */
|
||||
public DateHistogramAggregationBuilder offset(long offset) {
|
||||
this.offset = offset;
|
||||
return this;
|
||||
}
|
||||
|
||||
/** Set the offset on this builder, as a time value, and
|
||||
* return the builder so that calls can be chained. */
|
||||
public DateHistogramAggregationBuilder offset(String offset) {
|
||||
if (offset == null) {
|
||||
throw new IllegalArgumentException("[offset] must not be null: [" + name + "]");
|
||||
|
@ -76,7 +141,7 @@ public class DateHistogramAggregationBuilder extends AbstractHistogramBuilder<Da
|
|||
return offset(parseStringOffset(offset));
|
||||
}
|
||||
|
||||
protected static long parseStringOffset(String offset) {
|
||||
static long parseStringOffset(String offset) {
|
||||
if (offset.charAt(0) == '-') {
|
||||
return -TimeValue
|
||||
.parseTimeValue(offset.substring(1), null, DateHistogramAggregationBuilder.class.getSimpleName() + ".parseOffset")
|
||||
|
@ -88,15 +153,90 @@ public class DateHistogramAggregationBuilder extends AbstractHistogramBuilder<Da
|
|||
.millis();
|
||||
}
|
||||
|
||||
public DateHistogramInterval dateHistogramInterval() {
|
||||
return dateHistogramInterval;
|
||||
/** Return extended bounds for this histogram, or {@code null} if none are set. */
|
||||
public ExtendedBounds extendedBounds() {
|
||||
return extendedBounds;
|
||||
}
|
||||
|
||||
/** Set extended bounds on this histogram, so that buckets would also be
|
||||
* generated on intervals that did not match any documents. */
|
||||
public DateHistogramAggregationBuilder extendedBounds(ExtendedBounds extendedBounds) {
|
||||
if (extendedBounds == null) {
|
||||
throw new IllegalArgumentException("[extendedBounds] must not be null: [" + name + "]");
|
||||
}
|
||||
this.extendedBounds = extendedBounds;
|
||||
return this;
|
||||
}
|
||||
|
||||
/** Return the order to use to sort buckets of this histogram. */
|
||||
public Histogram.Order order() {
|
||||
return order;
|
||||
}
|
||||
|
||||
/** Set a new order on this builder and return the builder so that calls
|
||||
* can be chained. */
|
||||
public DateHistogramAggregationBuilder order(Histogram.Order order) {
|
||||
if (order == null) {
|
||||
throw new IllegalArgumentException("[order] must not be null: [" + name + "]");
|
||||
}
|
||||
this.order = (InternalOrder) order;
|
||||
return this;
|
||||
}
|
||||
|
||||
/** Return whether buckets should be returned as a hash. In case
|
||||
* {@code keyed} is false, buckets will be returned as an array. */
|
||||
public boolean keyed() {
|
||||
return keyed;
|
||||
}
|
||||
|
||||
/** Set whether to return buckets as a hash or as an array, and return the
|
||||
* builder so that calls can be chained. */
|
||||
public DateHistogramAggregationBuilder keyed(boolean keyed) {
|
||||
this.keyed = keyed;
|
||||
return this;
|
||||
}
|
||||
|
||||
/** Return the minimum count of documents that buckets need to have in order
|
||||
* to be included in the response. */
|
||||
public long minDocCount() {
|
||||
return minDocCount;
|
||||
}
|
||||
|
||||
/** Set the minimum count of matching documents that buckets need to have
|
||||
* and return this builder so that calls can be chained. */
|
||||
public DateHistogramAggregationBuilder minDocCount(long minDocCount) {
|
||||
if (minDocCount < 0) {
|
||||
throw new IllegalArgumentException(
|
||||
"[minDocCount] must be greater than or equal to 0. Found [" + minDocCount + "] in [" + name + "]");
|
||||
}
|
||||
this.minDocCount = minDocCount;
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected DateHistogramAggregatorFactory innerBuild(AggregationContext context, ValuesSourceConfig<Numeric> config,
|
||||
AggregatorFactory<?> parent, Builder subFactoriesBuilder) throws IOException {
|
||||
return new DateHistogramAggregatorFactory(name, type, config, interval, dateHistogramInterval, offset, order, keyed, minDocCount,
|
||||
extendedBounds, context, parent, subFactoriesBuilder, metaData);
|
||||
protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
|
||||
|
||||
if (dateHistogramInterval == null) {
|
||||
builder.field(Histogram.INTERVAL_FIELD.getPreferredName(), interval);
|
||||
} else {
|
||||
builder.field(Histogram.INTERVAL_FIELD.getPreferredName(), dateHistogramInterval.toString());
|
||||
}
|
||||
builder.field(Histogram.OFFSET_FIELD.getPreferredName(), offset);
|
||||
|
||||
if (order != null) {
|
||||
builder.field(Histogram.ORDER_FIELD.getPreferredName());
|
||||
order.toXContent(builder, params);
|
||||
}
|
||||
|
||||
builder.field(Histogram.KEYED_FIELD.getPreferredName(), keyed);
|
||||
|
||||
builder.field(Histogram.MIN_DOC_COUNT_FIELD.getPreferredName(), minDocCount);
|
||||
|
||||
if (extendedBounds != null) {
|
||||
extendedBounds.toXContent(builder, params);
|
||||
}
|
||||
|
||||
return builder;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -105,23 +245,26 @@ public class DateHistogramAggregationBuilder extends AbstractHistogramBuilder<Da
|
|||
}
|
||||
|
||||
@Override
|
||||
protected XContentBuilder doXContentInterval(XContentBuilder builder, Params params) throws IOException {
|
||||
if (dateHistogramInterval == null) {
|
||||
super.doXContentInterval(builder, params);
|
||||
} else {
|
||||
builder.value(dateHistogramInterval.toString());
|
||||
}
|
||||
return builder;
|
||||
protected ValuesSourceAggregatorFactory<Numeric, ?> innerBuild(AggregationContext context, ValuesSourceConfig<Numeric> config,
|
||||
AggregatorFactory<?> parent, Builder subFactoriesBuilder) throws IOException {
|
||||
return new DateHistogramAggregatorFactory(name, type, config, interval, dateHistogramInterval, offset, order, keyed, minDocCount,
|
||||
extendedBounds, context, parent, subFactoriesBuilder, metaData);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected int innerHashCode() {
|
||||
return Objects.hash(super.innerHashCode(), dateHistogramInterval);
|
||||
return Objects.hash(order, keyed, minDocCount, interval, dateHistogramInterval, minDocCount, extendedBounds);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean innerEquals(Object obj) {
|
||||
DateHistogramAggregationBuilder other = (DateHistogramAggregationBuilder) obj;
|
||||
return super.innerEquals(obj) && Objects.equals(dateHistogramInterval, other.dateHistogramInterval);
|
||||
return Objects.equals(order, other.order)
|
||||
&& Objects.equals(keyed, other.keyed)
|
||||
&& Objects.equals(minDocCount, other.minDocCount)
|
||||
&& Objects.equals(interval, other.interval)
|
||||
&& Objects.equals(dateHistogramInterval, other.dateHistogramInterval)
|
||||
&& Objects.equals(offset, other.offset)
|
||||
&& Objects.equals(extendedBounds, other.extendedBounds);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,153 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.search.aggregations.bucket.histogram;
|
||||
|
||||
import org.apache.lucene.index.LeafReaderContext;
|
||||
import org.apache.lucene.index.SortedNumericDocValues;
|
||||
import org.apache.lucene.util.CollectionUtil;
|
||||
import org.elasticsearch.common.inject.internal.Nullable;
|
||||
import org.elasticsearch.common.lease.Releasables;
|
||||
import org.elasticsearch.common.rounding.Rounding;
|
||||
import org.elasticsearch.common.rounding.TimeZoneRounding;
|
||||
import org.elasticsearch.common.util.LongHash;
|
||||
import org.elasticsearch.search.DocValueFormat;
|
||||
import org.elasticsearch.search.aggregations.Aggregator;
|
||||
import org.elasticsearch.search.aggregations.AggregatorFactories;
|
||||
import org.elasticsearch.search.aggregations.InternalAggregation;
|
||||
import org.elasticsearch.search.aggregations.LeafBucketCollector;
|
||||
import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
|
||||
import org.elasticsearch.search.aggregations.bucket.BucketsAggregator;
|
||||
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
|
||||
import org.elasticsearch.search.aggregations.support.AggregationContext;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSource;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* An aggregator for date values. Every date is rounded down using a configured
|
||||
* {@link TimeZoneRounding}.
|
||||
* @see TimeZoneRounding
|
||||
*/
|
||||
class DateHistogramAggregator extends BucketsAggregator {
|
||||
|
||||
private final ValuesSource.Numeric valuesSource;
|
||||
private final DocValueFormat formatter;
|
||||
private final Rounding rounding;
|
||||
private final InternalOrder order;
|
||||
private final boolean keyed;
|
||||
|
||||
private final long minDocCount;
|
||||
private final ExtendedBounds extendedBounds;
|
||||
|
||||
private final LongHash bucketOrds;
|
||||
|
||||
public DateHistogramAggregator(String name, AggregatorFactories factories, Rounding rounding, InternalOrder order, boolean keyed,
|
||||
long minDocCount, @Nullable ExtendedBounds extendedBounds, @Nullable ValuesSource.Numeric valuesSource,
|
||||
DocValueFormat formatter, AggregationContext aggregationContext,
|
||||
Aggregator parent, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
|
||||
|
||||
super(name, factories, aggregationContext, parent, pipelineAggregators, metaData);
|
||||
this.rounding = rounding;
|
||||
this.order = order;
|
||||
this.keyed = keyed;
|
||||
this.minDocCount = minDocCount;
|
||||
this.extendedBounds = extendedBounds;
|
||||
this.valuesSource = valuesSource;
|
||||
this.formatter = formatter;
|
||||
|
||||
bucketOrds = new LongHash(1, aggregationContext.bigArrays());
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean needsScores() {
|
||||
return (valuesSource != null && valuesSource.needsScores()) || super.needsScores();
|
||||
}
|
||||
|
||||
@Override
|
||||
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx,
|
||||
final LeafBucketCollector sub) throws IOException {
|
||||
if (valuesSource == null) {
|
||||
return LeafBucketCollector.NO_OP_COLLECTOR;
|
||||
}
|
||||
final SortedNumericDocValues values = valuesSource.longValues(ctx);
|
||||
return new LeafBucketCollectorBase(sub, values) {
|
||||
@Override
|
||||
public void collect(int doc, long bucket) throws IOException {
|
||||
assert bucket == 0;
|
||||
values.setDocument(doc);
|
||||
final int valuesCount = values.count();
|
||||
|
||||
long previousRounded = Long.MIN_VALUE;
|
||||
for (int i = 0; i < valuesCount; ++i) {
|
||||
long value = values.valueAt(i);
|
||||
long rounded = rounding.round(value);
|
||||
assert rounded >= previousRounded;
|
||||
if (rounded == previousRounded) {
|
||||
continue;
|
||||
}
|
||||
long bucketOrd = bucketOrds.add(rounded);
|
||||
if (bucketOrd < 0) { // already seen
|
||||
bucketOrd = -1 - bucketOrd;
|
||||
collectExistingBucket(sub, doc, bucketOrd);
|
||||
} else {
|
||||
collectBucket(sub, doc, bucketOrd);
|
||||
}
|
||||
previousRounded = rounded;
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException {
|
||||
assert owningBucketOrdinal == 0;
|
||||
List<InternalDateHistogram.Bucket> buckets = new ArrayList<>((int) bucketOrds.size());
|
||||
for (long i = 0; i < bucketOrds.size(); i++) {
|
||||
buckets.add(new InternalDateHistogram.Bucket(bucketOrds.get(i), bucketDocCount(i), keyed, formatter, bucketAggregations(i)));
|
||||
}
|
||||
|
||||
// the contract of the histogram aggregation is that shards must return buckets ordered by key in ascending order
|
||||
CollectionUtil.introSort(buckets, InternalOrder.KEY_ASC.comparator());
|
||||
|
||||
// value source will be null for unmapped fields
|
||||
InternalDateHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0
|
||||
? new InternalDateHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations(), extendedBounds)
|
||||
: null;
|
||||
return new InternalDateHistogram(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed,
|
||||
pipelineAggregators(), metaData());
|
||||
}
|
||||
|
||||
@Override
|
||||
public InternalAggregation buildEmptyAggregation() {
|
||||
InternalDateHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0
|
||||
? new InternalDateHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations(), extendedBounds)
|
||||
: null;
|
||||
return new InternalDateHistogram(name, Collections.emptyList(), order, minDocCount, emptyBucketInfo, formatter, keyed,
|
||||
pipelineAggregators(), metaData());
|
||||
}
|
||||
|
||||
@Override
|
||||
public void doClose() {
|
||||
Releasables.close(bucketOrds);
|
||||
}
|
||||
}
|
|
@@ -23,23 +23,29 @@ import org.elasticsearch.common.rounding.DateTimeUnit;
import org.elasticsearch.common.rounding.Rounding;
import org.elasticsearch.common.rounding.TimeZoneRounding;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.aggregations.InternalAggregation.Type;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric;

import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static java.util.Collections.unmodifiableMap;

import org.elasticsearch.search.aggregations.support.AggregationContext;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;

public class DateHistogramAggregatorFactory extends AbstractHistogramAggregatorFactory<DateHistogramAggregatorFactory> {
public final class DateHistogramAggregatorFactory
extends ValuesSourceAggregatorFactory<ValuesSource.Numeric, DateHistogramAggregatorFactory> {

public static final Map<String, DateTimeUnit> DATE_FIELD_UNITS;
private final DateHistogramInterval dateHistogramInterval;

static {
Map<String, DateTimeUnit> dateFieldUnits = new HashMap<>();

@@ -62,17 +68,33 @@ public class DateHistogramAggregatorFactory extends AbstractHistogramAggregatorF
DATE_FIELD_UNITS = unmodifiableMap(dateFieldUnits);
}

private final DateHistogramInterval dateHistogramInterval;
private final long interval;
private final long offset;
private final InternalOrder order;
private final boolean keyed;
private final long minDocCount;
private final ExtendedBounds extendedBounds;

public DateHistogramAggregatorFactory(String name, Type type, ValuesSourceConfig<Numeric> config, long interval,
DateHistogramInterval dateHistogramInterval, long offset, InternalOrder order, boolean keyed, long minDocCount,
ExtendedBounds extendedBounds, AggregationContext context, AggregatorFactory<?> parent,
AggregatorFactories.Builder subFactoriesBuilder, Map<String, Object> metaData) throws IOException {
super(name, type, config, interval, offset, order, keyed, minDocCount, extendedBounds, InternalDateHistogram.HISTOGRAM_FACTORY,
context, parent, subFactoriesBuilder, metaData);
super(name, type, config, context, parent, subFactoriesBuilder, metaData);
this.interval = interval;
this.dateHistogramInterval = dateHistogramInterval;
this.offset = offset;
this.order = order;
this.keyed = keyed;
this.minDocCount = minDocCount;
this.extendedBounds = extendedBounds;
}

@Override
protected Rounding createRounding() {
public long minDocCount() {
return minDocCount;
}

private Rounding createRounding() {
TimeZoneRounding.Builder tzRoundingBuilder;
if (dateHistogramInterval != null) {
DateTimeUnit dateTimeUnit = DATE_FIELD_UNITS.get(dateHistogramInterval.toString());

@@ -94,4 +116,35 @@ public class DateHistogramAggregatorFactory extends AbstractHistogramAggregatorF
return rounding;
}

@Override
protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, Aggregator parent, boolean collectsFromSingleBucket,
List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
if (collectsFromSingleBucket == false) {
return asMultiBucketAggregator(this, context, parent);
}
return createAggregator(valuesSource, parent, pipelineAggregators, metaData);
}

private Aggregator createAggregator(ValuesSource.Numeric valuesSource, Aggregator parent, List<PipelineAggregator> pipelineAggregators,
Map<String, Object> metaData) throws IOException {
Rounding rounding = createRounding();
// we need to round the bounds given by the user and we have to do it
// for every aggregator we create
// as the rounding is not necessarily an idempotent operation.
// todo we need to think of a better structure to the factory/aggregator
// code so we won't need to do that
ExtendedBounds roundedBounds = null;
if (extendedBounds != null) {
// parse any string bounds to longs and round them
roundedBounds = extendedBounds.parseAndValidate(name, context.searchContext(), config.format()).round(rounding);
}
return new DateHistogramAggregator(name, factories, rounding, order, keyed, minDocCount, roundedBounds, valuesSource,
config.format(), context, parent, pipelineAggregators, metaData);
}

@Override
protected Aggregator createUnmapped(Aggregator parent, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData)
throws IOException {
return createAggregator(null, parent, pipelineAggregators, metaData);
}
}
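For context, a minimal usage sketch (not part of this diff) of how the factory above gets configured, assuming the DateHistogramAggregationBuilder API introduced elsewhere in this commit; the field name "timestamp" is hypothetical. When dateHistogramInterval is set, createRounding() resolves it against DATE_FIELD_UNITS, otherwise the plain numeric interval is used.

// Hypothetical usage sketch: a calendar-interval (monthly) date histogram,
// using the classes from org.elasticsearch.search.aggregations.bucket.histogram.
DateHistogramAggregationBuilder byMonth = new DateHistogramAggregationBuilder("by_month")
        .field("timestamp")                                   // assumed field name
        .dateHistogramInterval(DateHistogramInterval.MONTH)
        .minDocCount(0)
        .order(Histogram.Order.KEY_ASC);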
@ -19,8 +19,12 @@
|
|||
package org.elasticsearch.search.aggregations.bucket.histogram;
|
||||
|
||||
import org.elasticsearch.common.ParseField;
|
||||
import org.elasticsearch.common.ParseFieldMatcher;
|
||||
import org.elasticsearch.common.ParsingException;
|
||||
import org.elasticsearch.common.rounding.Rounding;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentParser.Token;
|
||||
import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.NumericValuesSourceParser;
|
||||
import org.elasticsearch.search.aggregations.support.ValueType;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSourceType;
|
||||
|
||||
|
@ -28,17 +32,13 @@ import java.io.IOException;
|
|||
import java.util.Map;
|
||||
|
||||
/**
|
||||
*
|
||||
* A parser for date histograms. This translates json into a
|
||||
* {@link DateHistogramAggregationBuilder} instance.
|
||||
*/
|
||||
public class DateHistogramParser extends HistogramParser {
|
||||
public class DateHistogramParser extends NumericValuesSourceParser {
|
||||
|
||||
public DateHistogramParser() {
|
||||
super(true);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Object parseStringInterval(String text) {
|
||||
return new DateHistogramInterval(text);
|
||||
super(true, true, true);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -52,6 +52,8 @@ public class DateHistogramParser extends HistogramParser {
|
|||
factory.interval((Long) interval);
|
||||
} else if (interval instanceof DateHistogramInterval) {
|
||||
factory.dateHistogramInterval((DateHistogramInterval) interval);
|
||||
} else {
|
||||
throw new IllegalStateException("Unexpected interval class: " + interval.getClass());
|
||||
}
|
||||
Long offset = (Long) otherOptions.get(Rounding.OffsetRounding.OFFSET_FIELD);
|
||||
if (offset != null) {
|
||||
|
@ -62,21 +64,85 @@ public class DateHistogramParser extends HistogramParser {
|
|||
if (extendedBounds != null) {
|
||||
factory.extendedBounds(extendedBounds);
|
||||
}
|
||||
Boolean keyed = (Boolean) otherOptions.get(HistogramAggregator.KEYED_FIELD);
|
||||
Boolean keyed = (Boolean) otherOptions.get(Histogram.KEYED_FIELD);
|
||||
if (keyed != null) {
|
||||
factory.keyed(keyed);
|
||||
}
|
||||
Long minDocCount = (Long) otherOptions.get(HistogramAggregator.MIN_DOC_COUNT_FIELD);
|
||||
Long minDocCount = (Long) otherOptions.get(Histogram.MIN_DOC_COUNT_FIELD);
|
||||
if (minDocCount != null) {
|
||||
factory.minDocCount(minDocCount);
|
||||
}
|
||||
InternalOrder order = (InternalOrder) otherOptions.get(HistogramAggregator.ORDER_FIELD);
|
||||
InternalOrder order = (InternalOrder) otherOptions.get(Histogram.ORDER_FIELD);
|
||||
if (order != null) {
|
||||
factory.order(order);
|
||||
}
|
||||
return factory;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean token(String aggregationName, String currentFieldName, Token token, XContentParser parser,
|
||||
ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
|
||||
if (token.isValue()) {
|
||||
if (parseFieldMatcher.match(currentFieldName, Rounding.Interval.INTERVAL_FIELD)) {
|
||||
if (token == XContentParser.Token.VALUE_STRING) {
|
||||
otherOptions.put(Rounding.Interval.INTERVAL_FIELD, new DateHistogramInterval(parser.text()));
|
||||
return true;
|
||||
} else {
|
||||
otherOptions.put(Rounding.Interval.INTERVAL_FIELD, parser.longValue());
|
||||
return true;
|
||||
}
|
||||
} else if (parseFieldMatcher.match(currentFieldName, Histogram.MIN_DOC_COUNT_FIELD)) {
|
||||
otherOptions.put(Histogram.MIN_DOC_COUNT_FIELD, parser.longValue());
|
||||
return true;
|
||||
} else if (parseFieldMatcher.match(currentFieldName, Histogram.KEYED_FIELD)) {
|
||||
otherOptions.put(Histogram.KEYED_FIELD, parser.booleanValue());
|
||||
return true;
|
||||
} else if (parseFieldMatcher.match(currentFieldName, Rounding.OffsetRounding.OFFSET_FIELD)) {
|
||||
if (token == XContentParser.Token.VALUE_STRING) {
|
||||
otherOptions.put(Rounding.OffsetRounding.OFFSET_FIELD,
|
||||
DateHistogramAggregationBuilder.parseStringOffset(parser.text()));
|
||||
return true;
|
||||
} else {
|
||||
otherOptions.put(Rounding.OffsetRounding.OFFSET_FIELD, parser.longValue());
|
||||
return true;
|
||||
}
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
} else if (token == XContentParser.Token.START_OBJECT) {
|
||||
if (parseFieldMatcher.match(currentFieldName, Histogram.ORDER_FIELD)) {
|
||||
InternalOrder order = null;
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
currentFieldName = parser.currentName();
|
||||
} else if (token == XContentParser.Token.VALUE_STRING) {
|
||||
String dir = parser.text();
|
||||
boolean asc = "asc".equals(dir);
|
||||
if (!asc && !"desc".equals(dir)) {
|
||||
throw new ParsingException(parser.getTokenLocation(), "Unknown order direction in aggregation ["
|
||||
+ aggregationName + "]: [" + dir
|
||||
+ "]. Should be either [asc] or [desc]");
|
||||
}
|
||||
order = resolveOrder(currentFieldName, asc);
|
||||
}
|
||||
}
|
||||
otherOptions.put(Histogram.ORDER_FIELD, order);
|
||||
return true;
|
||||
} else if (parseFieldMatcher.match(currentFieldName, ExtendedBounds.EXTENDED_BOUNDS_FIELD)) {
|
||||
try {
|
||||
otherOptions.put(ExtendedBounds.EXTENDED_BOUNDS_FIELD, ExtendedBounds.PARSER.apply(parser, () -> parseFieldMatcher));
|
||||
} catch (Exception e) {
|
||||
throw new ParsingException(parser.getTokenLocation(), "Error parsing [{}]", e, aggregationName);
|
||||
}
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
static InternalOrder resolveOrder(String key, boolean asc) {
|
||||
if ("_key".equals(key) || "_time".equals(key)) {
|
||||
return (InternalOrder) (asc ? InternalOrder.KEY_ASC : InternalOrder.KEY_DESC);
|
||||
|
@ -86,9 +152,4 @@ public class DateHistogramParser extends HistogramParser {
|
|||
}
|
||||
return new InternalOrder.Aggregation(key, asc);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected long parseStringOffset(String offset) throws IOException {
|
||||
return DateHistogramAggregationBuilder.parseStringOffset(offset);
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -41,7 +41,7 @@ import java.util.Objects;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;

public class ExtendedBounds implements ToXContent, Writeable {
static final ParseField EXTENDED_BOUNDS_FIELD = new ParseField("extended_bounds");
static final ParseField EXTENDED_BOUNDS_FIELD = Histogram.EXTENDED_BOUNDS_FIELD;
static final ParseField MIN_FIELD = new ParseField("min");
static final ParseField MAX_FIELD = new ParseField("max");
@@ -18,6 +18,7 @@
*/
package org.elasticsearch.search.aggregations.bucket.histogram;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;

@@ -29,6 +30,13 @@ import java.util.List;
*/
public interface Histogram extends MultiBucketsAggregation {

ParseField INTERVAL_FIELD = new ParseField("interval");
ParseField OFFSET_FIELD = new ParseField("offset");
ParseField ORDER_FIELD = new ParseField("order");
ParseField KEYED_FIELD = new ParseField("keyed");
ParseField MIN_DOC_COUNT_FIELD = new ParseField("min_doc_count");
ParseField EXTENDED_BOUNDS_FIELD = new ParseField("extended_bounds");

/**
* A bucket in the histogram where documents fall in
*/

@@ -40,7 +48,7 @@ public interface Histogram extends MultiBucketsAggregation {
* @return The buckets of this histogram (each bucket representing an interval in the histogram)
*/
@Override
List<? extends Bucket> getBuckets();
List<Bucket> getBuckets();


/**
@@ -48,38 +56,48 @@ public interface Histogram extends MultiBucketsAggregation {
*/
abstract class Order implements ToXContent {

public static final Order KEY_ASC = new InternalOrder((byte) 1, "_key", true, new Comparator<InternalHistogram.Bucket>() {
private static int compareKey(Histogram.Bucket b1, Histogram.Bucket b2) {
if (b1 instanceof InternalHistogram.Bucket) {
return Double.compare(((InternalHistogram.Bucket) b1).key, ((InternalHistogram.Bucket) b2).key);
} else if (b1 instanceof InternalDateHistogram.Bucket) {
return Long.compare(((InternalDateHistogram.Bucket) b1).key, ((InternalDateHistogram.Bucket) b2).key);
} else {
throw new IllegalStateException("Unexpected impl: " + b1.getClass());
}
}

public static final Order KEY_ASC = new InternalOrder((byte) 1, "_key", true, new Comparator<Histogram.Bucket>() {
@Override
public int compare(InternalHistogram.Bucket b1, InternalHistogram.Bucket b2) {
return Long.compare(b1.key, b2.key);
public int compare(Histogram.Bucket b1, Histogram.Bucket b2) {
return compareKey(b1, b2);
}
});

public static final Order KEY_DESC = new InternalOrder((byte) 2, "_key", false, new Comparator<InternalHistogram.Bucket>() {
public static final Order KEY_DESC = new InternalOrder((byte) 2, "_key", false, new Comparator<Histogram.Bucket>() {
@Override
public int compare(InternalHistogram.Bucket b1, InternalHistogram.Bucket b2) {
return -Long.compare(b1.key, b2.key);
public int compare(Histogram.Bucket b1, Histogram.Bucket b2) {
return compareKey(b2, b1);
}
});

public static final Order COUNT_ASC = new InternalOrder((byte) 3, "_count", true, new Comparator<InternalHistogram.Bucket>() {
public static final Order COUNT_ASC = new InternalOrder((byte) 3, "_count", true, new Comparator<Histogram.Bucket>() {
@Override
public int compare(InternalHistogram.Bucket b1, InternalHistogram.Bucket b2) {
public int compare(Histogram.Bucket b1, Histogram.Bucket b2) {
int cmp = Long.compare(b1.getDocCount(), b2.getDocCount());
if (cmp == 0) {
cmp = Long.compare(b1.key, b2.key);
cmp = compareKey(b1, b2);
}
return cmp;
}
});


public static final Order COUNT_DESC = new InternalOrder((byte) 4, "_count", false, new Comparator<InternalHistogram.Bucket>() {
public static final Order COUNT_DESC = new InternalOrder((byte) 4, "_count", false, new Comparator<Histogram.Bucket>() {
@Override
public int compare(InternalHistogram.Bucket b1, InternalHistogram.Bucket b2) {
int cmp = -Long.compare(b1.getDocCount(), b2.getDocCount());
public int compare(Histogram.Bucket b1, Histogram.Bucket b2) {
int cmp = Long.compare(b2.getDocCount(), b1.getDocCount());
if (cmp == 0) {
cmp = Long.compare(b1.key, b2.key);
cmp = compareKey(b1, b2);
}
return cmp;
}

@@ -109,7 +127,7 @@ public interface Histogram extends MultiBucketsAggregation {
/**
* @return The bucket comparator by which the order will be applied.
*/
abstract Comparator<InternalHistogram.Bucket> comparator();
abstract Comparator<Histogram.Bucket> comparator();

}
}
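The Order constants above now compare plain Histogram.Bucket instances, so the same constants work for both numeric and date histograms. A usage sketch, not from this commit, assuming the numeric builder rewritten later in this diff and a hypothetical "price" field:

// Hypothetical usage sketch: fixed-width buckets sorted by descending doc count
// via the shared Histogram.Order constants.
HistogramAggregationBuilder byPrice = new HistogramAggregationBuilder("by_price")
        .field("price")          // assumed field name
        .interval(50)
        .order(Histogram.Order.COUNT_DESC);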
@ -21,38 +21,224 @@ package org.elasticsearch.search.aggregations.bucket.histogram;
|
|||
|
||||
import org.elasticsearch.common.ParseField;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;
|
||||
import org.elasticsearch.search.aggregations.AggregatorFactory;
|
||||
import org.elasticsearch.search.aggregations.support.AggregationContext;
|
||||
import org.elasticsearch.search.aggregations.support.ValueType;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSource;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSourceType;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Objects;
|
||||
|
||||
public class HistogramAggregationBuilder extends AbstractHistogramBuilder<HistogramAggregationBuilder> {
|
||||
/**
|
||||
* A builder for histograms on numeric fields.
|
||||
*/
|
||||
public class HistogramAggregationBuilder
|
||||
extends ValuesSourceAggregationBuilder<ValuesSource.Numeric, HistogramAggregationBuilder> {
|
||||
public static final String NAME = InternalHistogram.TYPE.name();
|
||||
public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME);
|
||||
|
||||
private double interval;
|
||||
private double offset = 0;
|
||||
private double minBound = Double.MAX_VALUE;
|
||||
private double maxBound = Double.MIN_VALUE;
|
||||
private InternalOrder order = (InternalOrder) Histogram.Order.KEY_ASC;
|
||||
private boolean keyed = false;
|
||||
private long minDocCount = 0;
|
||||
|
||||
/** Create a new builder with the given name. */
|
||||
public HistogramAggregationBuilder(String name) {
|
||||
super(name, InternalHistogram.HISTOGRAM_FACTORY);
|
||||
super(name, InternalHistogram.TYPE, ValuesSourceType.NUMERIC, ValueType.DOUBLE);
|
||||
}
|
||||
|
||||
/**
|
||||
* Read from a stream.
|
||||
*/
|
||||
/** Read from a stream, for internal use only. */
|
||||
public HistogramAggregationBuilder(StreamInput in) throws IOException {
|
||||
super(in, InternalHistogram.HISTOGRAM_FACTORY);
|
||||
super(in, InternalHistogram.TYPE, ValuesSourceType.NUMERIC, ValueType.DOUBLE);
|
||||
if (in.readBoolean()) {
|
||||
order = InternalOrder.Streams.readOrder(in);
|
||||
}
|
||||
keyed = in.readBoolean();
|
||||
minDocCount = in.readVLong();
|
||||
interval = in.readDouble();
|
||||
offset = in.readDouble();
|
||||
minBound = in.readDouble();
|
||||
maxBound = in.readDouble();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected HistogramAggregatorFactory innerBuild(AggregationContext context, ValuesSourceConfig<Numeric> config,
|
||||
AggregatorFactory<?> parent, Builder subFactoriesBuilder) throws IOException {
|
||||
return new HistogramAggregatorFactory(name, type, config, interval, offset, order, keyed, minDocCount, extendedBounds, context,
|
||||
parent, subFactoriesBuilder, metaData);
|
||||
protected void innerWriteTo(StreamOutput out) throws IOException {
|
||||
boolean hasOrder = order != null;
|
||||
out.writeBoolean(hasOrder);
|
||||
if (hasOrder) {
|
||||
InternalOrder.Streams.writeOrder(order, out);
|
||||
}
|
||||
out.writeBoolean(keyed);
|
||||
out.writeVLong(minDocCount);
|
||||
out.writeDouble(interval);
|
||||
out.writeDouble(offset);
|
||||
out.writeDouble(minBound);
|
||||
out.writeDouble(maxBound);
|
||||
}
|
||||
|
||||
/** Get the current interval that is set on this builder. */
|
||||
public double interval() {
|
||||
return interval;
|
||||
}
|
||||
|
||||
/** Set the interval on this builder, and return the builder so that calls can be chained. */
|
||||
public HistogramAggregationBuilder interval(double interval) {
|
||||
if (interval <= 0) {
|
||||
throw new IllegalArgumentException("[interval] must be >0 for histogram aggregation [" + name + "]");
|
||||
}
|
||||
this.interval = interval;
|
||||
return this;
|
||||
}
|
||||
|
||||
/** Get the current offset that is set on this builder. */
|
||||
public double offset() {
|
||||
return offset;
|
||||
}
|
||||
|
||||
/** Set the offset on this builder, and return the builder so that calls can be chained. */
|
||||
public HistogramAggregationBuilder offset(double offset) {
|
||||
this.offset = offset;
|
||||
return this;
|
||||
}
|
||||
|
||||
/** Get the current minimum bound that is set on this builder. */
|
||||
public double minBound() {
|
||||
return minBound;
|
||||
}
|
||||
|
||||
/** Get the current maximum bound that is set on this builder. */
|
||||
public double maxBound() {
|
||||
return maxBound;
|
||||
}
|
||||
|
||||
/** Set extended bounds on this builder: buckets between {@code minBound}
|
||||
* and {@code maxBound} will be created even if no documents fell into
|
||||
* these buckets. It is possible to create half-open bounds by providing
|
||||
* {@link Double#POSITIVE_INFINITY} as a {@code minBound} or
|
||||
* {@link Double#NEGATIVE_INFINITY} as a {@code maxBound}. */
|
||||
public HistogramAggregationBuilder extendedBounds(double minBound, double maxBound) {
|
||||
if (minBound == Double.NEGATIVE_INFINITY) {
|
||||
throw new IllegalArgumentException("minBound must not be -Infinity, got: " + minBound);
|
||||
}
|
||||
if (maxBound == Double.POSITIVE_INFINITY) {
|
||||
throw new IllegalArgumentException("maxBound must not be +Infinity, got: " + maxBound);
|
||||
}
|
||||
this.minBound = minBound;
|
||||
this.maxBound = maxBound;
|
||||
return this;
|
||||
}
|
||||
|
||||
/** Return the order to use to sort buckets of this histogram. */
|
||||
public Histogram.Order order() {
|
||||
return order;
|
||||
}
|
||||
|
||||
/** Set a new order on this builder and return the builder so that calls
|
||||
* can be chained. */
|
||||
public HistogramAggregationBuilder order(Histogram.Order order) {
|
||||
if (order == null) {
|
||||
throw new IllegalArgumentException("[order] must not be null: [" + name + "]");
|
||||
}
|
||||
this.order = (InternalOrder) order;
|
||||
return this;
|
||||
}
|
||||
|
||||
/** Return whether buckets should be returned as a hash. In case
|
||||
* {@code keyed} is false, buckets will be returned as an array. */
|
||||
public boolean keyed() {
|
||||
return keyed;
|
||||
}
|
||||
|
||||
/** Set whether to return buckets as a hash or as an array, and return the
|
||||
* builder so that calls can be chained. */
|
||||
public HistogramAggregationBuilder keyed(boolean keyed) {
|
||||
this.keyed = keyed;
|
||||
return this;
|
||||
}
|
||||
|
||||
/** Return the minimum count of documents that buckets need to have in order
|
||||
* to be included in the response. */
|
||||
public long minDocCount() {
|
||||
return minDocCount;
|
||||
}
|
||||
|
||||
/** Set the minimum count of matching documents that buckets need to have
|
||||
* and return this builder so that calls can be chained. */
|
||||
public HistogramAggregationBuilder minDocCount(long minDocCount) {
|
||||
if (minDocCount < 0) {
|
||||
throw new IllegalArgumentException(
|
||||
"[minDocCount] must be greater than or equal to 0. Found [" + minDocCount + "] in [" + name + "]");
|
||||
}
|
||||
this.minDocCount = minDocCount;
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
|
||||
|
||||
builder.field(Histogram.INTERVAL_FIELD.getPreferredName(), interval);
|
||||
builder.field(Histogram.OFFSET_FIELD.getPreferredName(), offset);
|
||||
|
||||
if (order != null) {
|
||||
builder.field(Histogram.ORDER_FIELD.getPreferredName());
|
||||
order.toXContent(builder, params);
|
||||
}
|
||||
|
||||
builder.field(Histogram.KEYED_FIELD.getPreferredName(), keyed);
|
||||
|
||||
builder.field(Histogram.MIN_DOC_COUNT_FIELD.getPreferredName(), minDocCount);
|
||||
|
||||
if (Double.isFinite(minBound) || Double.isFinite(maxBound)) {
|
||||
builder.startObject(Histogram.EXTENDED_BOUNDS_FIELD.getPreferredName());
|
||||
if (Double.isFinite(minBound)) {
|
||||
builder.field("min", minBound);
|
||||
}
|
||||
if (Double.isFinite(maxBound)) {
|
||||
builder.field("max", maxBound);
|
||||
}
|
||||
builder.endObject();
|
||||
}
|
||||
|
||||
return builder;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getWriteableName() {
|
||||
return NAME;
|
||||
return InternalHistogram.TYPE.name();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected ValuesSourceAggregatorFactory<Numeric, ?> innerBuild(AggregationContext context, ValuesSourceConfig<Numeric> config,
|
||||
AggregatorFactory<?> parent, Builder subFactoriesBuilder) throws IOException {
|
||||
return new HistogramAggregatorFactory(name, type, config, interval, offset, order, keyed, minDocCount, minBound, maxBound,
|
||||
context, parent, subFactoriesBuilder, metaData);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected int innerHashCode() {
|
||||
return Objects.hash(order, keyed, minDocCount, interval, offset, minBound, maxBound);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean innerEquals(Object obj) {
|
||||
HistogramAggregationBuilder other = (HistogramAggregationBuilder) obj;
|
||||
return Objects.equals(order, other.order)
|
||||
&& Objects.equals(keyed, other.keyed)
|
||||
&& Objects.equals(minDocCount, other.minDocCount)
|
||||
&& Objects.equals(interval, other.interval)
|
||||
&& Objects.equals(offset, other.offset)
|
||||
&& Objects.equals(minBound, other.minBound)
|
||||
&& Objects.equals(maxBound, other.maxBound);
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -16,16 +16,15 @@
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.search.aggregations.bucket.histogram;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.inject.internal.Nullable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.rounding.Rounding;
import org.elasticsearch.common.util.LongHash;
import org.elasticsearch.index.fielddata.SortedNumericDoubleValues;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactories;

@@ -33,6 +32,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.LeafBucketCollector;
import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
import org.elasticsearch.search.aggregations.bucket.BucketsAggregator;
import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram.EmptyBucketInfo;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.search.aggregations.support.AggregationContext;
import org.elasticsearch.search.aggregations.support.ValuesSource;

@@ -43,38 +43,43 @@ import java.util.Collections;
import java.util.List;
import java.util.Map;

public class HistogramAggregator extends BucketsAggregator {

public static final ParseField ORDER_FIELD = new ParseField("order");
public static final ParseField KEYED_FIELD = new ParseField("keyed");
public static final ParseField MIN_DOC_COUNT_FIELD = new ParseField("min_doc_count");
/**
* An aggregator for numeric values. For a given {@code interval},
* {@code offset} and {@code value}, it returns the highest number that can be
* written as {@code interval * x + offset} and yet is less than or equal to
* {@code value}.
*/
class HistogramAggregator extends BucketsAggregator {

private final ValuesSource.Numeric valuesSource;
private final DocValueFormat formatter;
private final Rounding rounding;
private final double interval, offset;
private final InternalOrder order;
private final boolean keyed;

private final long minDocCount;
private final ExtendedBounds extendedBounds;
private final InternalHistogram.Factory histogramFactory;
private final double minBound, maxBound;

private final LongHash bucketOrds;

public HistogramAggregator(String name, AggregatorFactories factories, Rounding rounding, InternalOrder order, boolean keyed,
long minDocCount, @Nullable ExtendedBounds extendedBounds, @Nullable ValuesSource.Numeric valuesSource,
DocValueFormat formatter, InternalHistogram.Factory<?> histogramFactory, AggregationContext aggregationContext,
Aggregator parent, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
public HistogramAggregator(String name, AggregatorFactories factories, double interval, double offset,
InternalOrder order, boolean keyed, long minDocCount, double minBound, double maxBound,
@Nullable ValuesSource.Numeric valuesSource, DocValueFormat formatter,
AggregationContext aggregationContext, Aggregator parent,
List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {

super(name, factories, aggregationContext, parent, pipelineAggregators, metaData);
this.rounding = rounding;
if (interval <= 0) {
throw new IllegalArgumentException("interval must be positive, got: " + interval);
}
this.interval = interval;
this.offset = offset;
this.order = order;
this.keyed = keyed;
this.minDocCount = minDocCount;
this.extendedBounds = extendedBounds;
this.minBound = minBound;
this.maxBound = maxBound;
this.valuesSource = valuesSource;
this.formatter = formatter;
this.histogramFactory = histogramFactory;

bucketOrds = new LongHash(1, aggregationContext.bigArrays());
}

@@ -90,7 +95,8 @@ public class HistogramAggregator extends BucketsAggregator {
if (valuesSource == null) {
return LeafBucketCollector.NO_OP_COLLECTOR;
}
final SortedNumericDocValues values = valuesSource.longValues(ctx);

final SortedNumericDoubleValues values = valuesSource.doubleValues(ctx);
return new LeafBucketCollectorBase(sub, values) {
@Override
public void collect(int doc, long bucket) throws IOException {

@@ -98,15 +104,15 @@ public class HistogramAggregator extends BucketsAggregator {
values.setDocument(doc);
final int valuesCount = values.count();

long previousKey = Long.MIN_VALUE;
double previousKey = Double.NEGATIVE_INFINITY;
for (int i = 0; i < valuesCount; ++i) {
long value = values.valueAt(i);
long key = rounding.roundKey(value);
double value = values.valueAt(i);
double key = Math.floor((value - offset) / interval);
assert key >= previousKey;
if (key == previousKey) {
continue;
}
long bucketOrd = bucketOrds.add(key);
long bucketOrd = bucketOrds.add(Double.doubleToLongBits(key));
if (bucketOrd < 0) { // already seen
bucketOrd = -1 - bucketOrd;
collectExistingBucket(sub, doc, bucketOrd);

@@ -120,26 +126,32 @@ public class HistogramAggregator extends BucketsAggregator {
}

@Override
public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException {
assert owningBucketOrdinal == 0;
public InternalAggregation buildAggregation(long bucket) throws IOException {
assert bucket == 0;
List<InternalHistogram.Bucket> buckets = new ArrayList<>((int) bucketOrds.size());
for (long i = 0; i < bucketOrds.size(); i++) {
buckets.add(histogramFactory.createBucket(rounding.valueForKey(bucketOrds.get(i)), bucketDocCount(i), bucketAggregations(i), keyed, formatter));
double roundKey = Double.longBitsToDouble(bucketOrds.get(i));
double key = roundKey * interval + offset;
buckets.add(new InternalHistogram.Bucket(key, bucketDocCount(i), keyed, formatter, bucketAggregations(i)));
}

// the contract of the histogram aggregation is that shards must return buckets ordered by key in ascending order
CollectionUtil.introSort(buckets, InternalOrder.KEY_ASC.comparator());

// value source will be null for unmapped fields
InternalHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0 ? new InternalHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations(), extendedBounds) : null;
return histogramFactory.create(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed, pipelineAggregators(), metaData());
EmptyBucketInfo emptyBucketInfo = null;
if (minDocCount == 0) {
emptyBucketInfo = new EmptyBucketInfo(interval, offset, minBound, maxBound, buildEmptySubAggregations());
}
return new InternalHistogram(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed, pipelineAggregators(), metaData());
}

@Override
public InternalAggregation buildEmptyAggregation() {
InternalHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0 ? new InternalHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations(), extendedBounds) : null;
return histogramFactory.create(name, Collections.emptyList(), order, minDocCount, emptyBucketInfo, formatter, keyed, pipelineAggregators(),
metaData());
EmptyBucketInfo emptyBucketInfo = null;
if (minDocCount == 0) {
emptyBucketInfo = new EmptyBucketInfo(interval, offset, minBound, maxBound, buildEmptySubAggregations());
}
return new InternalHistogram(name, Collections.emptyList(), order, minDocCount, emptyBucketInfo, formatter, keyed, pipelineAggregators(), metaData());
}

@Override
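A quick plain-Java check of the new bucketing arithmetic used above (not Elasticsearch code): each value maps to Math.floor((value - offset) / interval), and the reported bucket key is roundKey * interval + offset, i.e. the highest number of the form interval * x + offset that does not exceed the value.

// Standalone illustration of the double-based bucketing math.
double interval = 5, offset = 2;
double value = 13.7;
double roundKey = Math.floor((value - offset) / interval); // (13.7 - 2) / 5 = 2.34 -> 2.0
double bucketKey = roundKey * interval + offset;           // 2.0 * 5 + 2 = 12.0
// 12.0 is the largest interval * x + offset that is <= 13.7
assert bucketKey <= value && bucketKey + interval > value;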
@@ -19,24 +19,66 @@

package org.elasticsearch.search.aggregations.bucket.histogram;

import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.aggregations.InternalAggregation.Type;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.search.aggregations.support.AggregationContext;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric;

import java.io.IOException;
import java.util.List;
import java.util.Map;

import org.elasticsearch.search.aggregations.support.AggregationContext;
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
public final class HistogramAggregatorFactory extends ValuesSourceAggregatorFactory<ValuesSource.Numeric, HistogramAggregatorFactory> {

public class HistogramAggregatorFactory extends AbstractHistogramAggregatorFactory<HistogramAggregatorFactory> {
private final double interval, offset;
private final InternalOrder order;
private final boolean keyed;
private final long minDocCount;
private final double minBound, maxBound;

public HistogramAggregatorFactory(String name, Type type, ValuesSourceConfig<Numeric> config, long interval, long offset,
InternalOrder order, boolean keyed, long minDocCount, ExtendedBounds extendedBounds, AggregationContext context,
AggregatorFactory<?> parent, AggregatorFactories.Builder subFactoriesBuilder, Map<String, Object> metaData) throws IOException {
super(name, type, config, interval, offset, order, keyed, minDocCount, extendedBounds, InternalHistogram.HISTOGRAM_FACTORY, context,
parent, subFactoriesBuilder, metaData);
HistogramAggregatorFactory(String name, Type type, ValuesSourceConfig<Numeric> config, double interval, double offset,
InternalOrder order, boolean keyed, long minDocCount, double minBound, double maxBound,
AggregationContext context, AggregatorFactory<?> parent,
AggregatorFactories.Builder subFactoriesBuilder, Map<String, Object> metaData) throws IOException {
super(name, type, config, context, parent, subFactoriesBuilder, metaData);
this.interval = interval;
this.offset = offset;
this.order = order;
this.keyed = keyed;
this.minDocCount = minDocCount;
this.minBound = minBound;
this.maxBound = maxBound;
}

}
public long minDocCount() {
return minDocCount;
}

@Override
protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, Aggregator parent, boolean collectsFromSingleBucket,
List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
if (collectsFromSingleBucket == false) {
return asMultiBucketAggregator(this, context, parent);
}
return createAggregator(valuesSource, parent, pipelineAggregators, metaData);
}

private Aggregator createAggregator(ValuesSource.Numeric valuesSource, Aggregator parent, List<PipelineAggregator> pipelineAggregators,
Map<String, Object> metaData) throws IOException {

return new HistogramAggregator(name, factories, interval, offset, order, keyed, minDocCount, minBound, maxBound, valuesSource,
config.format(), context, parent, pipelineAggregators, metaData);
}

@Override
protected Aggregator createUnmapped(Aggregator parent, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData)
throws IOException {
return createAggregator(null, parent, pipelineAggregators, metaData);
}
}
@@ -0,0 +1,48 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.search.aggregations.bucket.histogram;

import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;

import java.util.List;

/** Implemented by histogram aggregations and used by pipeline aggregations to insert buckets. */
// public so that pipeline aggs can use this API: can we fix it?
public interface HistogramFactory {

/** Get the key for the given bucket. Date histograms must return the
* number of millis since Epoch of the bucket key while numeric histograms
* must return the double value of the key. */
Number getKey(MultiBucketsAggregation.Bucket bucket);

/** Given a key returned by {@link #getKey}, compute the lowest key that is
* greater than it. */
Number nextKey(Number key);

/** Create an {@link InternalAggregation} object that wraps the given buckets. */
InternalAggregation createAggregation(List<MultiBucketsAggregation.Bucket> buckets);

/** Create a {@link MultiBucketsAggregation.Bucket} object that wraps the
* given key, document count and aggregations. */
MultiBucketsAggregation.Bucket createBucket(Number key, long docCount, InternalAggregations aggregations);

}
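To illustrate the getKey/nextKey contract described in the javadoc above (assumed semantics in plain Java, not the Elasticsearch implementations): numeric histograms hand out double keys that advance by a fixed interval, while date histograms hand out epoch-millis keys that advance to the next calendar boundary, here the start of the next UTC day.

// Toy sketch of the key-advancing contract; interval and the daily boundary are assumptions.
double interval = 10d;
java.util.function.DoubleUnaryOperator nextNumericKey = key -> key + interval;
java.util.function.LongUnaryOperator nextDateKey = millis ->
        java.time.Instant.ofEpochMilli(millis)
                .atZone(java.time.ZoneOffset.UTC)
                .plusDays(1)
                .toLocalDate()
                .atStartOfDay(java.time.ZoneOffset.UTC)
                .toInstant()
                .toEpochMilli();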
@ -20,8 +20,9 @@ package org.elasticsearch.search.aggregations.bucket.histogram;
|
|||
|
||||
import org.elasticsearch.common.ParseField;
|
||||
import org.elasticsearch.common.ParseFieldMatcher;
|
||||
import org.elasticsearch.common.ParseFieldMatcherSupplier;
|
||||
import org.elasticsearch.common.ParsingException;
|
||||
import org.elasticsearch.common.rounding.Rounding;
|
||||
import org.elasticsearch.common.xcontent.ObjectParser;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentParser.Token;
|
||||
import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.NumericValuesSourceParser;
|
||||
|
@ -32,46 +33,51 @@ import java.io.IOException;
|
|||
import java.util.Map;
|
||||
|
||||
/**
* Parses the histogram request
* A parser for histograms. This translates json into a
* {@link HistogramAggregationBuilder} instance.
*/
public class HistogramParser extends NumericValuesSourceParser {
|
||||
|
||||
private static final ObjectParser<double[], ParseFieldMatcherSupplier> EXTENDED_BOUNDS_PARSER = new ObjectParser<>(
|
||||
Histogram.EXTENDED_BOUNDS_FIELD.getPreferredName(),
|
||||
() -> new double[]{ Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY });
|
||||
static {
|
||||
EXTENDED_BOUNDS_PARSER.declareDouble((bounds, d) -> bounds[0] = d, new ParseField("min"));
|
||||
EXTENDED_BOUNDS_PARSER.declareDouble((bounds, d) -> bounds[1] = d, new ParseField("max"));
|
||||
}
|
||||
|
||||
public HistogramParser() {
|
||||
super(true, true, false);
|
||||
}
|
||||
|
||||
protected HistogramParser(boolean timezoneAware) {
|
||||
super(true, true, timezoneAware);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected AbstractHistogramBuilder<?> createFactory(String aggregationName, ValuesSourceType valuesSourceType,
|
||||
protected HistogramAggregationBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType,
|
||||
ValueType targetValueType, Map<ParseField, Object> otherOptions) {
|
||||
HistogramAggregationBuilder factory = new HistogramAggregationBuilder(aggregationName);
|
||||
Long interval = (Long) otherOptions.get(Rounding.Interval.INTERVAL_FIELD);
|
||||
Double interval = (Double) otherOptions.get(Histogram.INTERVAL_FIELD);
|
||||
if (interval == null) {
|
||||
throw new ParsingException(null, "Missing required field [interval] for histogram aggregation [" + aggregationName + "]");
|
||||
} else {
|
||||
factory.interval(interval);
|
||||
}
|
||||
Long offset = (Long) otherOptions.get(Rounding.OffsetRounding.OFFSET_FIELD);
|
||||
Double offset = (Double) otherOptions.get(Histogram.OFFSET_FIELD);
|
||||
if (offset != null) {
|
||||
factory.offset(offset);
|
||||
}
|
||||
|
||||
ExtendedBounds extendedBounds = (ExtendedBounds) otherOptions.get(ExtendedBounds.EXTENDED_BOUNDS_FIELD);
|
||||
double[] extendedBounds = (double[]) otherOptions.get(Histogram.EXTENDED_BOUNDS_FIELD);
|
||||
if (extendedBounds != null) {
|
||||
factory.extendedBounds(extendedBounds);
|
||||
factory.extendedBounds(extendedBounds[0], extendedBounds[1]);
|
||||
}
|
||||
Boolean keyed = (Boolean) otherOptions.get(HistogramAggregator.KEYED_FIELD);
|
||||
Boolean keyed = (Boolean) otherOptions.get(Histogram.KEYED_FIELD);
|
||||
if (keyed != null) {
|
||||
factory.keyed(keyed);
|
||||
}
|
||||
Long minDocCount = (Long) otherOptions.get(HistogramAggregator.MIN_DOC_COUNT_FIELD);
|
||||
Long minDocCount = (Long) otherOptions.get(Histogram.MIN_DOC_COUNT_FIELD);
|
||||
if (minDocCount != null) {
|
||||
factory.minDocCount(minDocCount);
|
||||
}
|
||||
InternalOrder order = (InternalOrder) otherOptions.get(HistogramAggregator.ORDER_FIELD);
|
||||
InternalOrder order = (InternalOrder) otherOptions.get(Histogram.ORDER_FIELD);
|
||||
if (order != null) {
|
||||
factory.order(order);
|
||||
}
|
||||
|
@ -82,33 +88,23 @@ public class HistogramParser extends NumericValuesSourceParser {
|
|||
protected boolean token(String aggregationName, String currentFieldName, Token token, XContentParser parser,
|
||||
ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
|
||||
if (token.isValue()) {
|
||||
if (parseFieldMatcher.match(currentFieldName, Rounding.Interval.INTERVAL_FIELD)) {
|
||||
if (token == XContentParser.Token.VALUE_STRING) {
|
||||
otherOptions.put(Rounding.Interval.INTERVAL_FIELD, parseStringInterval(parser.text()));
|
||||
return true;
|
||||
} else {
|
||||
otherOptions.put(Rounding.Interval.INTERVAL_FIELD, parser.longValue());
|
||||
return true;
|
||||
}
|
||||
} else if (parseFieldMatcher.match(currentFieldName, HistogramAggregator.MIN_DOC_COUNT_FIELD)) {
|
||||
otherOptions.put(HistogramAggregator.MIN_DOC_COUNT_FIELD, parser.longValue());
|
||||
if (parseFieldMatcher.match(currentFieldName, Histogram.INTERVAL_FIELD)) {
|
||||
otherOptions.put(Histogram.INTERVAL_FIELD, parser.doubleValue());
|
||||
return true;
|
||||
} else if (parseFieldMatcher.match(currentFieldName, HistogramAggregator.KEYED_FIELD)) {
|
||||
otherOptions.put(HistogramAggregator.KEYED_FIELD, parser.booleanValue());
|
||||
} else if (parseFieldMatcher.match(currentFieldName, Histogram.MIN_DOC_COUNT_FIELD)) {
|
||||
otherOptions.put(Histogram.MIN_DOC_COUNT_FIELD, parser.longValue());
|
||||
return true;
|
||||
} else if (parseFieldMatcher.match(currentFieldName, Histogram.KEYED_FIELD)) {
|
||||
otherOptions.put(Histogram.KEYED_FIELD, parser.booleanValue());
|
||||
return true;
|
||||
} else if (parseFieldMatcher.match(currentFieldName, Histogram.OFFSET_FIELD)) {
|
||||
otherOptions.put(Histogram.OFFSET_FIELD, parser.doubleValue());
|
||||
return true;
|
||||
} else if (parseFieldMatcher.match(currentFieldName, Rounding.OffsetRounding.OFFSET_FIELD)) {
|
||||
if (token == XContentParser.Token.VALUE_STRING) {
|
||||
otherOptions.put(Rounding.OffsetRounding.OFFSET_FIELD, parseStringOffset(parser.text()));
|
||||
return true;
|
||||
} else {
|
||||
otherOptions.put(Rounding.OffsetRounding.OFFSET_FIELD, parser.longValue());
|
||||
return true;
|
||||
}
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
} else if (token == XContentParser.Token.START_OBJECT) {
|
||||
if (parseFieldMatcher.match(currentFieldName, HistogramAggregator.ORDER_FIELD)) {
|
||||
if (parseFieldMatcher.match(currentFieldName, Histogram.ORDER_FIELD)) {
|
||||
InternalOrder order = null;
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
|
@ -124,14 +120,11 @@ public class HistogramParser extends NumericValuesSourceParser {
|
|||
order = resolveOrder(currentFieldName, asc);
|
||||
}
|
||||
}
|
||||
otherOptions.put(HistogramAggregator.ORDER_FIELD, order);
|
||||
otherOptions.put(Histogram.ORDER_FIELD, order);
|
||||
return true;
|
||||
} else if (parseFieldMatcher.match(currentFieldName, ExtendedBounds.EXTENDED_BOUNDS_FIELD)) {
|
||||
try {
|
||||
otherOptions.put(ExtendedBounds.EXTENDED_BOUNDS_FIELD, ExtendedBounds.PARSER.apply(parser, () -> parseFieldMatcher));
|
||||
} catch (Exception e) {
|
||||
throw new ParsingException(parser.getTokenLocation(), "Error parsing [{}]", e, aggregationName);
|
||||
}
|
||||
} else if (parseFieldMatcher.match(currentFieldName, Histogram.EXTENDED_BOUNDS_FIELD)) {
|
||||
double[] bounds = EXTENDED_BOUNDS_PARSER.apply(parser, () -> parseFieldMatcher);
|
||||
otherOptions.put(Histogram.EXTENDED_BOUNDS_FIELD, bounds);
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
|
@ -141,14 +134,6 @@ public class HistogramParser extends NumericValuesSourceParser {
|
|||
}
|
||||
}
|
||||
|
||||
protected Object parseStringInterval(String interval) {
|
||||
return Long.valueOf(interval);
|
||||
}
|
||||
|
||||
protected long parseStringOffset(String offset) throws IOException {
|
||||
return Long.valueOf(offset);
|
||||
}
|
||||
|
||||
static InternalOrder resolveOrder(String key, boolean asc) {
|
||||
if ("_key".equals(key)) {
|
||||
return (InternalOrder) (asc ? InternalOrder.KEY_ASC : InternalOrder.KEY_DESC);
|
||||
|
|
|
@ -18,36 +18,71 @@
|
|||
*/
|
||||
package org.elasticsearch.search.aggregations.bucket.histogram;
|
||||
|
||||
import org.apache.lucene.util.CollectionUtil;
|
||||
import org.apache.lucene.util.PriorityQueue;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.rounding.Rounding;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.search.DocValueFormat;
|
||||
import org.elasticsearch.search.aggregations.AggregationExecutionException;
|
||||
import org.elasticsearch.search.aggregations.InternalAggregation.Type;
|
||||
import org.elasticsearch.search.aggregations.Aggregations;
|
||||
import org.elasticsearch.search.aggregations.InternalAggregation;
|
||||
import org.elasticsearch.search.aggregations.InternalAggregations;
|
||||
import org.elasticsearch.search.aggregations.support.ValueType;
|
||||
import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;
|
||||
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
|
||||
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
|
||||
import org.joda.time.DateTime;
|
||||
import org.joda.time.DateTimeZone;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.ListIterator;
|
||||
import java.util.Map;
|
||||
|
||||
/**
* Results of a date_histogram aggregation.
* Implementation of {@link Histogram}.
*/
public class InternalDateHistogram {
public final class InternalDateHistogram extends InternalMultiBucketAggregation<InternalDateHistogram, InternalDateHistogram.Bucket>
implements Histogram, HistogramFactory {
|
||||
|
||||
public static final Factory HISTOGRAM_FACTORY = new Factory();
|
||||
static final Type TYPE = new Type("date_histogram");
|
||||
|
||||
static class Bucket extends InternalHistogram.Bucket {
|
||||
Bucket(long key, long docCount, InternalAggregations aggregations, boolean keyed, DocValueFormat formatter,
|
||||
InternalHistogram.Factory<Bucket> factory) {
|
||||
super(key, docCount, keyed, formatter, factory, aggregations);
|
||||
public static class Bucket extends InternalMultiBucketAggregation.InternalBucket implements Histogram.Bucket {
|
||||
|
||||
final long key;
|
||||
final long docCount;
|
||||
final InternalAggregations aggregations;
|
||||
private final transient boolean keyed;
|
||||
protected final transient DocValueFormat format;
|
||||
|
||||
public Bucket(long key, long docCount, boolean keyed, DocValueFormat format,
|
||||
InternalAggregations aggregations) {
|
||||
this.format = format;
|
||||
this.keyed = keyed;
|
||||
this.key = key;
|
||||
this.docCount = docCount;
|
||||
this.aggregations = aggregations;
|
||||
}
|
||||
|
||||
/**
|
||||
* Read from a stream.
|
||||
*/
|
||||
Bucket(StreamInput in, boolean keyed, DocValueFormat formatter, InternalHistogram.Factory<Bucket> factory) throws IOException {
|
||||
super(in, keyed, formatter, factory);
|
||||
public Bucket(StreamInput in, boolean keyed, DocValueFormat format) throws IOException {
|
||||
this.format = format;
|
||||
this.keyed = keyed;
|
||||
key = in.readLong();
|
||||
docCount = in.readVLong();
|
||||
aggregations = InternalAggregations.readAggregations(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeLong(key);
|
||||
out.writeVLong(docCount);
|
||||
aggregations.writeTo(out);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -56,53 +91,356 @@ public class InternalDateHistogram {
|
|||
}
|
||||
|
||||
@Override
|
||||
public DateTime getKey() {
|
||||
public Object getKey() {
|
||||
return new DateTime(key, DateTimeZone.UTC);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return getKeyAsString();
|
||||
public long getDocCount() {
|
||||
return docCount;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Aggregations getAggregations() {
|
||||
return aggregations;
|
||||
}
|
||||
|
||||
Bucket reduce(List<Bucket> buckets, ReduceContext context) {
|
||||
List<InternalAggregations> aggregations = new ArrayList<>(buckets.size());
|
||||
long docCount = 0;
|
||||
for (Bucket bucket : buckets) {
|
||||
docCount += bucket.docCount;
|
||||
aggregations.add((InternalAggregations) bucket.getAggregations());
|
||||
}
|
||||
InternalAggregations aggs = InternalAggregations.reduce(aggregations, context);
|
||||
return new InternalDateHistogram.Bucket(key, docCount, keyed, format, aggs);
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
String keyAsString = format.format(key);
|
||||
if (keyed) {
|
||||
builder.startObject(keyAsString);
|
||||
} else {
|
||||
builder.startObject();
|
||||
}
|
||||
if (format != DocValueFormat.RAW) {
|
||||
builder.field(CommonFields.KEY_AS_STRING, keyAsString);
|
||||
}
|
||||
builder.field(CommonFields.KEY, key);
|
||||
builder.field(CommonFields.DOC_COUNT, docCount);
|
||||
aggregations.toXContentInternal(builder, params);
|
||||
builder.endObject();
|
||||
return builder;
|
||||
}
|
||||
|
||||
public DocValueFormat getFormatter() {
|
||||
return format;
|
||||
}
|
||||
|
||||
public boolean getKeyed() {
|
||||
return keyed;
|
||||
}
|
||||
}
|
||||
|
||||
static class Factory extends InternalHistogram.Factory<InternalDateHistogram.Bucket> {
|
||||
static class EmptyBucketInfo {
|
||||
|
||||
Factory() {
|
||||
final Rounding rounding;
|
||||
final InternalAggregations subAggregations;
|
||||
final ExtendedBounds bounds;
|
||||
|
||||
EmptyBucketInfo(Rounding rounding, InternalAggregations subAggregations) {
|
||||
this(rounding, subAggregations, null);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Type type() {
|
||||
return TYPE;
|
||||
EmptyBucketInfo(Rounding rounding, InternalAggregations subAggregations, ExtendedBounds bounds) {
|
||||
this.rounding = rounding;
|
||||
this.subAggregations = subAggregations;
|
||||
this.bounds = bounds;
|
||||
}
|
||||
|
||||
@Override
|
||||
public ValueType valueType() {
|
||||
return ValueType.DATE;
|
||||
EmptyBucketInfo(StreamInput in) throws IOException {
|
||||
rounding = Rounding.Streams.read(in);
|
||||
subAggregations = InternalAggregations.readAggregations(in);
|
||||
bounds = in.readOptionalWriteable(ExtendedBounds::new);
|
||||
}
|
||||
|
||||
@Override
|
||||
public InternalDateHistogram.Bucket createBucket(InternalAggregations aggregations, InternalDateHistogram.Bucket prototype) {
|
||||
return new Bucket(prototype.key, prototype.docCount, aggregations, prototype.getKeyed(), prototype.format, this);
|
||||
void writeTo(StreamOutput out) throws IOException {
|
||||
Rounding.Streams.write(rounding, out);
|
||||
subAggregations.writeTo(out);
|
||||
out.writeOptionalWriteable(bounds);
|
||||
}
|
||||
|
||||
@Override
|
||||
public InternalDateHistogram.Bucket createBucket(Object key, long docCount, InternalAggregations aggregations, boolean keyed,
|
||||
DocValueFormat formatter) {
|
||||
if (key instanceof Number) {
|
||||
return new Bucket(((Number) key).longValue(), docCount, aggregations, keyed, formatter, this);
|
||||
} else if (key instanceof DateTime) {
|
||||
return new Bucket(((DateTime) key).getMillis(), docCount, aggregations, keyed, formatter, this);
|
||||
} else {
|
||||
throw new AggregationExecutionException("Expected key of type Number or DateTime but got [" + key + "]");
|
||||
}
|
||||
|
||||
private final List<Bucket> buckets;
|
||||
private final InternalOrder order;
|
||||
private final DocValueFormat format;
|
||||
private final boolean keyed;
|
||||
private final long minDocCount;
|
||||
private final EmptyBucketInfo emptyBucketInfo;
|
||||
|
||||
InternalDateHistogram(String name, List<Bucket> buckets, InternalOrder order, long minDocCount, EmptyBucketInfo emptyBucketInfo,
|
||||
DocValueFormat formatter, boolean keyed, List<PipelineAggregator> pipelineAggregators,
|
||||
Map<String, Object> metaData) {
|
||||
super(name, pipelineAggregators, metaData);
|
||||
this.buckets = buckets;
|
||||
this.order = order;
|
||||
assert (minDocCount == 0) == (emptyBucketInfo != null);
|
||||
this.minDocCount = minDocCount;
|
||||
this.emptyBucketInfo = emptyBucketInfo;
|
||||
this.format = formatter;
|
||||
this.keyed = keyed;
|
||||
}
|
||||
|
||||
/**
* Read from a stream.
*/
public InternalDateHistogram(StreamInput in) throws IOException {
|
||||
super(in);
|
||||
order = InternalOrder.Streams.readOrder(in);
|
||||
minDocCount = in.readVLong();
|
||||
if (minDocCount == 0) {
|
||||
emptyBucketInfo = new EmptyBucketInfo(in);
|
||||
} else {
|
||||
emptyBucketInfo = null;
|
||||
}
|
||||
format = in.readNamedWriteable(DocValueFormat.class);
|
||||
keyed = in.readBoolean();
|
||||
buckets = in.readList(stream -> new Bucket(stream, keyed, format));
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doWriteTo(StreamOutput out) throws IOException {
|
||||
InternalOrder.Streams.writeOrder(order, out);
|
||||
out.writeVLong(minDocCount);
|
||||
if (minDocCount == 0) {
|
||||
emptyBucketInfo.writeTo(out);
|
||||
}
|
||||
out.writeNamedWriteable(format);
|
||||
out.writeBoolean(keyed);
|
||||
out.writeList(buckets);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getWriteableName() {
|
||||
return DateHistogramAggregationBuilder.NAME;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<Histogram.Bucket> getBuckets() {
|
||||
return Collections.unmodifiableList(buckets);
|
||||
}
|
||||
|
||||
@Override
|
||||
public InternalDateHistogram create(List<Bucket> buckets) {
|
||||
return new InternalDateHistogram(name, buckets, order, minDocCount, emptyBucketInfo, format,
|
||||
keyed, pipelineAggregators(), metaData);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) {
|
||||
return new Bucket(prototype.key, prototype.docCount, prototype.keyed, prototype.format, aggregations);
|
||||
}
|
||||
|
||||
private static class IteratorAndCurrent {
|
||||
|
||||
private final Iterator<Bucket> iterator;
|
||||
private Bucket current;
|
||||
|
||||
IteratorAndCurrent(Iterator<Bucket> iterator) {
|
||||
this.iterator = iterator;
|
||||
current = iterator.next();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private List<Bucket> reduceBuckets(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
|
||||
|
||||
final PriorityQueue<IteratorAndCurrent> pq = new PriorityQueue<IteratorAndCurrent>(aggregations.size()) {
|
||||
@Override
|
||||
protected boolean lessThan(IteratorAndCurrent a, IteratorAndCurrent b) {
|
||||
return a.current.key < b.current.key;
|
||||
}
|
||||
};
|
||||
for (InternalAggregation aggregation : aggregations) {
|
||||
InternalDateHistogram histogram = (InternalDateHistogram) aggregation;
|
||||
if (histogram.buckets.isEmpty() == false) {
|
||||
pq.add(new IteratorAndCurrent(histogram.buckets.iterator()));
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Bucket readBucket(StreamInput in, boolean keyed, DocValueFormat format) throws IOException {
|
||||
return new Bucket(in, keyed, format, this);
|
||||
List<Bucket> reducedBuckets = new ArrayList<>();
|
||||
if (pq.size() > 0) {
|
||||
// list of buckets coming from different shards that have the same key
|
||||
List<Bucket> currentBuckets = new ArrayList<>();
|
||||
double key = pq.top().current.key;
|
||||
|
||||
do {
|
||||
final IteratorAndCurrent top = pq.top();
|
||||
|
||||
if (top.current.key != key) {
|
||||
// the key changes, reduce what we already buffered and reset the buffer for current buckets
|
||||
final Bucket reduced = currentBuckets.get(0).reduce(currentBuckets, reduceContext);
|
||||
if (reduced.getDocCount() >= minDocCount) {
|
||||
reducedBuckets.add(reduced);
|
||||
}
|
||||
currentBuckets.clear();
|
||||
key = top.current.key;
|
||||
}
|
||||
|
||||
currentBuckets.add(top.current);
|
||||
|
||||
if (top.iterator.hasNext()) {
|
||||
final Bucket next = top.iterator.next();
|
||||
assert next.key > top.current.key : "shards must return data sorted by key";
|
||||
top.current = next;
|
||||
pq.updateTop();
|
||||
} else {
|
||||
pq.pop();
|
||||
}
|
||||
} while (pq.size() > 0);
|
||||
|
||||
if (currentBuckets.isEmpty() == false) {
|
||||
final Bucket reduced = currentBuckets.get(0).reduce(currentBuckets, reduceContext);
|
||||
if (reduced.getDocCount() >= minDocCount) {
|
||||
reducedBuckets.add(reduced);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return reducedBuckets;
|
||||
}
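    // reduceBuckets merges the per-shard bucket lists with an n-way merge: each shard's list is
    // already sorted by key, so a PriorityQueue ordered on the current key yields buckets in global
    // key order; all buckets sharing a key are combined with Bucket#reduce before the
    // min_doc_count filter is applied.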

    private void addEmptyBuckets(List<Bucket> list, ReduceContext reduceContext) {
        Bucket lastBucket = null;
        ExtendedBounds bounds = emptyBucketInfo.bounds;
        ListIterator<Bucket> iter = list.listIterator();

        // first adding all the empty buckets *before* the actual data (based on the extended_bounds.min the user requested)
        InternalAggregations reducedEmptySubAggs = InternalAggregations.reduce(Collections.singletonList(emptyBucketInfo.subAggregations),
                reduceContext);
        if (bounds != null) {
            Bucket firstBucket = iter.hasNext() ? list.get(iter.nextIndex()) : null;
            if (firstBucket == null) {
                if (bounds.getMin() != null && bounds.getMax() != null) {
                    long key = bounds.getMin();
                    long max = bounds.getMax();
                    while (key <= max) {
                        iter.add(new InternalDateHistogram.Bucket(key, 0, keyed, format, reducedEmptySubAggs));
                        key = emptyBucketInfo.rounding.nextRoundingValue(key);
                    }
                }
            } else {
                if (bounds.getMin() != null) {
                    long key = bounds.getMin();
                    if (key < firstBucket.key) {
                        while (key < firstBucket.key) {
                            iter.add(new InternalDateHistogram.Bucket(key, 0, keyed, format, reducedEmptySubAggs));
                            key = emptyBucketInfo.rounding.nextRoundingValue(key);
                        }
                    }
                }
            }
        }

        // now adding the empty buckets within the actual data,
        // e.g. if the data series is [1,2,3,7] there are 3 empty buckets that will be created for 4,5,6
        while (iter.hasNext()) {
            Bucket nextBucket = list.get(iter.nextIndex());
            if (lastBucket != null) {
                long key = emptyBucketInfo.rounding.nextRoundingValue(lastBucket.key);
                while (key < nextBucket.key) {
                    iter.add(new InternalDateHistogram.Bucket(key, 0, keyed, format, reducedEmptySubAggs));
                    key = emptyBucketInfo.rounding.nextRoundingValue(key);
                }
                assert key == nextBucket.key;
            }
            lastBucket = iter.next();
        }

        // finally, adding the empty buckets *after* the actual data (based on the extended_bounds.max requested by the user)
        if (bounds != null && lastBucket != null && bounds.getMax() != null && bounds.getMax() > lastBucket.key) {
            long key = emptyBucketInfo.rounding.nextRoundingValue(lastBucket.key);
            long max = bounds.getMax();
            while (key <= max) {
                iter.add(new InternalDateHistogram.Bucket(key, 0, keyed, format, reducedEmptySubAggs));
                key = emptyBucketInfo.rounding.nextRoundingValue(key);
            }
        }
    }
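    // addEmptyBuckets inserts zero-doc buckets in three passes: before the first real bucket (down
    // from extended_bounds.min), in the gaps between real buckets, and after the last real bucket
    // (up to extended_bounds.max), always stepping keys with Rounding#nextRoundingValue so the
    // synthetic keys land on the same grid as the real ones.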

    private InternalDateHistogram() {}
    @Override
    public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
        List<Bucket> reducedBuckets = reduceBuckets(aggregations, reduceContext);

        // adding empty buckets if needed
        if (minDocCount == 0) {
            addEmptyBuckets(reducedBuckets, reduceContext);
        }

        if (order == InternalOrder.KEY_ASC) {
            // nothing to do, data are already sorted since shards return
            // sorted buckets and the merge-sort performed by reduceBuckets
            // maintains order
        } else if (order == InternalOrder.KEY_DESC) {
            // we just need to reverse here...
            List<Bucket> reverse = new ArrayList<>(reducedBuckets);
            Collections.reverse(reverse);
            reducedBuckets = reverse;
        } else {
            // sorted by sub-aggregation, need to fall back to a costly n*log(n) sort
            CollectionUtil.introSort(reducedBuckets, order.comparator());
        }

        return new InternalDateHistogram(getName(), reducedBuckets, order, minDocCount, emptyBucketInfo,
                format, keyed, pipelineAggregators(), getMetaData());
    }

    @Override
    public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
        if (keyed) {
            builder.startObject(CommonFields.BUCKETS);
        } else {
            builder.startArray(CommonFields.BUCKETS);
        }
        for (Bucket bucket : buckets) {
            bucket.toXContent(builder, params);
        }
        if (keyed) {
            builder.endObject();
        } else {
            builder.endArray();
        }
        return builder;
    }

    // HistogramFactory method impls

    @Override
    public Number getKey(MultiBucketsAggregation.Bucket bucket) {
        return ((Bucket) bucket).key;
    }

    @Override
    public Number nextKey(Number key) {
        return emptyBucketInfo.rounding.nextRoundingValue(key.longValue());
    }

    @Override
    public InternalAggregation createAggregation(List<MultiBucketsAggregation.Bucket> buckets) {
        // convert buckets to the right type
        List<Bucket> buckets2 = new ArrayList<>(buckets.size());
        for (Object b : buckets) {
            buckets2.add((Bucket) b);
        }
        buckets2 = Collections.unmodifiableList(buckets2);
        return new InternalDateHistogram(name, buckets2, order, minDocCount, emptyBucketInfo, format,
                keyed, pipelineAggregators(), getMetaData());
    }

    @Override
    public Bucket createBucket(Number key, long docCount, InternalAggregations aggregations) {
        return new Bucket(key.longValue(), docCount, keyed, format, aggregations);
    }
}

@@ -22,16 +22,14 @@ import org.apache.lucene.util.CollectionUtil;
import org.apache.lucene.util.PriorityQueue;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.rounding.Rounding;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.AggregationExecutionException;
import org.elasticsearch.search.aggregations.Aggregations;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.search.aggregations.support.ValueType;

import java.io.IOException;
import java.util.ArrayList;

@@ -42,28 +40,25 @@ import java.util.ListIterator;
import java.util.Map;

/**
 * TODO should be renamed to InternalNumericHistogram (see comment on {@link Histogram})?
 * Implementation of {@link Histogram}.
 */
public class InternalHistogram<B extends InternalHistogram.Bucket> extends InternalMultiBucketAggregation<InternalHistogram<B>, B>
        implements Histogram {
public final class InternalHistogram extends InternalMultiBucketAggregation<InternalHistogram, InternalHistogram.Bucket>
        implements Histogram, HistogramFactory {

    public static final Factory<Bucket> HISTOGRAM_FACTORY = new Factory<Bucket>();
    static final Type TYPE = new Type("histogram");

    public static class Bucket extends InternalMultiBucketAggregation.InternalBucket implements Histogram.Bucket {

        final long key;
        final double key;
        final long docCount;
        final InternalAggregations aggregations;
        private final transient boolean keyed;
        protected final transient DocValueFormat format;
        private final Factory<?> factory;

        public Bucket(long key, long docCount, boolean keyed, DocValueFormat format, Factory<?> factory,
        public Bucket(double key, long docCount, boolean keyed, DocValueFormat format,
                InternalAggregations aggregations) {
            this.format = format;
            this.keyed = keyed;
            this.factory = factory;
            this.key = key;
            this.docCount = docCount;
            this.aggregations = aggregations;

@@ -72,26 +67,21 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter
        /**
         * Read from a stream.
         */
        public Bucket(StreamInput in, boolean keyed, DocValueFormat format, Factory<?> factory) throws IOException {
        public Bucket(StreamInput in, boolean keyed, DocValueFormat format) throws IOException {
            this.format = format;
            this.keyed = keyed;
            this.factory = factory;
            key = in.readLong();
            key = in.readDouble();
            docCount = in.readVLong();
            aggregations = InternalAggregations.readAggregations(in);
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeLong(key);
            out.writeDouble(key);
            out.writeVLong(docCount);
            aggregations.writeTo(out);
        }

        protected Factory<?> getFactory() {
            return factory;
        }

        @Override
        public String getKeyAsString() {
            return format.format(key);

@@ -112,8 +102,7 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter
            return aggregations;
        }

        @SuppressWarnings("unchecked")
        <B extends Bucket> B reduce(List<B> buckets, ReduceContext context) {
        Bucket reduce(List<Bucket> buckets, ReduceContext context) {
            List<InternalAggregations> aggregations = new ArrayList<>(buckets.size());
            long docCount = 0;
            for (Bucket bucket : buckets) {

@@ -121,7 +110,7 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter
                aggregations.add((InternalAggregations) bucket.getAggregations());
            }
            InternalAggregations aggs = InternalAggregations.reduce(aggregations, context);
            return (B) getFactory().createBucket(key, docCount, aggs, keyed, format);
            return new InternalHistogram.Bucket(key, docCount, keyed, format, aggs);
        }

        @Override

@@ -153,96 +142,40 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter

    static class EmptyBucketInfo {

        final Rounding rounding;
        final double interval, offset, minBound, maxBound;
        final InternalAggregations subAggregations;
        final ExtendedBounds bounds;

        EmptyBucketInfo(Rounding rounding, InternalAggregations subAggregations) {
            this(rounding, subAggregations, null);
        }

        EmptyBucketInfo(Rounding rounding, InternalAggregations subAggregations, ExtendedBounds bounds) {
            this.rounding = rounding;
        EmptyBucketInfo(double interval, double offset, double minBound, double maxBound, InternalAggregations subAggregations) {
            this.interval = interval;
            this.offset = offset;
            this.minBound = minBound;
            this.maxBound = maxBound;
            this.subAggregations = subAggregations;
            this.bounds = bounds;
        }

        public static EmptyBucketInfo readFrom(StreamInput in) throws IOException {
            Rounding rounding = Rounding.Streams.read(in);
            InternalAggregations aggs = InternalAggregations.readAggregations(in);
            if (in.readBoolean()) {
                return new EmptyBucketInfo(rounding, aggs, new ExtendedBounds(in));
            }
            return new EmptyBucketInfo(rounding, aggs);
        EmptyBucketInfo(StreamInput in) throws IOException {
            this(in.readDouble(), in.readDouble(), in.readDouble(), in.readDouble(), InternalAggregations.readAggregations(in));
        }

        public static void writeTo(EmptyBucketInfo info, StreamOutput out) throws IOException {
            Rounding.Streams.write(info.rounding, out);
            info.subAggregations.writeTo(out);
            out.writeBoolean(info.bounds != null);
            if (info.bounds != null) {
                info.bounds.writeTo(out);
            }
        public void writeTo(StreamOutput out) throws IOException {
            out.writeDouble(interval);
            out.writeDouble(offset);
            out.writeDouble(minBound);
            out.writeDouble(maxBound);
            subAggregations.writeTo(out);
        }

    }
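    // For the numeric histogram the empty-bucket metadata is plain doubles (interval, offset,
    // minBound, maxBound) rather than a Rounding plus optional ExtendedBounds; when the user gave
    // no extended bounds, minBound/maxBound are presumably non-finite, which is why the
    // Double.isFinite checks and <= maxBound comparisons further down skip the fill-in.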

    public static class Factory<B extends InternalHistogram.Bucket> {

        protected Factory() {
        }

        public Type type() {
            return TYPE;
        }

        public ValueType valueType() {
            return ValueType.NUMERIC;
        }

        public InternalHistogram<B> create(String name, List<B> buckets, InternalOrder order, long minDocCount,
                EmptyBucketInfo emptyBucketInfo, DocValueFormat formatter, boolean keyed,
                List<PipelineAggregator> pipelineAggregators,
                Map<String, Object> metaData) {
            return new InternalHistogram<>(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed, this, pipelineAggregators,
                    metaData);
        }

        public InternalHistogram<B> create(List<B> buckets, InternalHistogram<B> prototype) {
            return new InternalHistogram<>(prototype.name, buckets, prototype.order, prototype.minDocCount, prototype.emptyBucketInfo,
                    prototype.format, prototype.keyed, this, prototype.pipelineAggregators(), prototype.metaData);
        }

        @SuppressWarnings("unchecked")
        public B createBucket(InternalAggregations aggregations, B prototype) {
            return (B) new Bucket(prototype.key, prototype.docCount, prototype.getKeyed(), prototype.format, this, aggregations);
        }

        @SuppressWarnings("unchecked")
        public B createBucket(Object key, long docCount, InternalAggregations aggregations, boolean keyed, DocValueFormat formatter) {
            if (key instanceof Number) {
                return (B) new Bucket(((Number) key).longValue(), docCount, keyed, formatter, this, aggregations);
            } else {
                throw new AggregationExecutionException("Expected key of type Number but got [" + key + "]");
            }
        }

        @SuppressWarnings("unchecked")
        protected B readBucket(StreamInput in, boolean keyed, DocValueFormat format) throws IOException {
            return (B) new Bucket(in, keyed, format, this);
        }
    }

    private final List<B> buckets;
    private final List<Bucket> buckets;
    private final InternalOrder order;
    private final DocValueFormat format;
    private final boolean keyed;
    private final long minDocCount;
    private final EmptyBucketInfo emptyBucketInfo;
    private final Factory<B> factory;

    InternalHistogram(String name, List<B> buckets, InternalOrder order, long minDocCount, EmptyBucketInfo emptyBucketInfo,
            DocValueFormat formatter, boolean keyed, Factory<B> factory, List<PipelineAggregator> pipelineAggregators,
    InternalHistogram(String name, List<Bucket> buckets, InternalOrder order, long minDocCount, EmptyBucketInfo emptyBucketInfo,
            DocValueFormat formatter, boolean keyed, List<PipelineAggregator> pipelineAggregators,
            Map<String, Object> metaData) {
        super(name, pipelineAggregators, metaData);
        this.buckets = buckets;

@@ -252,7 +185,6 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter
        this.emptyBucketInfo = emptyBucketInfo;
        this.format = formatter;
        this.keyed = keyed;
        this.factory = factory;
    }

    /**

@@ -260,37 +192,24 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter
     */
    public InternalHistogram(StreamInput in) throws IOException {
        super(in);
        factory = resolveFactory(in.readString());
        order = InternalOrder.Streams.readOrder(in);
        minDocCount = in.readVLong();
        if (minDocCount == 0) {
            emptyBucketInfo = EmptyBucketInfo.readFrom(in);
            emptyBucketInfo = new EmptyBucketInfo(in);
        } else {
            emptyBucketInfo = null;
        }
        format = in.readNamedWriteable(DocValueFormat.class);
        keyed = in.readBoolean();
        buckets = in.readList(stream -> factory.readBucket(stream, keyed, format));
    }

    @SuppressWarnings("unchecked")
    protected static <B extends InternalHistogram.Bucket> Factory<B> resolveFactory(String factoryType) {
        if (factoryType.equals(InternalDateHistogram.TYPE.name())) {
            return (Factory<B>) new InternalDateHistogram.Factory();
        } else if (factoryType.equals(TYPE.name())) {
            return new Factory<>();
        } else {
            throw new IllegalStateException("Invalid histogram factory type [" + factoryType + "]");
        }
        buckets = in.readList(stream -> new Bucket(stream, keyed, format));
    }

    @Override
    protected void doWriteTo(StreamOutput out) throws IOException {
        out.writeString(factory.type().name());
        InternalOrder.Streams.writeOrder(order, out);
        out.writeVLong(minDocCount);
        if (minDocCount == 0) {
            EmptyBucketInfo.writeTo(emptyBucketInfo, out);
            emptyBucketInfo.writeTo(out);
        }
        out.writeNamedWriteable(format);
        out.writeBoolean(keyed);

@@ -303,68 +222,59 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter
    }

    @Override
    public List<B> getBuckets() {
        return buckets;
    }

    public Factory<B> getFactory() {
        return factory;
    }

    public Rounding getRounding() {
        return emptyBucketInfo.rounding;
    public List<Histogram.Bucket> getBuckets() {
        return Collections.unmodifiableList(buckets);
    }

    @Override
    public InternalHistogram<B> create(List<B> buckets) {
        return getFactory().create(buckets, this);
    public InternalHistogram create(List<Bucket> buckets) {
        return new InternalHistogram(name, buckets, order, minDocCount, emptyBucketInfo, format, keyed, pipelineAggregators(), metaData);
    }

    @Override
    public B createBucket(InternalAggregations aggregations, B prototype) {
        return getFactory().createBucket(aggregations, prototype);
    public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) {
        return new Bucket(prototype.key, prototype.docCount, prototype.keyed, prototype.format, aggregations);
    }

    private static class IteratorAndCurrent<B> {
    private static class IteratorAndCurrent {

        private final Iterator<B> iterator;
        private B current;
        private final Iterator<Bucket> iterator;
        private Bucket current;

        IteratorAndCurrent(Iterator<B> iterator) {
        IteratorAndCurrent(Iterator<Bucket> iterator) {
            this.iterator = iterator;
            current = iterator.next();
        }

    }

    private List<B> reduceBuckets(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
    private List<Bucket> reduceBuckets(List<InternalAggregation> aggregations, ReduceContext reduceContext) {

        final PriorityQueue<IteratorAndCurrent<B>> pq = new PriorityQueue<IteratorAndCurrent<B>>(aggregations.size()) {
        final PriorityQueue<IteratorAndCurrent> pq = new PriorityQueue<IteratorAndCurrent>(aggregations.size()) {
            @Override
            protected boolean lessThan(IteratorAndCurrent<B> a, IteratorAndCurrent<B> b) {
            protected boolean lessThan(IteratorAndCurrent a, IteratorAndCurrent b) {
                return a.current.key < b.current.key;
            }
        };
        for (InternalAggregation aggregation : aggregations) {
            @SuppressWarnings("unchecked")
            InternalHistogram<B> histogram = (InternalHistogram<B>) aggregation;
            InternalHistogram histogram = (InternalHistogram) aggregation;
            if (histogram.buckets.isEmpty() == false) {
                pq.add(new IteratorAndCurrent<>(histogram.buckets.iterator()));
                pq.add(new IteratorAndCurrent(histogram.buckets.iterator()));
            }
        }

        List<B> reducedBuckets = new ArrayList<>();
        List<Bucket> reducedBuckets = new ArrayList<>();
        if (pq.size() > 0) {
            // list of buckets coming from different shards that have the same key
            List<B> currentBuckets = new ArrayList<>();
            long key = pq.top().current.key;
            List<Bucket> currentBuckets = new ArrayList<>();
            double key = pq.top().current.key;

            do {
                final IteratorAndCurrent<B> top = pq.top();
                final IteratorAndCurrent top = pq.top();

                if (top.current.key != key) {
                    // the key changes, reduce what we already buffered and reset the buffer for current buckets
                    final B reduced = currentBuckets.get(0).reduce(currentBuckets, reduceContext);
                    final Bucket reduced = currentBuckets.get(0).reduce(currentBuckets, reduceContext);
                    if (reduced.getDocCount() >= minDocCount) {
                        reducedBuckets.add(reduced);
                    }

@@ -375,7 +285,7 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter
                currentBuckets.add(top.current);

                if (top.iterator.hasNext()) {
                    final B next = top.iterator.next();
                    final Bucket next = top.iterator.next();
                    assert next.key > top.current.key : "shards must return data sorted by key";
                    top.current = next;
                    pq.updateTop();

@@ -385,7 +295,7 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter
            } while (pq.size() > 0);

            if (currentBuckets.isEmpty() == false) {
                final B reduced = currentBuckets.get(0).reduce(currentBuckets, reduceContext);
                final Bucket reduced = currentBuckets.get(0).reduce(currentBuckets, reduceContext);
                if (reduced.getDocCount() >= minDocCount) {
                    reducedBuckets.add(reduced);
                }

@@ -395,75 +305,62 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter
        return reducedBuckets;
    }

    private void addEmptyBuckets(List<B> list, ReduceContext reduceContext) {
        B lastBucket = null;
        ExtendedBounds bounds = emptyBucketInfo.bounds;
        ListIterator<B> iter = list.listIterator();
    private double nextKey(double key) {
        return round(key + emptyBucketInfo.interval + emptyBucketInfo.interval / 2);
    }

    private double round(double key) {
        return Math.floor((key - emptyBucketInfo.offset) / emptyBucketInfo.interval) * emptyBucketInfo.interval + emptyBucketInfo.offset;
    }
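    // Example: with interval = 10 and offset = 5, round(23) = floor((23 - 5) / 10) * 10 + 5 = 15,
    // and nextKey(15) = round(15 + 10 + 5) = 25, so keys advance one full interval at a time; the
    // extra interval / 2 in nextKey appears to guard against floating-point error before re-rounding.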

    private void addEmptyBuckets(List<Bucket> list, ReduceContext reduceContext) {
        ListIterator<Bucket> iter = list.listIterator();

        // first adding all the empty buckets *before* the actual data (based on the extended_bounds.min the user requested)
        InternalAggregations reducedEmptySubAggs = InternalAggregations.reduce(Collections.singletonList(emptyBucketInfo.subAggregations),
        InternalAggregations reducedEmptySubAggs = InternalAggregations.reduce(
                Collections.singletonList(emptyBucketInfo.subAggregations),
                reduceContext);
        if (bounds != null) {
            B firstBucket = iter.hasNext() ? list.get(iter.nextIndex()) : null;
            if (firstBucket == null) {
                if (bounds.getMin() != null && bounds.getMax() != null) {
                    long key = bounds.getMin();
                    long max = bounds.getMax();
                    while (key <= max) {
                        iter.add(getFactory().createBucket(key, 0,
                                reducedEmptySubAggs,
                                keyed, format));
                        key = emptyBucketInfo.rounding.nextRoundingValue(key);
                    }
                }
            } else {
                if (bounds.getMin() != null) {
                    long key = bounds.getMin();
                    if (key < firstBucket.key) {
                        while (key < firstBucket.key) {
                            iter.add(getFactory().createBucket(key, 0,
                                    reducedEmptySubAggs,
                                    keyed, format));
                            key = emptyBucketInfo.rounding.nextRoundingValue(key);
                        }
                    }

        if (iter.hasNext() == false) {
            // fill with empty buckets
            for (double key = round(emptyBucketInfo.minBound); key <= emptyBucketInfo.maxBound; key = nextKey(key)) {
                iter.add(new Bucket(key, 0, keyed, format, reducedEmptySubAggs));
            }
        } else {
            Bucket first = list.get(iter.nextIndex());
            if (Double.isFinite(emptyBucketInfo.minBound)) {
                // fill with empty buckets until the first key
                for (double key = round(emptyBucketInfo.minBound); key < first.key; key = nextKey(key)) {
                    iter.add(new Bucket(key, 0, keyed, format, reducedEmptySubAggs));
                }
            }
        }

        // now adding the empty buckets within the actual data,
        // e.g. if the data series is [1,2,3,7] there are 3 empty buckets that will be created for 4,5,6
        while (iter.hasNext()) {
            B nextBucket = list.get(iter.nextIndex());
            if (lastBucket != null) {
                long key = emptyBucketInfo.rounding.nextRoundingValue(lastBucket.key);
                while (key < nextBucket.key) {
                    iter.add(getFactory().createBucket(key, 0,
                            reducedEmptySubAggs, keyed,
                            format));
                    key = emptyBucketInfo.rounding.nextRoundingValue(key);
        // now adding the empty buckets within the actual data,
        // e.g. if the data series is [1,2,3,7] there are 3 empty buckets that will be created for 4,5,6
        Bucket lastBucket = null;
        do {
            Bucket nextBucket = list.get(iter.nextIndex());
            if (lastBucket != null) {
                double key = nextKey(lastBucket.key);
                while (key < nextBucket.key) {
                    iter.add(new Bucket(key, 0, keyed, format, reducedEmptySubAggs));
                    key = nextKey(key);
                }
                assert key == nextBucket.key;
            }
                assert key == nextBucket.key;
            }
            lastBucket = iter.next();
        }
            lastBucket = iter.next();
        } while (iter.hasNext());

        // finally, adding the empty buckets *after* the actual data (based on the extended_bounds.max requested by the user)
        if (bounds != null && lastBucket != null && bounds.getMax() != null && bounds.getMax() > lastBucket.key) {
            long key = emptyBucketInfo.rounding.nextRoundingValue(lastBucket.key);
            long max = bounds.getMax();
            while (key <= max) {
                iter.add(getFactory().createBucket(key, 0,
                        reducedEmptySubAggs, keyed,
                        format));
                key = emptyBucketInfo.rounding.nextRoundingValue(key);
        // finally, adding the empty buckets *after* the actual data (based on the extended_bounds.max requested by the user)
        for (double key = nextKey(lastBucket.key); key <= emptyBucketInfo.maxBound; key = nextKey(key)) {
            iter.add(new Bucket(key, 0, keyed, format, reducedEmptySubAggs));
        }
    }

    @Override
    public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
        List<B> reducedBuckets = reduceBuckets(aggregations, reduceContext);
        List<Bucket> reducedBuckets = reduceBuckets(aggregations, reduceContext);

        // adding empty buckets if needed
        if (minDocCount == 0) {

@@ -476,7 +373,7 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter
            // maintains order
        } else if (order == InternalOrder.KEY_DESC) {
            // we just need to reverse here...
            List<B> reverse = new ArrayList<>(reducedBuckets);
            List<Bucket> reverse = new ArrayList<>(reducedBuckets);
            Collections.reverse(reverse);
            reducedBuckets = reverse;
        } else {

@@ -484,7 +381,7 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter
            CollectionUtil.introSort(reducedBuckets, order.comparator());
        }

        return getFactory().create(getName(), reducedBuckets, order, minDocCount, emptyBucketInfo, format, keyed, pipelineAggregators(),
        return new InternalHistogram(getName(), reducedBuckets, order, minDocCount, emptyBucketInfo, format, keyed, pipelineAggregators(),
                getMetaData());
    }

@@ -495,7 +392,7 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter
        } else {
            builder.startArray(CommonFields.BUCKETS);
        }
        for (B bucket : buckets) {
        for (Bucket bucket : buckets) {
            bucket.toXContent(builder, params);
        }
        if (keyed) {

@@ -506,4 +403,33 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter
        return builder;
    }

    // HistogramFactory method impls
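    // These methods implement the HistogramFactory interface exposed by both InternalHistogram and
    // InternalDateHistogram: pipeline aggregators only need to read a bucket's key as a Number,
    // compute the next key on the grid, and build new buckets/aggregations, so they stay agnostic
    // of whether keys are doubles or millisecond timestamps.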

    @Override
    public Number getKey(MultiBucketsAggregation.Bucket bucket) {
        return ((Bucket) bucket).key;
    }

    @Override
    public Number nextKey(Number key) {
        return nextKey(key.doubleValue());
    }

    @Override
    public InternalAggregation createAggregation(List<MultiBucketsAggregation.Bucket> buckets) {
        // convert buckets to the right type
        List<Bucket> buckets2 = new ArrayList<>(buckets.size());
        for (Object b : buckets) {
            buckets2.add((Bucket) b);
        }
        buckets2 = Collections.unmodifiableList(buckets2);
        return new InternalHistogram(name, buckets2, order, minDocCount, emptyBucketInfo, format,
                keyed, pipelineAggregators(), getMetaData());
    }

    @Override
    public Bucket createBucket(Number key, long docCount, InternalAggregations aggregations) {
        return new Bucket(key.doubleValue(), docCount, keyed, format, aggregations);
    }

}

@@ -35,9 +35,9 @@ class InternalOrder extends Histogram.Order {
    final byte id;
    final String key;
    final boolean asc;
    final Comparator<InternalHistogram.Bucket> comparator;
    final Comparator<Histogram.Bucket> comparator;

    InternalOrder(byte id, String key, boolean asc, Comparator<InternalHistogram.Bucket> comparator) {
    InternalOrder(byte id, String key, boolean asc, Comparator<Histogram.Bucket> comparator) {
        this.id = id;
        this.key = key;
        this.asc = asc;

@@ -57,7 +57,7 @@ class InternalOrder extends Histogram.Order {
    }

    @Override
    Comparator<InternalHistogram.Bucket> comparator() {
    Comparator<Histogram.Bucket> comparator() {
        return comparator;
    }

@@ -90,11 +90,7 @@ class InternalOrder extends Histogram.Order {
        static final byte ID = 0;

        Aggregation(String key, boolean asc) {
            super(ID, key, asc, new MultiBucketsAggregation.Bucket.SubAggregationComparator<InternalHistogram.Bucket>(key, asc));
        }

        private static String key(String aggName, String valueName) {
            return (valueName == null) ? aggName : aggName + "." + valueName;
            super(ID, key, asc, new MultiBucketsAggregation.Bucket.SubAggregationComparator<Histogram.Bucket>(key, asc));
        }

    }

@@ -28,6 +28,7 @@ import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.search.aggregations.AggregationExecutionException;
import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;
import org.elasticsearch.search.aggregations.InvalidAggregationPathException;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation;
import org.elasticsearch.search.aggregations.support.AggregationPath;

@@ -145,13 +146,13 @@ public class BucketHelpers {
     * @return The value extracted from <code>bucket</code> found at
     * <code>aggPath</code>
     */
    public static Double resolveBucketValue(InternalMultiBucketAggregation<?, ? extends InternalMultiBucketAggregation.Bucket> agg,
    public static Double resolveBucketValue(MultiBucketsAggregation agg,
            InternalMultiBucketAggregation.Bucket bucket, String aggPath, GapPolicy gapPolicy) {
        List<String> aggPathsList = AggregationPath.parse(aggPath).getPathElementsAsStringList();
        return resolveBucketValue(agg, bucket, aggPathsList, gapPolicy);
    }

    public static Double resolveBucketValue(InternalMultiBucketAggregation<?, ? extends InternalMultiBucketAggregation.Bucket> agg,
    public static Double resolveBucketValue(MultiBucketsAggregation agg,
            InternalMultiBucketAggregation.Bucket bucket, List<String> aggPathAsList, GapPolicy gapPolicy) {
        try {
            Object propertyValue = bucket.getProperty(agg.getName(), aggPathAsList);

@@ -29,7 +29,8 @@ import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.aggregations.PipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.histogram.AbstractHistogramAggregatorFactory;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregatorFactory;
import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregatorFactory;
import org.elasticsearch.search.aggregations.pipeline.AbstractPipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.BucketMetricsParser;

@@ -104,15 +105,21 @@ public class CumulativeSumPipelineAggregationBuilder extends AbstractPipelineAgg
            throw new IllegalStateException(BUCKETS_PATH.getPreferredName()
                    + " must contain a single entry for aggregation [" + name + "]");
        }
        if (!(parent instanceof AbstractHistogramAggregatorFactory<?>)) {
            throw new IllegalStateException("cumulative sum aggregation [" + name
                    + "] must have a histogram or date_histogram as parent");
        } else {
            AbstractHistogramAggregatorFactory<?> histoParent = (AbstractHistogramAggregatorFactory<?>) parent;
        if (parent instanceof HistogramAggregatorFactory) {
            HistogramAggregatorFactory histoParent = (HistogramAggregatorFactory) parent;
            if (histoParent.minDocCount() != 0) {
                throw new IllegalStateException("parent histogram of cumulative sum aggregation [" + name
                        + "] must have min_doc_count of 0");
            }
        } else if (parent instanceof DateHistogramAggregatorFactory) {
            DateHistogramAggregatorFactory histoParent = (DateHistogramAggregatorFactory) parent;
            if (histoParent.minDocCount() != 0) {
                throw new IllegalStateException("parent histogram of cumulative sum aggregation [" + name
                        + "] must have min_doc_count of 0");
            }
        } else {
            throw new IllegalStateException("cumulative sum aggregation [" + name
                    + "] must have a histogram or date_histogram as parent");
        }
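        // The parent must be either a HistogramAggregatorFactory or a DateHistogramAggregatorFactory,
        // in both cases with min_doc_count of 0; any other parent aggregation is rejected.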

@@ -25,7 +25,9 @@ import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket;
import org.elasticsearch.search.aggregations.bucket.histogram.HistogramFactory;
import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy;
import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;

@@ -68,23 +70,22 @@ public class CumulativeSumPipelineAggregator extends PipelineAggregator {

    @Override
    public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) {
        InternalHistogram histo = (InternalHistogram) aggregation;
        List<? extends InternalHistogram.Bucket> buckets = histo.getBuckets();
        InternalHistogram.Factory<? extends InternalHistogram.Bucket> factory = histo.getFactory();
        MultiBucketsAggregation histo = (MultiBucketsAggregation) aggregation;
        List<? extends Bucket> buckets = histo.getBuckets();
        HistogramFactory factory = (HistogramFactory) histo;

        List newBuckets = new ArrayList<>();
        List<Bucket> newBuckets = new ArrayList<>();
        double sum = 0;
        for (InternalHistogram.Bucket bucket : buckets) {
        for (Bucket bucket : buckets) {
            Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], GapPolicy.INSERT_ZEROS);
            sum += thisBucketValue;
            List<InternalAggregation> aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false).map((p) -> {
                return (InternalAggregation) p;
            }).collect(Collectors.toList());
            aggs.add(new InternalSimpleValue(name(), sum, formatter, new ArrayList<PipelineAggregator>(), metaData()));
            InternalHistogram.Bucket newBucket = factory.createBucket(bucket.getKey(), bucket.getDocCount(),
                    new InternalAggregations(aggs), bucket.getKeyed(), bucket.getFormatter());
            Bucket newBucket = factory.createBucket(factory.getKey(bucket), bucket.getDocCount(), new InternalAggregations(aggs));
            newBuckets.add(newBucket);
        }
        return factory.create(newBuckets, histo);
        return factory.createAggregation(newBuckets);
    }
}

@@ -31,9 +31,9 @@ import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.aggregations.PipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.histogram.AbstractHistogramAggregatorFactory;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregatorFactory;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregatorFactory;
import org.elasticsearch.search.aggregations.pipeline.AbstractPipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;

@@ -162,15 +162,21 @@ public class DerivativePipelineAggregationBuilder extends AbstractPipelineAggreg
            throw new IllegalStateException(PipelineAggregator.Parser.BUCKETS_PATH.getPreferredName()
                    + " must contain a single entry for aggregation [" + name + "]");
        }
        if (!(parent instanceof AbstractHistogramAggregatorFactory<?>)) {
            throw new IllegalStateException("derivative aggregation [" + name
                    + "] must have a histogram or date_histogram as parent");
        } else {
            AbstractHistogramAggregatorFactory<?> histoParent = (AbstractHistogramAggregatorFactory<?>) parent;
        if (parent instanceof HistogramAggregatorFactory) {
            HistogramAggregatorFactory histoParent = (HistogramAggregatorFactory) parent;
            if (histoParent.minDocCount() != 0) {
                throw new IllegalStateException("parent histogram of derivative aggregation [" + name
                        + "] must have min_doc_count of 0");
            }
        } else if (parent instanceof DateHistogramAggregatorFactory) {
            DateHistogramAggregatorFactory histoParent = (DateHistogramAggregatorFactory) parent;
            if (histoParent.minDocCount() != 0) {
                throw new IllegalStateException("parent histogram of derivative aggregation [" + name
                        + "] must have min_doc_count of 0");
            }
        } else {
            throw new IllegalStateException("derivative aggregation [" + name
                    + "] must have a histogram or date_histogram as parent");
        }

@@ -22,14 +22,14 @@ package org.elasticsearch.search.aggregations.pipeline.derivative;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.AggregationExecutionException;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket;
import org.elasticsearch.search.aggregations.bucket.histogram.HistogramFactory;
import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.joda.time.DateTime;

import java.io.IOException;
import java.util.ArrayList;

@@ -77,28 +77,27 @@ public class DerivativePipelineAggregator extends PipelineAggregator {

    @Override
    public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) {
        InternalHistogram histo = (InternalHistogram) aggregation;
        List<? extends InternalHistogram.Bucket> buckets = histo.getBuckets();
        InternalHistogram.Factory<? extends InternalHistogram.Bucket> factory = histo.getFactory();
        MultiBucketsAggregation histo = (MultiBucketsAggregation) aggregation;
        List<? extends Bucket> buckets = histo.getBuckets();
        HistogramFactory factory = (HistogramFactory) histo;

        List newBuckets = new ArrayList<>();
        Long lastBucketKey = null;
        List<Bucket> newBuckets = new ArrayList<>();
        Number lastBucketKey = null;
        Double lastBucketValue = null;
        for (InternalHistogram.Bucket bucket : buckets) {
            Long thisBucketKey = resolveBucketKeyAsLong(bucket);
        for (Bucket bucket : buckets) {
            Number thisBucketKey = factory.getKey(bucket);
            Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], gapPolicy);
            if (lastBucketValue != null && thisBucketValue != null) {
                double gradient = thisBucketValue - lastBucketValue;
                double xDiff = -1;
                if (xAxisUnits != null) {
                    xDiff = (thisBucketKey - lastBucketKey) / xAxisUnits;
                    xDiff = (thisBucketKey.doubleValue() - lastBucketKey.doubleValue()) / xAxisUnits;
                }
                final List<InternalAggregation> aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false).map((p) -> {
                    return (InternalAggregation) p;
                }).collect(Collectors.toList());
                aggs.add(new InternalDerivative(name(), gradient, xDiff, formatter, new ArrayList<PipelineAggregator>(), metaData()));
                InternalHistogram.Bucket newBucket = factory.createBucket(bucket.getKey(), bucket.getDocCount(), new InternalAggregations(
                        aggs), bucket.getKeyed(), bucket.getFormatter());
                Bucket newBucket = factory.createBucket(factory.getKey(bucket), bucket.getDocCount(), new InternalAggregations(aggs));
                newBuckets.add(newBucket);
            } else {
                newBuckets.add(bucket);

@@ -106,18 +105,7 @@ public class DerivativePipelineAggregator extends PipelineAggregator {
            lastBucketKey = thisBucketKey;
            lastBucketValue = thisBucketValue;
        }
        return factory.create(newBuckets, histo);
        return factory.createAggregation(newBuckets);
    }
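    // reduce() emits, for each pair of consecutive non-gap buckets, the change in the metric
    // (gradient) and, when a unit is configured, the key distance expressed in xAxisUnits (xDiff);
    // the bucket key is read through HistogramFactory.getKey, which already yields millis for date
    // histograms, so no DateTime special-casing is needed here.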

    private Long resolveBucketKeyAsLong(InternalHistogram.Bucket bucket) {
        Object key = bucket.getKey();
        if (key instanceof DateTime) {
            return ((DateTime) key).getMillis();
        } else if (key instanceof Number) {
            return ((Number) key).longValue();
        } else {
            throw new AggregationExecutionException("InternalBucket keys must be either a Number or a DateTime for aggregation " + name()
                    + ". Found bucket with key " + key);
        }
    }
}

@@ -30,7 +30,8 @@ import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.aggregations.PipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.histogram.AbstractHistogramAggregatorFactory;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregatorFactory;
import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregatorFactory;
import org.elasticsearch.search.aggregations.pipeline.AbstractPipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;

@@ -267,15 +268,21 @@ public class MovAvgPipelineAggregationBuilder extends AbstractPipelineAggregatio
            throw new IllegalStateException(PipelineAggregator.Parser.BUCKETS_PATH.getPreferredName()
                    + " must contain a single entry for aggregation [" + name + "]");
        }
        if (!(parent instanceof AbstractHistogramAggregatorFactory<?>)) {
            throw new IllegalStateException("moving average aggregation [" + name
                    + "] must have a histogram or date_histogram as parent");
        } else {
            AbstractHistogramAggregatorFactory<?> histoParent = (AbstractHistogramAggregatorFactory<?>) parent;
        if (parent instanceof HistogramAggregatorFactory) {
            HistogramAggregatorFactory histoParent = (HistogramAggregatorFactory) parent;
            if (histoParent.minDocCount() != 0) {
                throw new IllegalStateException("parent histogram of moving average aggregation [" + name
                        + "] must have min_doc_count of 0");
            }
        } else if (parent instanceof DateHistogramAggregatorFactory) {
            DateHistogramAggregatorFactory histoParent = (DateHistogramAggregatorFactory) parent;
            if (histoParent.minDocCount() != 0) {
                throw new IllegalStateException("parent histogram of moving average aggregation [" + name
                        + "] must have min_doc_count of 0");
            }
        } else {
            throw new IllegalStateException("moving average aggregation [" + name
                    + "] must have a histogram or date_histogram as parent");
        }

@@ -23,16 +23,16 @@ import org.elasticsearch.common.collect.EvictingQueue;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.AggregationExecutionException;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket;
import org.elasticsearch.search.aggregations.bucket.histogram.HistogramFactory;
import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy;
import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.search.aggregations.pipeline.movavg.models.MovAvgModel;
import org.joda.time.DateTime;

import java.io.IOException;
import java.util.ArrayList;

@@ -93,14 +93,14 @@ public class MovAvgPipelineAggregator extends PipelineAggregator {

    @Override
    public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) {
        InternalHistogram histo = (InternalHistogram) aggregation;
        List<? extends InternalHistogram.Bucket> buckets = histo.getBuckets();
        InternalHistogram.Factory<? extends InternalHistogram.Bucket> factory = histo.getFactory();
        MultiBucketsAggregation histo = (MultiBucketsAggregation) aggregation;
        List<? extends Bucket> buckets = histo.getBuckets();
        HistogramFactory factory = (HistogramFactory) histo;

        List newBuckets = new ArrayList<>();
        List<Bucket> newBuckets = new ArrayList<>();
        EvictingQueue<Double> values = new EvictingQueue<>(this.window);

        long lastValidKey = 0;
        Number lastValidKey = 0;
        int lastValidPosition = 0;
        int counter = 0;

@@ -110,12 +110,12 @@ public class MovAvgPipelineAggregator extends PipelineAggregator {
            model = minimize(buckets, histo, model);
        }

        for (InternalHistogram.Bucket bucket : buckets) {
        for (Bucket bucket : buckets) {
            Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], gapPolicy);

            // Default is to reuse existing bucket. Simplifies the rest of the logic,
            // since we only change newBucket if we can add to it
            InternalHistogram.Bucket newBucket = bucket;
            Bucket newBucket = bucket;

            if (!(thisBucketValue == null || thisBucketValue.equals(Double.NaN))) {

@@ -127,18 +127,11 @@ public class MovAvgPipelineAggregator extends PipelineAggregator {
                    return (InternalAggregation) p;
                }).collect(Collectors.toList());
                aggs.add(new InternalSimpleValue(name(), movavg, formatter, new ArrayList<PipelineAggregator>(), metaData()));
                newBucket = factory.createBucket(bucket.getKey(), bucket.getDocCount(), new InternalAggregations(
                        aggs), bucket.getKeyed(), bucket.getFormatter());
                newBucket = factory.createBucket(factory.getKey(bucket), bucket.getDocCount(), new InternalAggregations(aggs));
            }

            if (predict > 0) {
                if (bucket.getKey() instanceof Number) {
                    lastValidKey = ((Number) bucket.getKey()).longValue();
                } else if (bucket.getKey() instanceof DateTime) {
                    lastValidKey = ((DateTime) bucket.getKey()).getMillis();
                } else {
                    throw new AggregationExecutionException("Expected key of type Number or DateTime but got [" + lastValidKey + "]");
                }
                lastValidKey = factory.getKey(bucket);
                lastValidPosition = counter;
            }

@@ -150,20 +143,14 @@ public class MovAvgPipelineAggregator extends PipelineAggregator {
        }

        if (buckets.size() > 0 && predict > 0) {

            boolean keyed;
            DocValueFormat formatter;
            keyed = buckets.get(0).getKeyed();
            formatter = buckets.get(0).getFormatter();

            double[] predictions = model.predict(values, predict);
            for (int i = 0; i < predictions.length; i++) {

                List<InternalAggregation> aggs;
                long newKey = histo.getRounding().nextRoundingValue(lastValidKey);
                Number newKey = factory.nextKey(lastValidKey);

                if (lastValidPosition + i + 1 < newBuckets.size()) {
                    InternalHistogram.Bucket bucket = (InternalHistogram.Bucket) newBuckets.get(lastValidPosition + i + 1);
                    Bucket bucket = newBuckets.get(lastValidPosition + i + 1);

                    // Get the existing aggs in the bucket so we don't clobber data
                    aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false).map((p) -> {

@@ -171,8 +158,7 @@ public class MovAvgPipelineAggregator extends PipelineAggregator {
                    }).collect(Collectors.toList());
                    aggs.add(new InternalSimpleValue(name(), predictions[i], formatter, new ArrayList<PipelineAggregator>(), metaData()));

                    InternalHistogram.Bucket newBucket = factory.createBucket(newKey, 0, new InternalAggregations(
                            aggs), keyed, formatter);
                    Bucket newBucket = factory.createBucket(newKey, 0, new InternalAggregations(aggs));

                    // Overwrite the existing bucket with the new version
                    newBuckets.set(lastValidPosition + i + 1, newBucket);

@@ -182,8 +168,7 @@ public class MovAvgPipelineAggregator extends PipelineAggregator {
                    aggs = new ArrayList<>();
                    aggs.add(new InternalSimpleValue(name(), predictions[i], formatter, new ArrayList<PipelineAggregator>(), metaData()));

                    InternalHistogram.Bucket newBucket = factory.createBucket(newKey, 0, new InternalAggregations(
                            aggs), keyed, formatter);
                    Bucket newBucket = factory.createBucket(newKey, 0, new InternalAggregations(aggs));

                    // Since this is a new bucket, simply append it
                    newBuckets.add(newBucket);

@@ -192,16 +177,16 @@ public class MovAvgPipelineAggregator extends PipelineAggregator {
            }
        }

        return factory.create(newBuckets, histo);
        return factory.createAggregation(newBuckets);
    }
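    // Predicted buckets are appended (or merged into trailing buckets) after the last bucket that
    // contributed a value: each prediction key is derived from the previous one via
    // HistogramFactory.nextKey, so predictions land on the same key grid as the real buckets.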
|
||||
|
||||
private MovAvgModel minimize(List<? extends InternalHistogram.Bucket> buckets, InternalHistogram histo, MovAvgModel model) {
|
||||
private MovAvgModel minimize(List<? extends Bucket> buckets, MultiBucketsAggregation histo, MovAvgModel model) {
|
||||
|
||||
int counter = 0;
|
||||
EvictingQueue<Double> values = new EvictingQueue<>(this.window);
|
||||
|
||||
double[] test = new double[window];
|
||||
ListIterator<? extends InternalHistogram.Bucket> iter = buckets.listIterator(buckets.size());
|
||||
ListIterator<? extends Bucket> iter = buckets.listIterator(buckets.size());
|
||||
|
||||
// We have to walk the iterator backwards because we don't know if/how many buckets are empty.
|
||||
while (iter.hasPrevious() && counter < window) {
|
||||
|
|
|
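The hunks above replace the histogram-specific types (InternalHistogram.Bucket, InternalHistogram.Factory) with the generic MultiBucketsAggregation.Bucket plus a HistogramFactory abstraction, so the moving-average step no longer cares whether it runs over a numeric or a date histogram. A minimal sketch of the factory shape implied by the calls in this diff (getKey, nextKey, createBucket, createAggregation); this is an illustration inferred from those calls, not the actual interface source:

import java.util.List;

// Sketch only: the real contract is org.elasticsearch.search.aggregations.bucket.histogram.HistogramFactory;
// the method names below mirror the calls made by the aggregator in the diff above.
interface HistogramFactorySketch<B> {
    Number getKey(B bucket);                       // numeric key or epoch millis for date histograms
    Number nextKey(Number key);                    // the next rounded key after the given one
    B createBucket(Number key, long docCount, Object aggregations);
    Object createAggregation(List<B> buckets);     // rebuild the histogram from the new bucket list
}

With that indirection, the predict loop above only ever manipulates keys as Number values and lets the concrete histogram decide how to round and materialize them.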
@ -26,8 +26,10 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket;
import org.elasticsearch.search.aggregations.bucket.histogram.HistogramFactory;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram;
import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy;
import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;

@ -78,17 +80,17 @@ public class SerialDiffPipelineAggregator extends PipelineAggregator {
@Override
public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) {
InternalHistogram histo = (InternalHistogram) aggregation;
List<? extends InternalHistogram.Bucket> buckets = histo.getBuckets();
InternalHistogram.Factory<? extends InternalHistogram.Bucket> factory = histo.getFactory();
MultiBucketsAggregation histo = (MultiBucketsAggregation) aggregation;
List<? extends Bucket> buckets = histo.getBuckets();
HistogramFactory factory = (HistogramFactory) histo;
List newBuckets = new ArrayList<>();
List<Bucket> newBuckets = new ArrayList<>();
EvictingQueue<Double> lagWindow = new EvictingQueue<>(lag);
int counter = 0;
for (InternalHistogram.Bucket bucket : buckets) {
for (Bucket bucket : buckets) {
Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], gapPolicy);
InternalHistogram.Bucket newBucket = bucket;
Bucket newBucket = bucket;
counter += 1;

@ -113,8 +115,7 @@ public class SerialDiffPipelineAggregator extends PipelineAggregator {
return (InternalAggregation) p;
}).collect(Collectors.toList());
aggs.add(new InternalSimpleValue(name(), diff, formatter, new ArrayList<PipelineAggregator>(), metaData()));
newBucket = factory.createBucket(bucket.getKey(), bucket.getDocCount(), new InternalAggregations(
aggs), bucket.getKeyed(), bucket.getFormatter());
newBucket = factory.createBucket(factory.getKey(bucket), bucket.getDocCount(), new InternalAggregations(aggs));
}

@ -122,6 +123,6 @@ public class SerialDiffPipelineAggregator extends PipelineAggregator {
lagWindow.add(thisBucketValue);
}
return factory.create(newBuckets, histo);
return factory.createAggregation(newBuckets);
}
}
@ -348,7 +348,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
try {
// we flush first to make sure we get the latest writes snapshotted
IndexCommit snapshotIndexCommit = indexShard.snapshotIndex(true);
IndexCommit snapshotIndexCommit = indexShard.acquireIndexCommit(true);
try {
repository.snapshotShard(indexShard, snapshot.getSnapshotId(), indexId, snapshotIndexCommit, snapshotStatus);
if (logger.isDebugEnabled()) {

@ -358,7 +358,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
TimeValue.timeValueMillis(snapshotStatus.time()), sb);
}
} finally {
indexShard.releaseSnapshot(snapshotIndexCommit);
indexShard.releaseIndexCommit(snapshotIndexCommit);
}
} catch (SnapshotFailedEngineException e) {
throw e;
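The snapshot path above now acquires an index commit explicitly and releases it in a finally block (acquireIndexCommit/releaseIndexCommit replacing snapshotIndex/releaseSnapshot). A minimal sketch of that acquire/release discipline; only the two method names come from the diff, the shard and commit types here are placeholders rather than Elasticsearch classes:

// Sketch: the try/finally shape used above. ShardLike/Commit are stand-ins;
// only acquireIndexCommit/releaseIndexCommit mirror the renamed API in the diff.
final class CommitSnapshotSketch {
    interface Commit {}
    interface ShardLike {
        Commit acquireIndexCommit(boolean flushFirst);
        void releaseIndexCommit(Commit commit);
    }

    static void snapshot(ShardLike shard) {
        Commit commit = shard.acquireIndexCommit(true); // flush first, then pin the commit point
        try {
            // ... hand the pinned commit to the repository for copying ...
        } finally {
            shard.releaseIndexCommit(commit);           // always release, even if the copy fails
        }
    }
}

Pairing the acquire with a finally-scoped release keeps the commit from being deleted by merges while the snapshot is running, without leaking it on error paths.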
@ -18,6 +18,13 @@
*/
package org.elasticsearch.action.admin.cluster.node.tasks;
import java.io.IOException;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;
import org.elasticsearch.Version;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.TransportCancelTasksAction;

@ -50,12 +57,6 @@ import org.junit.After;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import java.io.IOException;
import java.util.HashSet;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;
import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;
import static org.elasticsearch.test.ClusterServiceUtils.createClusterService;

@ -167,7 +168,7 @@ public abstract class TaskManagerTestCase extends ESTestCase {
public TestNode(String name, ThreadPool threadPool, Settings settings) {
clusterService = createClusterService(threadPool);
transportService = new TransportService(settings,
new LocalTransport(settings, threadPool, new NamedWriteableRegistry(),
new LocalTransport(settings, threadPool, new NamedWriteableRegistry(Collections.emptyList()),
new NoneCircuitBreakerService()), threadPool) {
@Override
protected TaskManager createTaskManager() {
@ -74,8 +74,9 @@ public class ClusterRerouteRequestTests extends ESTestCase {
private final AllocationCommandRegistry allocationCommandRegistry;
public ClusterRerouteRequestTests() {
namedWriteableRegistry = new NamedWriteableRegistry();
allocationCommandRegistry = new NetworkModule(null, null, true, namedWriteableRegistry).getAllocationCommandRegistry();
NetworkModule networkModule = new NetworkModule(null, null, true);
allocationCommandRegistry = networkModule.getAllocationCommandRegistry();
namedWriteableRegistry = new NamedWriteableRegistry(networkModule.getNamedWriteables());
}
private ClusterRerouteRequest randomRequest() {
@ -64,8 +64,8 @@ public class ClusterRerouteTests extends ESAllocationTestCase {
BytesStreamOutput out = new BytesStreamOutput();
req.writeTo(out);
BytesReference bytes = out.bytes();
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();
new NetworkModule(null, Settings.EMPTY, true, namedWriteableRegistry);
NetworkModule networkModule = new NetworkModule(null, Settings.EMPTY, true);
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(networkModule.getNamedWriteables());
StreamInput wrap = new NamedWriteableAwareStreamInput(bytes.streamInput(),
namedWriteableRegistry);
ClusterRerouteRequest deserializedReq = new ClusterRerouteRequest();
|
|||
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.admin.indices.segments.IndexSegments;
|
||||
import org.elasticsearch.action.admin.indices.segments.IndexShardSegments;
|
||||
import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse;
|
||||
import org.elasticsearch.action.admin.indices.segments.ShardSegments;
|
||||
import org.elasticsearch.action.admin.indices.upgrade.get.IndexUpgradeStatus;
|
||||
import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusResponse;
|
||||
import org.elasticsearch.action.index.IndexRequestBuilder;
|
||||
import org.elasticsearch.client.Client;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.ConcurrentRebalanceAllocationDecider;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.engine.Segment;
|
||||
import org.elasticsearch.test.ESBackcompatTestCase;
|
||||
import org.elasticsearch.test.ESIntegTestCase;
|
||||
import org.junit.BeforeClass;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
|
||||
import static org.elasticsearch.test.OldIndexUtils.assertNotUpgraded;
|
||||
import static org.elasticsearch.test.OldIndexUtils.assertUpgraded;
|
||||
import static org.elasticsearch.test.OldIndexUtils.getUpgradeStatus;
|
||||
import static org.elasticsearch.test.OldIndexUtils.isUpgraded;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
|
||||
|
||||
|
@ -152,30 +147,6 @@ public class UpgradeIT extends ESBackcompatTestCase {
|
|||
assertUpgraded(client());
|
||||
}
|
||||
|
||||
public static void assertNotUpgraded(Client client, String... index) throws Exception {
|
||||
for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) {
|
||||
assertTrue("index " + status.getIndex() + " should not be zero sized", status.getTotalBytes() != 0);
|
||||
// TODO: it would be better for this to be strictly greater, but sometimes an extra flush
|
||||
// mysteriously happens after the second round of docs are indexed
|
||||
assertTrue("index " + status.getIndex() + " should have recovered some segments from transaction log",
|
||||
status.getTotalBytes() >= status.getToUpgradeBytes());
|
||||
assertTrue("index " + status.getIndex() + " should need upgrading", status.getToUpgradeBytes() != 0);
|
||||
}
|
||||
}
|
||||
|
||||
public static void assertNoAncientSegments(Client client, String... index) throws Exception {
|
||||
for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) {
|
||||
assertTrue("index " + status.getIndex() + " should not be zero sized", status.getTotalBytes() != 0);
|
||||
// TODO: it would be better for this to be strictly greater, but sometimes an extra flush
|
||||
// mysteriously happens after the second round of docs are indexed
|
||||
assertTrue("index " + status.getIndex() + " should not have any ancient segments",
|
||||
status.getToUpgradeBytesAncient() == 0);
|
||||
assertTrue("index " + status.getIndex() + " should have recovered some segments from transaction log",
|
||||
status.getTotalBytes() >= status.getToUpgradeBytes());
|
||||
assertTrue("index " + status.getIndex() + " should need upgrading", status.getToUpgradeBytes() != 0);
|
||||
}
|
||||
}
|
||||
|
||||
/** Returns true if there are any ancient segments. */
|
||||
public static boolean hasAncientSegments(Client client, String index) throws Exception {
|
||||
for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) {
|
||||
|
@ -196,44 +167,6 @@ public class UpgradeIT extends ESBackcompatTestCase {
|
|||
return false;
|
||||
}
|
||||
|
||||
public static void assertUpgraded(Client client, String... index) throws Exception {
|
||||
for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) {
|
||||
assertTrue("index " + status.getIndex() + " should not be zero sized", status.getTotalBytes() != 0);
|
||||
assertEquals("index " + status.getIndex() + " should be upgraded",
|
||||
0, status.getToUpgradeBytes());
|
||||
}
|
||||
|
||||
// double check using the segments api that all segments are actually upgraded
|
||||
IndicesSegmentResponse segsRsp;
|
||||
if (index == null) {
|
||||
segsRsp = client().admin().indices().prepareSegments().execute().actionGet();
|
||||
} else {
|
||||
segsRsp = client().admin().indices().prepareSegments(index).execute().actionGet();
|
||||
}
|
||||
for (IndexSegments indexSegments : segsRsp.getIndices().values()) {
|
||||
for (IndexShardSegments shard : indexSegments) {
|
||||
for (ShardSegments segs : shard.getShards()) {
|
||||
for (Segment seg : segs.getSegments()) {
|
||||
assertEquals("Index " + indexSegments.getIndex() + " has unupgraded segment " + seg.toString(),
|
||||
Version.CURRENT.luceneVersion.major, seg.version.major);
|
||||
assertEquals("Index " + indexSegments.getIndex() + " has unupgraded segment " + seg.toString(),
|
||||
Version.CURRENT.luceneVersion.minor, seg.version.minor);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static boolean isUpgraded(Client client, String index) throws Exception {
|
||||
ESLogger logger = Loggers.getLogger(UpgradeIT.class);
|
||||
int toUpgrade = 0;
|
||||
for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) {
|
||||
logger.info("Index: {}, total: {}, toUpgrade: {}", status.getIndex(), status.getTotalBytes(), status.getToUpgradeBytes());
|
||||
toUpgrade += status.getToUpgradeBytes();
|
||||
}
|
||||
return toUpgrade == 0;
|
||||
}
|
||||
|
||||
static class UpgradeStatus {
|
||||
public final String indexName;
|
||||
public final int totalBytes;
|
||||
|
@ -249,10 +182,4 @@ public class UpgradeIT extends ESBackcompatTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
static Collection<IndexUpgradeStatus> getUpgradeStatus(Client client, String... indices) throws Exception {
|
||||
UpgradeStatusResponse upgradeStatusResponse = client.admin().indices().prepareUpgradeStatus(indices).get();
|
||||
assertNoFailures(upgradeStatusResponse);
|
||||
return upgradeStatusResponse.getIndices().values();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -54,6 +54,7 @@ import org.junit.Before;
import org.junit.BeforeClass;
import java.io.IOException;
import java.util.Collections;
import java.util.Date;
import java.util.HashSet;
import java.util.List;

@ -89,7 +90,7 @@ public class BroadcastReplicationTests extends ESTestCase {
@Before
public void setUp() throws Exception {
super.setUp();
LocalTransport transport = new LocalTransport(Settings.EMPTY, threadPool, new NamedWriteableRegistry(), circuitBreakerService);
LocalTransport transport = new LocalTransport(Settings.EMPTY, threadPool, new NamedWriteableRegistry(Collections.emptyList()), circuitBreakerService);
clusterService = createClusterService(threadPool);
transportService = new TransportService(clusterService.getSettings(), transport, threadPool);
transportService.start();
@ -19,10 +19,8 @@
package org.elasticsearch.bwcompat;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.SmallFloat;
import org.apache.lucene.util.TestUtil;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.get.GetIndexResponse;

@ -30,18 +28,14 @@ import org.elasticsearch.action.admin.indices.segments.IndexSegments;
import org.elasticsearch.action.admin.indices.segments.IndexShardSegments;
import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse;
import org.elasticsearch.action.admin.indices.segments.ShardSegments;
import org.elasticsearch.action.admin.indices.upgrade.UpgradeIT;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.IndexFolderUpgrader;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;

@ -50,7 +44,6 @@ import org.elasticsearch.env.Environment;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.gateway.MetaDataStateFormat;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.MergePolicyConfig;
import org.elasticsearch.index.engine.Segment;
import org.elasticsearch.index.mapper.string.StringFieldMapperPositionIncrementGapTests;
import org.elasticsearch.index.query.QueryBuilders;

@ -64,6 +57,7 @@ import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.InternalSettingsPlugin;
import org.elasticsearch.test.InternalTestCluster;
import org.elasticsearch.test.OldIndexUtils;
import org.elasticsearch.test.VersionUtils;
import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
import org.hamcrest.Matchers;

@ -72,13 +66,8 @@ import org.junit.Before;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.DirectoryStream;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;

@ -87,8 +76,8 @@ import java.util.Map;
import java.util.SortedSet;
import java.util.TreeSet;
import static org.elasticsearch.test.OldIndexUtils.assertUpgradeWorks;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
// needs at least 2 nodes since it bumps replicas to 1

@ -113,19 +102,8 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
@Before
public void initIndexesList() throws Exception {
indexes = loadIndexesList("index");
unsupportedIndexes = loadIndexesList("unsupported");
}
private List<String> loadIndexesList(String prefix) throws IOException {
List<String> indexes = new ArrayList<>();
try (DirectoryStream<Path> stream = Files.newDirectoryStream(getBwcIndicesPath(), prefix + "-*.zip")) {
for (Path path : stream) {
indexes.add(path.getFileName().toString());
}
}
Collections.sort(indexes);
return indexes;
indexes = OldIndexUtils.loadIndexesList("index", getBwcIndicesPath());
unsupportedIndexes = OldIndexUtils.loadIndexesList("unsupported", getBwcIndicesPath());
}
@AfterClass

@ -138,11 +116,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
@Override
public Settings nodeSettings(int ord) {
return Settings.builder()
.put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) // disable merging so no segments will be upgraded
.put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.getKey(), 30) // speed up recoveries
.put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING.getKey(), 30)
.build();
return OldIndexUtils.getSettings();
}
void setupCluster() throws Exception {

@ -151,14 +125,15 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
Path baseTempDir = createTempDir();
// start single data path node
Settings.Builder nodeSettings = Settings.builder()
.put(Environment.PATH_DATA_SETTING.getKey(), baseTempDir.resolve("single-path").toAbsolutePath())
.put(Node.NODE_MASTER_SETTING.getKey(), false); // workaround for dangling index loading issue when node is master
.put(Environment.PATH_DATA_SETTING.getKey(), baseTempDir.resolve("single-path").toAbsolutePath())
.put(Node.NODE_MASTER_SETTING.getKey(), false); // workaround for dangling index loading issue when node is master
InternalTestCluster.Async<String> singleDataPathNode = internalCluster().startNodeAsync(nodeSettings.build());
// start multi data path node
nodeSettings = Settings.builder()
.put(Environment.PATH_DATA_SETTING.getKey(), baseTempDir.resolve("multi-path1").toAbsolutePath() + "," + baseTempDir.resolve("multi-path2").toAbsolutePath())
.put(Node.NODE_MASTER_SETTING.getKey(), false); // workaround for dangling index loading issue when node is master
.put(Environment.PATH_DATA_SETTING.getKey(), baseTempDir.resolve("multi-path1").toAbsolutePath() + "," + baseTempDir
.resolve("multi-path2").toAbsolutePath())
.put(Node.NODE_MASTER_SETTING.getKey(), false); // workaround for dangling index loading issue when node is master
InternalTestCluster.Async<String> multiDataPathNode = internalCluster().startNodeAsync(nodeSettings.build());
// find single data path dir

@ -174,8 +149,8 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
multiDataPathNodeName = multiDataPathNode.get();
nodePaths = internalCluster().getInstance(NodeEnvironment.class, multiDataPathNodeName).nodeDataPaths();
assertEquals(2, nodePaths.length);
multiDataPath = new Path[] {nodePaths[0].resolve(NodeEnvironment.INDICES_FOLDER),
nodePaths[1].resolve(NodeEnvironment.INDICES_FOLDER)};
multiDataPath = new Path[]{nodePaths[0].resolve(NodeEnvironment.INDICES_FOLDER),
nodePaths[1].resolve(NodeEnvironment.INDICES_FOLDER)};
assertFalse(Files.exists(multiDataPath[0]));
assertFalse(Files.exists(multiDataPath[1]));
Files.createDirectories(multiDataPath[0]);

@ -186,42 +161,8 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
}
void upgradeIndexFolder() throws Exception {
final NodeEnvironment nodeEnvironment = internalCluster().getInstance(NodeEnvironment.class, singleDataPathNodeName);
IndexFolderUpgrader.upgradeIndicesIfNeeded(Settings.EMPTY, nodeEnvironment);
final NodeEnvironment nodeEnv = internalCluster().getInstance(NodeEnvironment.class, multiDataPathNodeName);
IndexFolderUpgrader.upgradeIndicesIfNeeded(Settings.EMPTY, nodeEnv);
}
String loadIndex(String indexFile) throws Exception {
Path unzipDir = createTempDir();
Path unzipDataDir = unzipDir.resolve("data");
String indexName = indexFile.replace(".zip", "").toLowerCase(Locale.ROOT).replace("unsupported-", "index-");
// decompress the index
Path backwardsIndex = getBwcIndicesPath().resolve(indexFile);
try (InputStream stream = Files.newInputStream(backwardsIndex)) {
TestUtil.unzip(stream, unzipDir);
}
// check it is unique
assertTrue(Files.exists(unzipDataDir));
Path[] list = FileSystemUtils.files(unzipDataDir);
if (list.length != 1) {
throw new IllegalStateException("Backwards index must contain exactly one cluster");
}
// the bwc scripts packs the indices under this path
Path src = list[0].resolve("nodes/0/indices/" + indexName);
assertTrue("[" + indexFile + "] missing index dir: " + src.toString(), Files.exists(src));
if (randomBoolean()) {
logger.info("--> injecting index [{}] into single data path", indexName);
copyIndex(logger, src, indexName, singleDataPath);
} else {
logger.info("--> injecting index [{}] into multi data path", indexName);
copyIndex(logger, src, indexName, multiDataPath);
}
return indexName;
OldIndexUtils.upgradeIndexFolder(internalCluster(), singleDataPathNodeName);
OldIndexUtils.upgradeIndexFolder(internalCluster(), multiDataPathNodeName);
}
void importIndex(String indexName) throws IOException {

@ -230,44 +171,6 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
ensureGreen(indexName);
}
// randomly distribute the files from src over dests paths
public static void copyIndex(final ESLogger logger, final Path src, final String indexName, final Path... dests) throws IOException {
Path destinationDataPath = dests[randomInt(dests.length - 1)];
for (Path dest : dests) {
Path indexDir = dest.resolve(indexName);
assertFalse(Files.exists(indexDir));
Files.createDirectories(indexDir);
}
Files.walkFileTree(src, new SimpleFileVisitor<Path>() {
@Override
public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
Path relativeDir = src.relativize(dir);
for (Path dest : dests) {
Path destDir = dest.resolve(indexName).resolve(relativeDir);
Files.createDirectories(destDir);
}
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
if (file.getFileName().toString().equals(IndexWriter.WRITE_LOCK_NAME)) {
// skip lock file, we don't need it
logger.trace("Skipping lock file: {}", file);
return FileVisitResult.CONTINUE;
}
Path relativeFile = src.relativize(file);
Path destFile = destinationDataPath.resolve(indexName).resolve(relativeFile);
logger.trace("--> Moving {} to {}", relativeFile, destFile);
Files.move(file, destFile);
assertFalse(Files.exists(file));
assertTrue(Files.exists(destFile));
return FileVisitResult.CONTINUE;
}
});
}
void unloadIndex(String indexName) throws Exception {
assertAcked(client().admin().indices().prepareDelete(indexName).get());
}

@ -295,7 +198,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
fail(msg.toString());
}
}
public void testOldIndexes() throws Exception {
setupCluster();

@ -309,8 +212,18 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
}
void assertOldIndexWorks(String index) throws Exception {
Version version = extractVersion(index);
String indexName = loadIndex(index);
Version version = OldIndexUtils.extractVersion(index);
Path[] paths;
if (randomBoolean()) {
logger.info("--> injecting index [{}] into single data path", index);
paths = new Path[]{singleDataPath};
} else {
logger.info("--> injecting index [{}] into multi data path", index);
paths = multiDataPath;
}
String indexName = index.replace(".zip", "").toLowerCase(Locale.ROOT).replace("unsupported-", "index-");
OldIndexUtils.loadIndex(indexName, index, createTempDir(), getBwcIndicesPath(), logger, paths);
// we explicitly upgrade the index folders as these indices
// are imported as dangling indices and not available on
// node startup

@ -322,21 +235,12 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
assertBasicAggregationWorks(indexName);
assertRealtimeGetWorks(indexName);
assertNewReplicasWork(indexName);
assertUpgradeWorks(indexName, isLatestLuceneVersion(version));
assertUpgradeWorks(client(), indexName, version);
assertDeleteByQueryWorked(indexName, version);
assertPositionIncrementGapDefaults(indexName, version);
unloadIndex(indexName);
}
Version extractVersion(String index) {
return Version.fromString(index.substring(index.indexOf('-') + 1, index.lastIndexOf('.')));
}
boolean isLatestLuceneVersion(Version version) {
return version.luceneVersion.major == Version.CURRENT.luceneVersion.major &&
version.luceneVersion.minor == Version.CURRENT.luceneVersion.minor;
}
void assertIndexSanity(String indexName, Version indexCreated) {
GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().addIndices(indexName).get();
assertEquals(1, getIndexResponse.indices().length);

@ -411,7 +315,8 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
void assertBasicAggregationWorks(String indexName) {
// histogram on a long
SearchResponse searchRsp = client().prepareSearch(indexName).addAggregation(AggregationBuilders.histogram("histo").field("long_sort").interval(10)).get();
SearchResponse searchRsp = client().prepareSearch(indexName).addAggregation(AggregationBuilders.histogram("histo").field
("long_sort").interval(10)).get();
ElasticsearchAssertions.assertSearchResponse(searchRsp);
Histogram histo = searchRsp.getAggregations().get("histo");
assertNotNull(histo);

@ -454,7 +359,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
final long startTime = System.currentTimeMillis();
logger.debug("--> creating [{}] replicas for index [{}]", numReplicas, indexName);
assertAcked(client().admin().indices().prepareUpdateSettings(indexName).setSettings(Settings.builder()
.put("number_of_replicas", numReplicas)
.put("number_of_replicas", numReplicas)
).execute().actionGet());
ensureGreen(TimeValue.timeValueMinutes(2), indexName);
logger.debug("--> index [{}] is green, took [{}]", indexName, TimeValue.timeValueMillis(System.currentTimeMillis() - startTime));

@ -482,14 +387,6 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
}
}
void assertUpgradeWorks(String indexName, boolean alreadyLatest) throws Exception {
if (alreadyLatest == false) {
UpgradeIT.assertNotUpgraded(client(), indexName);
}
assertNoFailures(client().admin().indices().prepareUpgrade(indexName).get());
UpgradeIT.assertUpgraded(client(), indexName);
}
private Path getNodeDir(String indexFile) throws IOException {
Path unzipDir = createTempDir();
Path unzipDataDir = unzipDir.resolve("data");
@ -436,8 +436,8 @@ public class AllocationCommandsTests extends ESAllocationTestCase {
StreamInput in = bytes.bytes().streamInput();
// Since the commands are named writeable we need to register them and wrap the input stream
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();
new NetworkModule(null, Settings.EMPTY, true, namedWriteableRegistry);
NetworkModule networkModule = new NetworkModule(null, Settings.EMPTY, true);
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(networkModule.getNamedWriteables());
in = new NamedWriteableAwareStreamInput(in, namedWriteableRegistry);
// Now we can read them!

@ -483,8 +483,7 @@ public class AllocationCommandsTests extends ESAllocationTestCase {
// move two tokens, parser expected to be "on" `commands` field
parser.nextToken();
parser.nextToken();
AllocationCommandRegistry registry = new NetworkModule(null, Settings.EMPTY, true, new NamedWriteableRegistry())
.getAllocationCommandRegistry();
AllocationCommandRegistry registry = new NetworkModule(null, Settings.EMPTY, true).getAllocationCommandRegistry();
AllocationCommands sCommands = AllocationCommands.fromXContent(parser, ParseFieldMatcher.STRICT, registry);
assertThat(sCommands.commands().size(), equalTo(5));
@ -34,6 +34,10 @@ import org.junit.AfterClass;
import org.junit.BeforeClass;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.not;

@ -48,8 +52,9 @@ public abstract class AbstractShapeBuilderTestCase<SB extends ShapeBuilder> exte
@BeforeClass
public static void init() {
if (namedWriteableRegistry == null) {
namedWriteableRegistry = new NamedWriteableRegistry();
ShapeBuilders.register(namedWriteableRegistry);
List<NamedWriteableRegistry.Entry> shapes = new ArrayList<>();
ShapeBuilders.register(shapes);
namedWriteableRegistry = new NamedWriteableRegistry(shapes);
}
}
@ -30,6 +30,8 @@ import org.joda.time.DateTimeZone;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

@ -329,8 +331,9 @@ public class BytesStreamsTests extends ESTestCase {
public void testNamedWriteable() throws IOException {
BytesStreamOutput out = new BytesStreamOutput();
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();
namedWriteableRegistry.register(BaseNamedWriteable.class, TestNamedWriteable.NAME, TestNamedWriteable::new);
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.singletonList(
new NamedWriteableRegistry.Entry(BaseNamedWriteable.class, TestNamedWriteable.NAME, TestNamedWriteable::new)
));
TestNamedWriteable namedWriteableIn = new TestNamedWriteable(randomAsciiOfLengthBetween(1, 10), randomAsciiOfLengthBetween(1, 10));
out.writeNamedWriteable(namedWriteableIn);
byte[] bytes = BytesReference.toBytes(out.bytes());

@ -342,8 +345,9 @@ public class BytesStreamsTests extends ESTestCase {
}
public void testNamedWriteableList() throws IOException {
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();
namedWriteableRegistry.register(BaseNamedWriteable.class, TestNamedWriteable.NAME, TestNamedWriteable::new);
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.singletonList(
new NamedWriteableRegistry.Entry(BaseNamedWriteable.class, TestNamedWriteable.NAME, TestNamedWriteable::new)
));
int size = between(0, 100);
List<BaseNamedWriteable> expected = new ArrayList<>(size);
for (int i = 0; i < size; i++) {

@ -359,48 +363,6 @@ public class BytesStreamsTests extends ESTestCase {
}
}
public void testNamedWriteableDuplicates() throws IOException {
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();
namedWriteableRegistry.register(BaseNamedWriteable.class, TestNamedWriteable.NAME, TestNamedWriteable::new);
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
() -> namedWriteableRegistry.register(BaseNamedWriteable.class, TestNamedWriteable.NAME, TestNamedWriteable::new));
assertThat(e.getMessage(), startsWith("named writeable [" + BaseNamedWriteable.class.getName() + "][" + TestNamedWriteable.NAME
+ "] is already registered by ["));
}
public void testNamedWriteableUnknownCategory() throws IOException {
BytesStreamOutput out = new BytesStreamOutput();
out.writeNamedWriteable(new TestNamedWriteable("test1", "test2"));
StreamInput in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), new NamedWriteableRegistry());
//no named writeable registered with given name, can write but cannot read it back
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> in.readNamedWriteable(BaseNamedWriteable.class));
assertThat(e.getMessage(), equalTo("unknown named writeable category [" + BaseNamedWriteable.class.getName() + "]"));
}
public void testNamedWriteableUnknownNamedWriteable() throws IOException {
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();
namedWriteableRegistry.register(BaseNamedWriteable.class, TestNamedWriteable.NAME, TestNamedWriteable::new);
BytesStreamOutput out = new BytesStreamOutput();
out.writeNamedWriteable(new NamedWriteable() {
@Override
public String getWriteableName() {
return "unknown";
}
@Override
public void writeTo(StreamOutput out) throws IOException {
}
});
StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(BytesReference.toBytes(out.bytes())), namedWriteableRegistry);
try {
//no named writeable registered with given name under test category, can write but cannot read it back
in.readNamedWriteable(BaseNamedWriteable.class);
fail("read should have failed");
} catch(IllegalArgumentException e) {
assertThat(e.getMessage(), equalTo("unknown named writeable [" + BaseNamedWriteable.class.getName() + "][unknown]"));
}
}
public void testNamedWriteableNotSupportedWithoutWrapping() throws IOException {
BytesStreamOutput out = new BytesStreamOutput();
TestNamedWriteable testNamedWriteable = new TestNamedWriteable("test1", "test2");

@ -416,8 +378,9 @@ public class BytesStreamsTests extends ESTestCase {
public void testNamedWriteableReaderReturnsNull() throws IOException {
BytesStreamOutput out = new BytesStreamOutput();
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();
namedWriteableRegistry.register(BaseNamedWriteable.class, TestNamedWriteable.NAME, (StreamInput in) -> null);
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.singletonList(
new NamedWriteableRegistry.Entry(BaseNamedWriteable.class, TestNamedWriteable.NAME, (StreamInput in) -> null)
));
TestNamedWriteable namedWriteableIn = new TestNamedWriteable(randomAsciiOfLengthBetween(1, 10), randomAsciiOfLengthBetween(1, 10));
out.writeNamedWriteable(namedWriteableIn);
byte[] bytes = BytesReference.toBytes(out.bytes());

@ -437,13 +400,15 @@ public class BytesStreamsTests extends ESTestCase {
public void testWriteableReaderReturnsWrongName() throws IOException {
BytesStreamOutput out = new BytesStreamOutput();
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();
namedWriteableRegistry.register(BaseNamedWriteable.class, TestNamedWriteable.NAME, (StreamInput in) -> new TestNamedWriteable(in) {
@Override
public String getWriteableName() {
return "intentionally-broken";
}
});
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.singletonList(
new NamedWriteableRegistry.Entry(BaseNamedWriteable.class, TestNamedWriteable.NAME, (StreamInput in) ->
new TestNamedWriteable(in) {
@Override
public String getWriteableName() {
return "intentionally-broken";
}
})
));
TestNamedWriteable namedWriteableIn = new TestNamedWriteable(randomAsciiOfLengthBetween(1, 10), randomAsciiOfLengthBetween(1, 10));
out.writeNamedWriteable(namedWriteableIn);
byte[] bytes = BytesReference.toBytes(out.bytes());
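The BytesStreamsTests hunks above show the new single-shot construction: entries are collected up front and passed to the NamedWriteableRegistry constructor, and duplicate or unknown names now surface from the constructor or from getReader rather than from register(). A hedged round-trip sketch using only types visible in this diff; the "Marker" writeable below is a stand-in, not an Elasticsearch class:

import java.io.IOException;
import java.util.Collections;

import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

// Sketch: register one entry at construction time, write a named writeable,
// then read it back through a registry-aware stream.
class NamedWriteableRoundTripSketch {
    static class Marker implements NamedWriteable {
        Marker() {}
        Marker(StreamInput in) {}
        @Override public String getWriteableName() { return "marker"; }
        @Override public void writeTo(StreamOutput out) throws IOException {}
    }

    static NamedWriteable roundTrip() throws IOException {
        NamedWriteableRegistry registry = new NamedWriteableRegistry(Collections.singletonList(
            new NamedWriteableRegistry.Entry(NamedWriteable.class, "marker", Marker::new)));
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            out.writeNamedWriteable(new Marker());
            StreamInput in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), registry);
            return in.readNamedWriteable(NamedWriteable.class);
        }
    }
}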
@ -0,0 +1,75 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.io.stream;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import org.elasticsearch.test.ESTestCase;
public class NamedWriteableRegistryTests extends ESTestCase {
private static class DummyNamedWriteable implements NamedWriteable {
public DummyNamedWriteable(StreamInput in) {}
@Override
public String getWriteableName() {
return "test";
}
@Override
public void writeTo(StreamOutput out) throws IOException {}
}
public void testEmpty() throws IOException {
new NamedWriteableRegistry(Collections.emptyList()); // does not throw exception
}
public void testBasic() throws IOException {
NamedWriteableRegistry.Entry entry =
new NamedWriteableRegistry.Entry(NamedWriteable.class, "test", DummyNamedWriteable::new);
NamedWriteableRegistry registry = new NamedWriteableRegistry(Collections.singletonList(entry));
Writeable.Reader<? extends NamedWriteable> reader = registry.getReader(NamedWriteable.class, "test");
assertNotNull(reader.read(null));
}
public void testDuplicates() throws IOException {
NamedWriteableRegistry.Entry entry =
new NamedWriteableRegistry.Entry(NamedWriteable.class, "test", DummyNamedWriteable::new);
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
() -> new NamedWriteableRegistry(Arrays.asList(entry, entry)));
assertTrue(e.getMessage(), e.getMessage().contains("is already registered"));
}
public void testUnknownCategory() throws IOException {
NamedWriteableRegistry registry = new NamedWriteableRegistry(Collections.emptyList());
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () ->
registry.getReader(NamedWriteable.class, "test"));
assertTrue(e.getMessage(), e.getMessage().contains("Unknown NamedWriteable category ["));
}
public void testUnknownName() throws IOException {
NamedWriteableRegistry.Entry entry =
new NamedWriteableRegistry.Entry(NamedWriteable.class, "test", DummyNamedWriteable::new);
NamedWriteableRegistry registry = new NamedWriteableRegistry(Collections.singletonList(entry));
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () ->
registry.getReader(NamedWriteable.class, "dne"));
assertTrue(e.getMessage(), e.getMessage().contains("Unknown NamedWriteable ["));
}
}
@ -19,6 +19,9 @@
package org.elasticsearch.common.network;
import java.io.IOException;
import java.util.Collections;
import org.elasticsearch.action.support.replication.ReplicationTask;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.Table;

@ -43,9 +46,6 @@ import org.elasticsearch.test.transport.AssertingLocalTransport;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.Collections;
public class NetworkModuleTests extends ModuleTestCase {
static class FakeTransportService extends TransportService {

@ -113,14 +113,13 @@ public class NetworkModuleTests extends ModuleTestCase {
.put(NetworkModule.HTTP_ENABLED.getKey(), false)
.put(NetworkModule.TRANSPORT_TYPE_KEY, "local")
.build();
NetworkModule module = new NetworkModule(new NetworkService(settings, Collections.emptyList()), settings, false,
new NamedWriteableRegistry());
NetworkModule module = new NetworkModule(new NetworkService(settings, Collections.emptyList()), settings, false);
module.registerTransportService("custom", FakeTransportService.class);
assertBinding(module, TransportService.class, FakeTransportService.class);
assertFalse(module.isTransportClient());
// check it works with transport only as well
module = new NetworkModule(new NetworkService(settings, Collections.emptyList()), settings, true, new NamedWriteableRegistry());
module = new NetworkModule(new NetworkService(settings, Collections.emptyList()), settings, true);
module.registerTransportService("custom", FakeTransportService.class);
assertBinding(module, TransportService.class, FakeTransportService.class);
assertTrue(module.isTransportClient());

@ -130,14 +129,13 @@ public class NetworkModuleTests extends ModuleTestCase {
Settings settings = Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, "custom")
.put(NetworkModule.HTTP_ENABLED.getKey(), false)
.build();
NetworkModule module = new NetworkModule(new NetworkService(settings, Collections.emptyList()), settings, false,
new NamedWriteableRegistry());
NetworkModule module = new NetworkModule(new NetworkService(settings, Collections.emptyList()), settings, false);
module.registerTransport("custom", FakeTransport.class);
assertBinding(module, Transport.class, FakeTransport.class);
assertFalse(module.isTransportClient());
// check it works with transport only as well
module = new NetworkModule(new NetworkService(settings, Collections.emptyList()), settings, true, new NamedWriteableRegistry());
module = new NetworkModule(new NetworkService(settings, Collections.emptyList()), settings, true);
module.registerTransport("custom", FakeTransport.class);
assertBinding(module, Transport.class, FakeTransport.class);
assertTrue(module.isTransportClient());

@ -147,14 +145,13 @@ public class NetworkModuleTests extends ModuleTestCase {
Settings settings = Settings.builder()
.put(NetworkModule.HTTP_TYPE_SETTING.getKey(), "custom")
.put(NetworkModule.TRANSPORT_TYPE_KEY, "local").build();
NetworkModule module = new NetworkModule(new NetworkService(settings, Collections.emptyList()), settings, false,
new NamedWriteableRegistry());
NetworkModule module = new NetworkModule(new NetworkService(settings, Collections.emptyList()), settings, false);
module.registerHttpTransport("custom", FakeHttpTransport.class);
assertBinding(module, HttpServerTransport.class, FakeHttpTransport.class);
assertFalse(module.isTransportClient());
// check registration not allowed for transport only
module = new NetworkModule(new NetworkService(settings, Collections.emptyList()), settings, true, new NamedWriteableRegistry());
module = new NetworkModule(new NetworkService(settings, Collections.emptyList()), settings, true);
assertTrue(module.isTransportClient());
try {
module.registerHttpTransport("custom", FakeHttpTransport.class);

@ -167,23 +164,22 @@ public class NetworkModuleTests extends ModuleTestCase {
// not added if http is disabled
settings = Settings.builder().put(NetworkModule.HTTP_ENABLED.getKey(), false)
.put(NetworkModule.TRANSPORT_TYPE_KEY, "local").build();
module = new NetworkModule(new NetworkService(settings, Collections.emptyList()), settings, false, new NamedWriteableRegistry());
module = new NetworkModule(new NetworkService(settings, Collections.emptyList()), settings, false);
assertNotBound(module, HttpServerTransport.class);
assertFalse(module.isTransportClient());
}
public void testRegisterTaskStatus() {
NamedWriteableRegistry registry = new NamedWriteableRegistry();
Settings settings = Settings.EMPTY;
NetworkModule module = new NetworkModule(new NetworkService(settings, Collections.emptyList()), settings, false, registry);
NetworkModule module = new NetworkModule(new NetworkService(settings, Collections.emptyList()), settings, false);
NamedWriteableRegistry registry = new NamedWriteableRegistry(module.getNamedWriteables());
assertFalse(module.isTransportClient());
// Builtin reader comes back
assertNotNull(registry.getReader(Task.Status.class, ReplicationTask.Status.NAME));
module.registerTaskStatus(DummyTaskStatus.NAME, DummyTaskStatus::new);
assertEquals("test", expectThrows(UnsupportedOperationException.class,
() -> registry.getReader(Task.Status.class, DummyTaskStatus.NAME).read(null)).getMessage());
assertTrue(module.getNamedWriteables().stream().anyMatch(x -> x.name.equals(DummyTaskStatus.NAME)));
}
private class DummyTaskStatus implements Task.Status {
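testRegisterTaskStatus above now verifies extension by inspecting module.getNamedWriteables() instead of mutating a shared registry: contributors expose their entries, and the node builds one immutable registry from all contributions. A hedged sketch of that collection step; the "contributor" interface here is illustrative (in the diff the contributor is NetworkModule via getNamedWriteables()), only NamedWriteableRegistry and its Entry type come from the diff:

import java.util.ArrayList;
import java.util.List;

import org.elasticsearch.common.io.stream.NamedWriteableRegistry;

// Sketch: gather entries from several contributors, then construct the registry once.
class RegistryAssemblySketch {
    interface NamedWriteableContributor {
        List<NamedWriteableRegistry.Entry> getNamedWriteables();
    }

    static NamedWriteableRegistry build(List<NamedWriteableContributor> contributors) {
        List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
        for (NamedWriteableContributor c : contributors) {
            entries.addAll(c.getNamedWriteables());       // each module/plugin contributes its readers
        }
        return new NamedWriteableRegistry(entries);       // immutable from here on
    }
}

Collecting entries first and constructing once is what lets duplicate registrations fail fast at startup instead of at some later register() call.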
@ -33,10 +33,8 @@ public class RoundingTests extends ESTestCase {
int interval = 10;
Rounding.Interval rounding = new Rounding.Interval(interval);
int value = 24;
final long key = rounding.roundKey(24);
final long r = rounding.round(24);
String message = "round(" + value + ", interval=" + interval + ") = " + r;
assertEquals(value/interval, key);
assertEquals(value/interval * interval, r);
assertEquals(message, 0, r % interval);
}

@ -46,13 +44,11 @@ public class RoundingTests extends ESTestCase {
Rounding.Interval rounding = new Rounding.Interval(interval);
for (int i = 0; i < 1000; ++i) {
long l = Math.max(randomLong(), Long.MIN_VALUE + interval);
final long key = rounding.roundKey(l);
final long r = rounding.round(l);
String message = "round(" + l + ", interval=" + interval + ") = " + r;
assertEquals(message, 0, r % interval);
assertThat(message, r, lessThanOrEqualTo(l));
assertThat(message, r + interval, greaterThan(l));
assertEquals(message, r, key*interval);
}
}

@ -65,15 +61,11 @@ public class RoundingTests extends ESTestCase {
final long interval = 10;
final long offset = 7;
Rounding.OffsetRounding rounding = new Rounding.OffsetRounding(new Rounding.Interval(interval), offset);
assertEquals(-1, rounding.roundKey(6));
assertEquals(-3, rounding.round(6));
assertEquals(7, rounding.nextRoundingValue(-3));
assertEquals(0, rounding.roundKey(7));
assertEquals(7, rounding.round(7));
assertEquals(17, rounding.nextRoundingValue(7));
assertEquals(0, rounding.roundKey(16));
assertEquals(7, rounding.round(16));
assertEquals(1, rounding.roundKey(17));
assertEquals(17, rounding.round(17));
assertEquals(27, rounding.nextRoundingValue(17));
}

@ -89,13 +81,10 @@ public class RoundingTests extends ESTestCase {
Rounding.OffsetRounding rounding = new Rounding.OffsetRounding(internalRounding, offset);
long safetyMargin = Math.abs(interval) + Math.abs(offset); // to prevent range overflow
long value = Math.max(randomLong() - safetyMargin, Long.MIN_VALUE + safetyMargin);
final long key = rounding.roundKey(value);
final long key_next = rounding.roundKey(value + interval);
final long r_value = rounding.round(value);
final long nextRoundingValue = rounding.nextRoundingValue(r_value);
assertThat("Rounding should be idempotent", r_value, equalTo(rounding.round(r_value)));
assertThat("Rounded value smaller than unrounded, regardless of offset", r_value - offset, lessThanOrEqualTo(value - offset));
assertThat("Key and next_key should differ by one", key_next - key, equalTo(1L));
assertThat("Rounded value <= value < next interval start", r_value + interval, greaterThan(value));
assertThat("NextRounding value should be interval from rounded value", r_value + interval, equalTo(nextRoundingValue));
}
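The RoundingTests hunks above drop every roundKey assertion, leaving round and nextRoundingValue as the surface under test; what remains is that rounding is idempotent and that a value always falls between its rounded key and the next rounding value. A small hedged check of those two invariants; Rounding.Interval and the two methods are the ones exercised above, while the import path and the helper class are assumptions for illustration:

// Sketch of the invariants kept by the updated tests; interval must be positive,
// value is arbitrary. Import path assumed from the test package layout.
import org.elasticsearch.common.rounding.Rounding;

class RoundingInvariantSketch {
    static void check(long value, long interval) {
        Rounding.Interval rounding = new Rounding.Interval(interval);
        long rounded = rounding.round(value);
        assert rounded == rounding.round(rounded);          // rounding is idempotent
        assert rounded <= value;                            // never rounds up
        assert rounding.nextRoundingValue(rounded) > value;  // value lies inside its bucket
    }
}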
@ -65,9 +65,6 @@ public class TimeZoneRoundingTests extends ESTestCase {
Rounding tzRounding = TimeZoneRounding.builder(TimeValue.timeValueHours(12)).build();
DateTimeZone tz = DateTimeZone.UTC;
assertThat(tzRounding.round(time("2009-02-03T01:01:01")), isDate(time("2009-02-03T00:00:00.000Z"), tz));
long roundKey = tzRounding.roundKey(time("2009-02-03T01:01:01"));
assertThat(roundKey, isDate(tzRounding.roundKey(time("2009-02-03T00:00:00.000Z")), tz));
assertThat(tzRounding.valueForKey(roundKey), isDate(time("2009-02-03T00:00:00.000Z"), tz));
assertThat(tzRounding.nextRoundingValue(time("2009-02-03T00:00:00.000Z")), isDate(time("2009-02-03T12:00:00.000Z"), tz));
assertThat(tzRounding.round(time("2009-02-03T13:01:01")), isDate(time("2009-02-03T12:00:00.000Z"), tz));
assertThat(tzRounding.nextRoundingValue(time("2009-02-03T12:00:00.000Z")), isDate(time("2009-02-04T00:00:00.000Z"), tz));

@ -86,9 +83,6 @@ public class TimeZoneRoundingTests extends ESTestCase {
DateTimeZone tz = DateTimeZone.forOffsetHours(-1);
Rounding tzRounding = TimeZoneRounding.builder(TimeValue.timeValueHours(6)).timeZone(tz).build();
assertThat(tzRounding.round(time("2009-02-03T00:01:01")), isDate(time("2009-02-02T19:00:00.000Z"), tz));
long roundKey = tzRounding.roundKey(time("2009-02-03T00:01:01"));
assertThat(roundKey, equalTo(tzRounding.roundKey(time("2009-02-02T19:00:00.000Z"))));
assertThat(tzRounding.valueForKey(roundKey), isDate(time("2009-02-02T19:00:00.000Z"), tz));
assertThat(tzRounding.nextRoundingValue(time("2009-02-02T19:00:00.000Z")), isDate(time("2009-02-03T01:00:00.000Z"), tz));
assertThat(tzRounding.round(time("2009-02-03T13:01:01")), isDate(time("2009-02-03T13:00:00.000Z"), tz));

@ -102,9 +96,6 @@ public class TimeZoneRoundingTests extends ESTestCase {
DateTimeZone tz = DateTimeZone.forOffsetHours(-8);
Rounding tzRounding = TimeZoneRounding.builder(TimeValue.timeValueHours(12)).timeZone(tz).build();
assertThat(tzRounding.round(time("2009-02-03T00:01:01")), isDate(time("2009-02-02T20:00:00.000Z"), tz));
long roundKey = tzRounding.roundKey(time("2009-02-03T00:01:01"));
assertThat(roundKey, isDate(tzRounding.roundKey(time("2009-02-02T20:00:00.000Z")), tz));
assertThat(tzRounding.valueForKey(roundKey), isDate(time("2009-02-02T20:00:00.000Z"), tz));
assertThat(tzRounding.nextRoundingValue(time("2009-02-02T20:00:00.000Z")), isDate(time("2009-02-03T08:00:00.000Z"), tz));
assertThat(tzRounding.round(time("2009-02-03T13:01:01")), isDate(time("2009-02-03T08:00:00.000Z"), tz));

@ -130,17 +121,11 @@ public class TimeZoneRoundingTests extends ESTestCase {
tz = DateTimeZone.forID("-02:00");
tzRounding = TimeZoneRounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build();
assertThat(tzRounding.round(time("2009-02-03T01:01:01")), isDate(time("2009-02-02T02:00:00"), tz));
long roundKey = tzRounding.roundKey(time("2009-02-03T01:01:01"));
assertThat(roundKey, isDate(tzRounding.roundKey(time("2009-02-02T02:00:00.000Z")), tz));
assertThat(tzRounding.valueForKey(roundKey), isDate(time("2009-02-02T02:00:00.000Z"), tz));
assertThat(tzRounding.nextRoundingValue(time("2009-02-02T02:00:00")), isDate(time("2009-02-03T02:00:00"), tz));
// date in Feb-3rd, also in -02:00 timezone
tzRounding = TimeZoneRounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build();
assertThat(tzRounding.round(time("2009-02-03T02:01:01")), isDate(time("2009-02-03T02:00:00"), tz));
roundKey = tzRounding.roundKey(time("2009-02-03T02:01:01"));
assertThat(roundKey, isDate(tzRounding.roundKey(time("2009-02-03T02:00:00.000Z")), tz));
assertThat(tzRounding.valueForKey(roundKey), isDate(time("2009-02-03T02:00:00.000Z"), tz));
assertThat(tzRounding.nextRoundingValue(time("2009-02-03T02:00:00")), isDate(time("2009-02-04T02:00:00"), tz));
}
@ -23,7 +23,6 @@ import org.apache.lucene.util.CollectionUtil;
|
|||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util.TestUtil;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.bwcompat.OldIndexBackwardsCompatibilityIT;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.routing.AllocationId;
|
||||
import org.elasticsearch.common.UUIDs;
|
||||
|
@ -39,6 +38,7 @@ import org.elasticsearch.index.shard.ShardId;
|
|||
import org.elasticsearch.index.shard.ShardPath;
|
||||
import org.elasticsearch.index.shard.ShardStateMetaData;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.test.OldIndexUtils;
|
||||
|
||||
import java.io.BufferedWriter;
|
||||
import java.io.FileNotFoundException;
|
||||
|
@ -215,7 +215,7 @@ public class IndexFolderUpgraderTests extends ESTestCase {
|
|||
assertTrue("[" + path + "] missing index dir: " + src.toString(), Files.exists(src));
|
||||
final Path indicesPath = randomFrom(nodeEnvironment.nodePaths()).indicesPath;
|
||||
logger.info("--> injecting index [{}] into [{}]", indexName, indicesPath);
|
||||
OldIndexBackwardsCompatibilityIT.copyIndex(logger, src, indexName, indicesPath);
|
||||
OldIndexUtils.copyIndex(logger, src, indexName, indicesPath);
|
||||
IndexFolderUpgrader.upgradeIndicesIfNeeded(Settings.EMPTY, nodeEnvironment);
|
||||
|
||||
// ensure old index folder is deleted
|
||||
|
|
|
@ -134,7 +134,7 @@ public class ZenFaultDetectionTests extends ESTestCase {
|
|||
}
|
||||
|
||||
protected MockTransportService build(Settings settings, Version version) {
|
||||
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();
|
||||
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList());
|
||||
MockTransportService transportService =
|
||||
new MockTransportService(
|
||||
Settings.builder()
|
||||
|
|
|
@ -200,7 +200,7 @@ public class UnicastZenPingIT extends ESTestCase {
|
|||
private NetworkHandle startServices(Settings settings, ThreadPool threadPool, NetworkService networkService, String nodeId,
|
||||
Version version) {
|
||||
MockTcpTransport transport = new MockTcpTransport(settings, threadPool, BigArrays.NON_RECYCLING_INSTANCE,
|
||||
new NoneCircuitBreakerService(), new NamedWriteableRegistry(), networkService, version);
|
||||
new NoneCircuitBreakerService(), new NamedWriteableRegistry(Collections.emptyList()), networkService, version);
|
||||
final TransportService transportService = new TransportService(settings, transport, threadPool);
|
||||
transportService.start();
|
||||
transportService.acceptIncomingRequests();
|
||||
|
|
|
@ -135,7 +135,7 @@ public class IndexModuleTests extends ESTestCase {
|
|||
environment = new Environment(settings);
|
||||
nodeServicesProvider = newNodeServiceProvider(settings, environment, null);
|
||||
nodeEnvironment = new NodeEnvironment(settings, environment);
|
||||
mapperRegistry = new IndicesModule(new NamedWriteableRegistry(), Collections.emptyList()).getMapperRegistry();
|
||||
mapperRegistry = new IndicesModule(Collections.emptyList()).getMapperRegistry();
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -71,6 +71,7 @@ import java.util.List;
|
|||
import java.util.concurrent.CopyOnWriteArrayList;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
|
@ -698,7 +699,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
|
|||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
}, 1, TimeUnit.MINUTES);
|
||||
}
|
||||
|
||||
/** wait until the node has the specified number of shards allocated on it */
|
||||
|
@ -715,7 +716,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
|
|||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
}, 1, TimeUnit.MINUTES);
|
||||
}
|
||||
|
||||
public void testIndexOnSharedFSRecoversToAnyNode() throws Exception {
|
||||
|
|
|
@ -2023,7 +2023,7 @@ public class InternalEngineTests extends ESTestCase {
|
|||
IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(index, settings);
|
||||
AnalysisService analysisService = new AnalysisService(indexSettings, Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
|
||||
SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
|
||||
MapperRegistry mapperRegistry = new IndicesModule(new NamedWriteableRegistry(), Collections.emptyList()).getMapperRegistry();
|
||||
MapperRegistry mapperRegistry = new IndicesModule(Collections.emptyList()).getMapperRegistry();
|
||||
MapperService mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry, () -> null);
|
||||
DocumentMapper.Builder b = new DocumentMapper.Builder(rootBuilder, mapperService);
|
||||
this.docMapper = b.build(mapperService);
|
||||
|
|
|
@ -74,8 +74,8 @@ public class DynamicMappingDisabledTests extends ESSingleNodeTestCase {
|
|||
.put(MapperService.INDEX_MAPPER_DYNAMIC_SETTING.getKey(), false)
|
||||
.build();
|
||||
clusterService = createClusterService(THREAD_POOL);
|
||||
transport =
|
||||
new LocalTransport(settings, THREAD_POOL, new NamedWriteableRegistry(), new NoneCircuitBreakerService());
|
||||
transport = new LocalTransport(settings, THREAD_POOL, new NamedWriteableRegistry(Collections.emptyList()),
|
||||
new NoneCircuitBreakerService());
|
||||
transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL);
|
||||
indicesService = getInstanceFromNode(IndicesService.class);
|
||||
shardStateAction = new ShardStateAction(settings, clusterService, transportService, null, null, THREAD_POOL);
|
||||
|
|
|
@ -107,7 +107,7 @@ public class ParentMappingTests extends ESSingleNodeTestCase {
|
|||
Collections.emptyMap(), Collections.emptyMap());
|
||||
SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
|
||||
MapperService mapperService = new MapperService(indexSettings, analysisService, similarityService,
|
||||
new IndicesModule(new NamedWriteableRegistry(), emptyList()).getMapperRegistry(), () -> null);
|
||||
new IndicesModule(emptyList()).getMapperRegistry(), () -> null);
|
||||
XContentBuilder mappingSource = jsonBuilder().startObject().startObject("some_type")
|
||||
.startObject("properties")
|
||||
.endObject()
|
||||
|
|
|
@ -68,8 +68,9 @@ public class InnerHitBuilderTests extends ESTestCase {
|
|||
|
||||
@BeforeClass
|
||||
public static void init() {
|
||||
namedWriteableRegistry = new NamedWriteableRegistry();
|
||||
indicesQueriesRegistry = new SearchModule(Settings.EMPTY, namedWriteableRegistry, false, emptyList()).getQueryParserRegistry();
|
||||
SearchModule searchModule = new SearchModule(Settings.EMPTY, false, emptyList());
|
||||
namedWriteableRegistry = new NamedWriteableRegistry(searchModule.getNamedWriteables());
|
||||
indicesQueriesRegistry = searchModule.getQueryParserRegistry();
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
|
|
|
@ -42,8 +42,7 @@ public class QueryParseContextTests extends ESTestCase {
|
|||
|
||||
@BeforeClass
|
||||
public static void init() {
|
||||
indicesQueriesRegistry = new SearchModule(Settings.EMPTY, new NamedWriteableRegistry(), false, emptyList())
|
||||
.getQueryParserRegistry();
|
||||
indicesQueriesRegistry = new SearchModule(Settings.EMPTY, false, emptyList()).getQueryParserRegistry();
|
||||
}
|
||||
|
||||
public void testParseTopLevelBuilder() throws IOException {
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
package org.elasticsearch.index.replication;
|
||||
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.index.IndexNotFoundException;
|
||||
import org.apache.lucene.index.LeafReader;
|
||||
import org.apache.lucene.index.LeafReaderContext;
|
||||
import org.apache.lucene.store.AlreadyClosedException;
|
||||
|
@ -299,7 +300,7 @@ public abstract class ESIndexLevelReplicationTestCase extends ESTestCase {
|
|||
replica.prepareForIndexRecovery();
|
||||
RecoveryTarget recoveryTarget = targetSupplier.apply(replica, pNode);
|
||||
StartRecoveryRequest request = new StartRecoveryRequest(replica.shardId(), pNode, rNode,
|
||||
replica.store().getMetadataOrEmpty(), RecoveryState.Type.REPLICA, 0);
|
||||
getMetadataSnapshotOrEmpty(replica), RecoveryState.Type.REPLICA, 0);
|
||||
RecoverySourceHandler recovery = new RecoverySourceHandler(primary, recoveryTarget, request, () -> 0L, e -> () -> {},
|
||||
(int) ByteSizeUnit.MB.toKB(1), logger);
|
||||
recovery.recoverToTarget();
|
||||
|
@ -307,6 +308,20 @@ public abstract class ESIndexLevelReplicationTestCase extends ESTestCase {
|
|||
replica.updateRoutingEntry(ShardRoutingHelper.moveToStarted(replica.routingEntry()));
|
||||
}
|
||||
|
||||
private Store.MetadataSnapshot getMetadataSnapshotOrEmpty(IndexShard replica) throws IOException {
|
||||
Store.MetadataSnapshot result;
|
||||
try {
|
||||
result = replica.snapshotStoreMetadata();
|
||||
} catch (IndexNotFoundException e) {
|
||||
// OK!
|
||||
result = Store.MetadataSnapshot.EMPTY;
|
||||
} catch (IOException e) {
|
||||
logger.warn("{} failed read store, treating as empty", e);
|
||||
result = Store.MetadataSnapshot.EMPTY;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
public synchronized DiscoveryNode getPrimaryNode() {
|
||||
return getDiscoveryNode(primary.routingEntry().currentNodeId());
|
||||
}
|
||||
|
|
|
@ -23,6 +23,7 @@ import org.apache.lucene.document.NumericDocValuesField;
|
|||
import org.apache.lucene.index.CorruptIndexException;
|
||||
import org.apache.lucene.index.DirectoryReader;
|
||||
import org.apache.lucene.index.IndexCommit;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.search.TermQuery;
|
||||
|
@ -156,6 +157,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSear
|
|||
import static org.hamcrest.Matchers.containsString;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.greaterThan;
|
||||
import static org.hamcrest.Matchers.hasSize;
|
||||
|
||||
/**
|
||||
* Simple unit-test IndexShard related operations.
|
||||
|
@ -476,6 +478,76 @@ public class IndexShardTests extends ESSingleNodeTestCase {
|
|||
ShardStateMetaData.FORMAT.write(shardStateMetaData, shardPaths);
|
||||
}
|
||||
|
||||
public void testAcquireIndexCommit() throws IOException {
|
||||
createIndex("test");
|
||||
ensureGreen();
|
||||
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
|
||||
IndexService test = indicesService.indexService(resolveIndex("test"));
|
||||
final IndexShard shard = test.getShardOrNull(0);
|
||||
int numDocs = randomInt(20);
|
||||
for (int i = 0; i < numDocs; i++) {
|
||||
client().prepareIndex("test", "type", "id_" + i).setSource("{}").get();
|
||||
}
|
||||
final boolean flushFirst = randomBoolean();
|
||||
IndexCommit commit = shard.acquireIndexCommit(flushFirst);
|
||||
int moreDocs = randomInt(20);
|
||||
for (int i = 0; i < moreDocs; i++) {
|
||||
client().prepareIndex("test", "type", "id_" + numDocs + i).setSource("{}").get();
|
||||
}
|
||||
shard.flush(new FlushRequest("index"));
|
||||
// check that we can still read the commit that we captured
|
||||
try (IndexReader reader = DirectoryReader.open(commit)) {
|
||||
assertThat(reader.numDocs(), equalTo(flushFirst ? numDocs : 0));
|
||||
}
|
||||
shard.releaseIndexCommit(commit);
|
||||
shard.flush(new FlushRequest("index").force(true));
|
||||
// check it's clean up
|
||||
assertThat(DirectoryReader.listCommits(shard.store().directory()), hasSize(1));
|
||||
}
|
||||
|
||||
/***
|
||||
* test one can snapshot the store at various lifecycle stages
|
||||
*/
|
||||
public void testSnapshotStore() throws IOException {
|
||||
createIndex("test");
|
||||
ensureGreen();
|
||||
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
|
||||
IndexService test = indicesService.indexService(resolveIndex("test"));
|
||||
final IndexShard shard = test.getShardOrNull(0);
|
||||
client().prepareIndex("test", "test", "0").setSource("{}").setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get();
|
||||
client().admin().indices().prepareFlush().get();
|
||||
ShardRouting routing = shard.routingEntry();
|
||||
test.removeShard(0, "b/c simon says so");
|
||||
routing = ShardRoutingHelper.reinit(routing);
|
||||
IndexShard newShard = test.createShard(routing);
|
||||
newShard.updateRoutingEntry(routing);
|
||||
DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT);
|
||||
|
||||
Store.MetadataSnapshot snapshot = newShard.snapshotStoreMetadata();
|
||||
assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_2"));
|
||||
|
||||
newShard.markAsRecovering("store", new RecoveryState(newShard.shardId(), routing.primary(), RecoveryState.Type.STORE, localNode,
|
||||
localNode));
|
||||
|
||||
snapshot = newShard.snapshotStoreMetadata();
|
||||
assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_2"));
|
||||
|
||||
assertTrue(newShard.recoverFromStore());
|
||||
|
||||
snapshot = newShard.snapshotStoreMetadata();
|
||||
assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_2"));
|
||||
|
||||
newShard.updateRoutingEntry(getInitializingShardRouting(routing).moveToStarted());
|
||||
|
||||
snapshot = newShard.snapshotStoreMetadata();
|
||||
assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_2"));
|
||||
|
||||
newShard.close("test", false);
|
||||
|
||||
snapshot = newShard.snapshotStoreMetadata();
|
||||
assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_2"));
|
||||
}
|
||||
|
||||
public void testDurableFlagHasEffect() {
|
||||
createIndex("test");
|
||||
ensureGreen();
|
||||
|
|
|
@ -328,15 +328,14 @@ public class StoreTests extends ESTestCase {
|
|||
Store.MetadataSnapshot metadata;
|
||||
// check before we committed
|
||||
try {
|
||||
store.getMetadata();
|
||||
store.getMetadata(null);
|
||||
fail("no index present - expected exception");
|
||||
} catch (IndexNotFoundException ex) {
|
||||
// expected
|
||||
}
|
||||
assertThat(store.getMetadataOrEmpty(), is(Store.MetadataSnapshot.EMPTY)); // nothing committed
|
||||
writer.commit();
|
||||
writer.close();
|
||||
metadata = store.getMetadata();
|
||||
metadata = store.getMetadata(null);
|
||||
assertThat(metadata.asMap().isEmpty(), is(false));
|
||||
for (StoreFileMetaData meta : metadata) {
|
||||
try (IndexInput input = store.directory().openInput(meta.name(), IOContext.DEFAULT)) {
|
||||
|
@ -579,7 +578,7 @@ public class StoreTests extends ESTestCase {
|
|||
}
|
||||
writer.commit();
|
||||
writer.close();
|
||||
first = store.getMetadata();
|
||||
first = store.getMetadata(null);
|
||||
assertDeleteContent(store, directoryService);
|
||||
store.close();
|
||||
}
|
||||
|
@ -609,7 +608,7 @@ public class StoreTests extends ESTestCase {
|
|||
}
|
||||
writer.commit();
|
||||
writer.close();
|
||||
second = store.getMetadata();
|
||||
second = store.getMetadata(null);
|
||||
}
|
||||
Store.RecoveryDiff diff = first.recoveryDiff(second);
|
||||
assertThat(first.size(), equalTo(second.size()));
|
||||
|
@ -639,7 +638,7 @@ public class StoreTests extends ESTestCase {
|
|||
writer.deleteDocuments(new Term("id", Integer.toString(random().nextInt(numDocs))));
|
||||
writer.commit();
|
||||
writer.close();
|
||||
Store.MetadataSnapshot metadata = store.getMetadata();
|
||||
Store.MetadataSnapshot metadata = store.getMetadata(null);
|
||||
StoreFileMetaData delFile = null;
|
||||
for (StoreFileMetaData md : metadata) {
|
||||
if (md.name().endsWith(".liv")) {
|
||||
|
@ -674,7 +673,7 @@ public class StoreTests extends ESTestCase {
|
|||
writer.addDocument(docs.get(0));
|
||||
writer.close();
|
||||
|
||||
Store.MetadataSnapshot newCommitMetaData = store.getMetadata();
|
||||
Store.MetadataSnapshot newCommitMetaData = store.getMetadata(null);
|
||||
Store.RecoveryDiff newCommitDiff = newCommitMetaData.recoveryDiff(metadata);
|
||||
if (delFile != null) {
|
||||
assertThat(newCommitDiff.identical.size(), equalTo(newCommitMetaData.size() - 5)); // segments_N, del file, cfs, cfe, si for the new segment
|
||||
|
@ -723,7 +722,7 @@ public class StoreTests extends ESTestCase {
|
|||
writer.addDocument(doc);
|
||||
}
|
||||
|
||||
Store.MetadataSnapshot firstMeta = store.getMetadata();
|
||||
Store.MetadataSnapshot firstMeta = store.getMetadata(null);
|
||||
|
||||
if (random().nextBoolean()) {
|
||||
for (int i = 0; i < docs; i++) {
|
||||
|
@ -738,7 +737,7 @@ public class StoreTests extends ESTestCase {
|
|||
writer.commit();
|
||||
writer.close();
|
||||
|
||||
Store.MetadataSnapshot secondMeta = store.getMetadata();
|
||||
Store.MetadataSnapshot secondMeta = store.getMetadata(null);
|
||||
|
||||
|
||||
if (randomBoolean()) {
|
||||
|
@ -785,13 +784,10 @@ public class StoreTests extends ESTestCase {
|
|||
final AtomicInteger count = new AtomicInteger(0);
|
||||
final ShardLock lock = new DummyShardLock(shardId);
|
||||
|
||||
Store store = new Store(shardId, INDEX_SETTINGS, directoryService, lock, new Store.OnClose() {
|
||||
@Override
|
||||
public void handle(ShardLock theLock) {
|
||||
assertEquals(shardId, theLock.getShardId());
|
||||
assertEquals(lock, theLock);
|
||||
count.incrementAndGet();
|
||||
}
|
||||
Store store = new Store(shardId, INDEX_SETTINGS, directoryService, lock, theLock -> {
|
||||
assertEquals(shardId, theLock.getShardId());
|
||||
assertEquals(lock, theLock);
|
||||
count.incrementAndGet();
|
||||
});
|
||||
assertEquals(count.get(), 0);
|
||||
|
||||
|
@ -917,11 +913,7 @@ public class StoreTests extends ESTestCase {
|
|||
writer.commit();
|
||||
writer.close();
|
||||
Store.MetadataSnapshot metadata;
|
||||
if (randomBoolean()) {
|
||||
metadata = store.getMetadata();
|
||||
} else {
|
||||
metadata = store.getMetadata(deletionPolicy.snapshot());
|
||||
}
|
||||
metadata = store.getMetadata(randomBoolean() ? null : deletionPolicy.snapshot());
|
||||
assertFalse(metadata.asMap().isEmpty());
|
||||
// do not check for correct files, we have enough tests for that above
|
||||
assertThat(metadata.getCommitUserData().get(Engine.SYNC_COMMIT_ID), equalTo(syncId));
|
||||
|
@ -982,7 +974,7 @@ public class StoreTests extends ESTestCase {
|
|||
|
||||
try {
|
||||
if (randomBoolean()) {
|
||||
store.getMetadata();
|
||||
store.getMetadata(null);
|
||||
} else {
|
||||
store.readLastCommittedSegmentsInfo();
|
||||
}
|
||||
|
|
|
@ -73,13 +73,13 @@ public class IndicesModuleTests extends ESTestCase {
|
|||
});
|
||||
|
||||
public void testBuiltinMappers() {
|
||||
IndicesModule module = new IndicesModule(new NamedWriteableRegistry(), Collections.emptyList());
|
||||
IndicesModule module = new IndicesModule(Collections.emptyList());
|
||||
assertFalse(module.getMapperRegistry().getMapperParsers().isEmpty());
|
||||
assertFalse(module.getMapperRegistry().getMetadataMapperParsers().isEmpty());
|
||||
}
|
||||
|
||||
public void testBuiltinWithPlugins() {
|
||||
IndicesModule module = new IndicesModule(new NamedWriteableRegistry(), fakePlugins);
|
||||
IndicesModule module = new IndicesModule(fakePlugins);
|
||||
MapperRegistry registry = module.getMapperRegistry();
|
||||
assertThat(registry.getMapperParsers().size(), Matchers.greaterThan(1));
|
||||
assertThat(registry.getMetadataMapperParsers().size(), Matchers.greaterThan(1));
|
||||
|
@ -93,7 +93,7 @@ public class IndicesModuleTests extends ESTestCase {
|
|||
}
|
||||
});
|
||||
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
|
||||
() -> new IndicesModule(new NamedWriteableRegistry(), plugins));
|
||||
() -> new IndicesModule(plugins));
|
||||
assertThat(e.getMessage(), Matchers.containsString("already registered"));
|
||||
}
|
||||
|
||||
|
@ -106,7 +106,7 @@ public class IndicesModuleTests extends ESTestCase {
|
|||
};
|
||||
List<MapperPlugin> plugins = Arrays.asList(plugin, plugin);
|
||||
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
|
||||
() -> new IndicesModule(new NamedWriteableRegistry(), plugins));
|
||||
() -> new IndicesModule(plugins));
|
||||
assertThat(e.getMessage(), Matchers.containsString("already registered"));
|
||||
}
|
||||
|
||||
|
@ -118,7 +118,7 @@ public class IndicesModuleTests extends ESTestCase {
|
|||
}
|
||||
});
|
||||
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
|
||||
() -> new IndicesModule(new NamedWriteableRegistry(), plugins));
|
||||
() -> new IndicesModule(plugins));
|
||||
assertThat(e.getMessage(), Matchers.containsString("already registered"));
|
||||
}
|
||||
|
||||
|
@ -131,7 +131,7 @@ public class IndicesModuleTests extends ESTestCase {
|
|||
};
|
||||
List<MapperPlugin> plugins = Arrays.asList(plugin, plugin);
|
||||
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
|
||||
() -> new IndicesModule(new NamedWriteableRegistry(), plugins));
|
||||
() -> new IndicesModule(plugins));
|
||||
assertThat(e.getMessage(), Matchers.containsString("already registered"));
|
||||
}
|
||||
|
||||
|
@ -143,19 +143,19 @@ public class IndicesModuleTests extends ESTestCase {
|
|||
}
|
||||
});
|
||||
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
|
||||
() -> new IndicesModule(new NamedWriteableRegistry(), plugins));
|
||||
() -> new IndicesModule(plugins));
|
||||
assertThat(e.getMessage(), Matchers.containsString("cannot contain metadata mapper [_field_names]"));
|
||||
}
|
||||
|
||||
public void testFieldNamesIsLast() {
|
||||
IndicesModule module = new IndicesModule(new NamedWriteableRegistry(), Collections.emptyList());
|
||||
IndicesModule module = new IndicesModule(Collections.emptyList());
|
||||
List<String> fieldNames = module.getMapperRegistry().getMetadataMapperParsers().keySet()
|
||||
.stream().collect(Collectors.toList());
|
||||
assertEquals(FieldNamesFieldMapper.NAME, fieldNames.get(fieldNames.size() - 1));
|
||||
}
|
||||
|
||||
public void testFieldNamesIsLastWithPlugins() {
|
||||
IndicesModule module = new IndicesModule(new NamedWriteableRegistry(), fakePlugins);
|
||||
IndicesModule module = new IndicesModule(fakePlugins);
|
||||
List<String> fieldNames = module.getMapperRegistry().getMetadataMapperParsers().keySet()
|
||||
.stream().collect(Collectors.toList());
|
||||
assertEquals(FieldNamesFieldMapper.NAME, fieldNames.get(fieldNames.size() - 1));
|
||||
|
|
|
@ -97,7 +97,9 @@ public class RecoverySourceHandlerTests extends ESTestCase {
|
|||
writer.addDocument(document);
|
||||
}
|
||||
writer.commit();
|
||||
Store.MetadataSnapshot metadata = store.getMetadata();
|
||||
writer.close();
|
||||
|
||||
Store.MetadataSnapshot metadata = store.getMetadata(null);
|
||||
List<StoreFileMetaData> metas = new ArrayList<>();
|
||||
for (StoreFileMetaData md : metadata) {
|
||||
metas.add(md);
|
||||
|
@ -116,14 +118,14 @@ public class RecoverySourceHandlerTests extends ESTestCase {
|
|||
throw new RuntimeException(e);
|
||||
}
|
||||
});
|
||||
Store.MetadataSnapshot targetStoreMetadata = targetStore.getMetadata();
|
||||
Store.MetadataSnapshot targetStoreMetadata = targetStore.getMetadata(null);
|
||||
Store.RecoveryDiff recoveryDiff = targetStoreMetadata.recoveryDiff(metadata);
|
||||
assertEquals(metas.size(), recoveryDiff.identical.size());
|
||||
assertEquals(0, recoveryDiff.different.size());
|
||||
assertEquals(0, recoveryDiff.missing.size());
|
||||
IndexReader reader = DirectoryReader.open(targetStore.directory());
|
||||
assertEquals(numDocs, reader.maxDoc());
|
||||
IOUtils.close(reader, writer, store, targetStore);
|
||||
IOUtils.close(reader, store, targetStore);
|
||||
}
|
||||
|
||||
public void testHandleCorruptedIndexOnSendSendFiles() throws Throwable {
|
||||
|
@ -157,7 +159,7 @@ public class RecoverySourceHandlerTests extends ESTestCase {
|
|||
writer.commit();
|
||||
writer.close();
|
||||
|
||||
Store.MetadataSnapshot metadata = store.getMetadata();
|
||||
Store.MetadataSnapshot metadata = store.getMetadata(null);
|
||||
List<StoreFileMetaData> metas = new ArrayList<>();
|
||||
for (StoreFileMetaData md : metadata) {
|
||||
metas.add(md);
|
||||
|
@ -221,7 +223,7 @@ public class RecoverySourceHandlerTests extends ESTestCase {
|
|||
writer.commit();
|
||||
writer.close();
|
||||
|
||||
Store.MetadataSnapshot metadata = store.getMetadata();
|
||||
Store.MetadataSnapshot metadata = store.getMetadata(null);
|
||||
List<StoreFileMetaData> metas = new ArrayList<>();
|
||||
for (StoreFileMetaData md : metadata) {
|
||||
metas.add(md);
|
||||
|
|
|
@ -19,11 +19,15 @@
|
|||
|
||||
package org.elasticsearch.search;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.lucene.document.InetAddressPoint;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.joda.Joda;
|
||||
import org.elasticsearch.common.network.InetAddresses;
|
||||
|
@ -33,13 +37,14 @@ import org.joda.time.DateTimeZone;
|
|||
public class DocValueFormatTests extends ESTestCase {
|
||||
|
||||
public void testSerialization() throws Exception {
|
||||
NamedWriteableRegistry registry = new NamedWriteableRegistry();
|
||||
registry.register(DocValueFormat.class, DocValueFormat.BOOLEAN.getWriteableName(), in -> DocValueFormat.BOOLEAN);
|
||||
registry.register(DocValueFormat.class, DocValueFormat.DateTime.NAME, DocValueFormat.DateTime::new);
|
||||
registry.register(DocValueFormat.class, DocValueFormat.Decimal.NAME, DocValueFormat.Decimal::new);
|
||||
registry.register(DocValueFormat.class, DocValueFormat.GEOHASH.getWriteableName(), in -> DocValueFormat.GEOHASH);
|
||||
registry.register(DocValueFormat.class, DocValueFormat.IP.getWriteableName(), in -> DocValueFormat.IP);
|
||||
registry.register(DocValueFormat.class, DocValueFormat.RAW.getWriteableName(), in -> DocValueFormat.RAW);
|
||||
List<Entry> entries = new ArrayList<>();
|
||||
entries.add(new Entry(DocValueFormat.class, DocValueFormat.BOOLEAN.getWriteableName(), in -> DocValueFormat.BOOLEAN));
|
||||
entries.add(new Entry(DocValueFormat.class, DocValueFormat.DateTime.NAME, DocValueFormat.DateTime::new));
|
||||
entries.add(new Entry(DocValueFormat.class, DocValueFormat.Decimal.NAME, DocValueFormat.Decimal::new));
|
||||
entries.add(new Entry(DocValueFormat.class, DocValueFormat.GEOHASH.getWriteableName(), in -> DocValueFormat.GEOHASH));
|
||||
entries.add(new Entry(DocValueFormat.class, DocValueFormat.IP.getWriteableName(), in -> DocValueFormat.IP));
|
||||
entries.add(new Entry(DocValueFormat.class, DocValueFormat.RAW.getWriteableName(), in -> DocValueFormat.RAW));
|
||||
NamedWriteableRegistry registry = new NamedWriteableRegistry(entries);
|
||||
|
||||
BytesStreamOutput out = new BytesStreamOutput();
|
||||
out.writeNamedWriteable(DocValueFormat.BOOLEAN);
|
||||
|
|
|
@ -71,7 +71,7 @@ public class SearchModuleTests extends ModuleTestCase {
|
|||
}
|
||||
};
|
||||
expectThrows(IllegalArgumentException.class,
|
||||
() -> new SearchModule(Settings.EMPTY, new NamedWriteableRegistry(), false, singletonList(registersDupeHighlighter)));
|
||||
() -> new SearchModule(Settings.EMPTY, false, singletonList(registersDupeHighlighter)));
|
||||
|
||||
SearchPlugin registersDupeSuggester = new SearchPlugin() {
|
||||
@Override
|
||||
|
@ -80,7 +80,7 @@ public class SearchModuleTests extends ModuleTestCase {
|
|||
}
|
||||
};
|
||||
expectThrows(IllegalArgumentException.class,
|
||||
() -> new SearchModule(Settings.EMPTY, new NamedWriteableRegistry(), false, singletonList(registersDupeSuggester)));
|
||||
() -> new SearchModule(Settings.EMPTY, false, singletonList(registersDupeSuggester)));
|
||||
|
||||
SearchPlugin registersDupeScoreFunction = new SearchPlugin() {
|
||||
@Override
|
||||
|
@ -90,7 +90,7 @@ public class SearchModuleTests extends ModuleTestCase {
|
|||
}
|
||||
};
|
||||
expectThrows(IllegalArgumentException.class,
|
||||
() -> new SearchModule(Settings.EMPTY, new NamedWriteableRegistry(), false, singletonList(registersDupeScoreFunction)));
|
||||
() -> new SearchModule(Settings.EMPTY, false, singletonList(registersDupeScoreFunction)));
|
||||
|
||||
SearchPlugin registersDupeSignificanceHeuristic = new SearchPlugin() {
|
||||
@Override
|
||||
|
@ -98,7 +98,7 @@ public class SearchModuleTests extends ModuleTestCase {
|
|||
return singletonList(new SearchExtensionSpec<>(ChiSquare.NAME, ChiSquare::new, ChiSquare.PARSER));
|
||||
}
|
||||
};
|
||||
expectThrows(IllegalArgumentException.class, () -> new SearchModule(Settings.EMPTY, new NamedWriteableRegistry(), false,
|
||||
expectThrows(IllegalArgumentException.class, () -> new SearchModule(Settings.EMPTY, false,
|
||||
singletonList(registersDupeSignificanceHeuristic)));
|
||||
|
||||
SearchPlugin registersDupeMovAvgModel = new SearchPlugin() {
|
||||
|
@ -107,7 +107,7 @@ public class SearchModuleTests extends ModuleTestCase {
|
|||
return singletonList(new SearchExtensionSpec<>(SimpleModel.NAME, SimpleModel::new, SimpleModel.PARSER));
|
||||
}
|
||||
};
|
||||
expectThrows(IllegalArgumentException.class, () -> new SearchModule(Settings.EMPTY, new NamedWriteableRegistry(), false,
|
||||
expectThrows(IllegalArgumentException.class, () -> new SearchModule(Settings.EMPTY, false,
|
||||
singletonList(registersDupeMovAvgModel)));
|
||||
|
||||
SearchPlugin registersDupeFetchSubPhase = new SearchPlugin() {
|
||||
|
@ -116,7 +116,7 @@ public class SearchModuleTests extends ModuleTestCase {
|
|||
return singletonList(new ExplainFetchSubPhase());
|
||||
}
|
||||
};
|
||||
expectThrows(IllegalArgumentException.class, () -> new SearchModule(Settings.EMPTY, new NamedWriteableRegistry(), false,
|
||||
expectThrows(IllegalArgumentException.class, () -> new SearchModule(Settings.EMPTY, false,
|
||||
singletonList(registersDupeFetchSubPhase)));
|
||||
|
||||
SearchPlugin registersDupeFetchQuery = new SearchPlugin() {
|
||||
|
@ -124,12 +124,12 @@ public class SearchModuleTests extends ModuleTestCase {
|
|||
return singletonList(new QuerySpec<>(TermQueryBuilder.NAME, TermQueryBuilder::new, TermQueryBuilder::fromXContent));
|
||||
}
|
||||
};
|
||||
expectThrows(IllegalArgumentException.class, () -> new SearchModule(Settings.EMPTY, new NamedWriteableRegistry(), false,
|
||||
expectThrows(IllegalArgumentException.class, () -> new SearchModule(Settings.EMPTY, false,
|
||||
singletonList(registersDupeFetchQuery)));
|
||||
}
|
||||
|
||||
public void testRegisterSuggester() {
|
||||
SearchModule module = new SearchModule(Settings.EMPTY, new NamedWriteableRegistry(), false, singletonList(new SearchPlugin() {
|
||||
SearchModule module = new SearchModule(Settings.EMPTY, false, singletonList(new SearchPlugin() {
|
||||
@Override
|
||||
public Map<String, Suggester<?>> getSuggesters() {
|
||||
return singletonMap("custom", CustomSuggester.INSTANCE);
|
||||
|
@ -143,7 +143,7 @@ public class SearchModuleTests extends ModuleTestCase {
|
|||
|
||||
public void testRegisterHighlighter() {
|
||||
CustomHighlighter customHighlighter = new CustomHighlighter();
|
||||
SearchModule module = new SearchModule(Settings.EMPTY, new NamedWriteableRegistry(), false, singletonList(new SearchPlugin() {
|
||||
SearchModule module = new SearchModule(Settings.EMPTY, false, singletonList(new SearchPlugin() {
|
||||
@Override
|
||||
public Map<String, Highlighter> getHighlighters() {
|
||||
return singletonMap("custom", customHighlighter);
|
||||
|
@ -158,7 +158,7 @@ public class SearchModuleTests extends ModuleTestCase {
|
|||
}
|
||||
|
||||
public void testRegisteredQueries() throws IOException {
|
||||
SearchModule module = new SearchModule(Settings.EMPTY, new NamedWriteableRegistry(), false, emptyList());
|
||||
SearchModule module = new SearchModule(Settings.EMPTY, false, emptyList());
|
||||
List<String> allSupportedQueries = new ArrayList<>();
|
||||
Collections.addAll(allSupportedQueries, NON_DEPRECATED_QUERIES);
|
||||
Collections.addAll(allSupportedQueries, DEPRECATED_QUERIES);
|
||||
|
|
|
@ -56,11 +56,15 @@ import org.junit.AfterClass;
|
|||
import org.junit.BeforeClass;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Random;
|
||||
import java.util.function.Function;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
import java.util.stream.Collectors;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
import static java.util.Collections.emptyList;
|
||||
import static org.elasticsearch.test.ClusterServiceUtils.createClusterService;
|
||||
|
@ -97,7 +101,6 @@ public class AggregatorParsingTests extends ESTestCase {
|
|||
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
|
||||
.put(ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING.getKey(), false).build();
|
||||
|
||||
namedWriteableRegistry = new NamedWriteableRegistry();
|
||||
index = new Index(randomAsciiOfLengthBetween(1, 10), "_na_");
|
||||
Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
|
||||
final ThreadPool threadPool = new ThreadPool(settings);
|
||||
|
@ -108,25 +111,31 @@ public class AggregatorParsingTests extends ESTestCase {
|
|||
List<Setting<?>> scriptSettings = scriptModule.getSettings();
|
||||
scriptSettings.add(InternalSettingsPlugin.VERSION_CREATED);
|
||||
SettingsModule settingsModule = new SettingsModule(settings, scriptSettings, Collections.emptyList());
|
||||
|
||||
IndicesModule indicesModule = new IndicesModule(Collections.emptyList()) {
|
||||
@Override
|
||||
protected void configure() {
|
||||
bindMapperExtension();
|
||||
}
|
||||
};
|
||||
SearchModule searchModule = new SearchModule(settings, false, emptyList()) {
|
||||
@Override
|
||||
protected void configureSearch() {
|
||||
// Skip me
|
||||
}
|
||||
};
|
||||
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
|
||||
entries.addAll(indicesModule.getNamedWriteables());
|
||||
entries.addAll(searchModule.getNamedWriteables());
|
||||
namedWriteableRegistry = new NamedWriteableRegistry(entries);
|
||||
injector = new ModulesBuilder().add(
|
||||
(b) -> {
|
||||
b.bind(Environment.class).toInstance(new Environment(settings));
|
||||
b.bind(ThreadPool.class).toInstance(threadPool);
|
||||
b.bind(ScriptService.class).toInstance(scriptModule.getScriptService());
|
||||
},
|
||||
settingsModule,
|
||||
new IndicesModule(namedWriteableRegistry, Collections.emptyList()) {
|
||||
@Override
|
||||
protected void configure() {
|
||||
bindMapperExtension();
|
||||
}
|
||||
}, new SearchModule(settings, namedWriteableRegistry, false, emptyList()) {
|
||||
@Override
|
||||
protected void configureSearch() {
|
||||
// Skip me
|
||||
}
|
||||
}, new IndexSettingsModule(index, settings),
|
||||
|
||||
settingsModule, indicesModule, searchModule,
|
||||
new IndexSettingsModule(index, settings),
|
||||
new AbstractModule() {
|
||||
@Override
|
||||
protected void configure() {
|
||||
|
|
|
@ -60,6 +60,7 @@ import org.junit.AfterClass;
|
|||
import org.junit.BeforeClass;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
|
||||
|
@ -126,7 +127,6 @@ public abstract class BaseAggregationTestCase<AB extends AbstractAggregationBuil
|
|||
.put(ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING.getKey(), false)
|
||||
.build();
|
||||
|
||||
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();
|
||||
Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
|
||||
final ThreadPool threadPool = new ThreadPool(settings);
|
||||
final ClusterService clusterService = createClusterService(threadPool);
|
||||
|
@ -136,6 +136,22 @@ public abstract class BaseAggregationTestCase<AB extends AbstractAggregationBuil
|
|||
List<Setting<?>> scriptSettings = scriptModule.getSettings();
|
||||
scriptSettings.add(InternalSettingsPlugin.VERSION_CREATED);
|
||||
SettingsModule settingsModule = new SettingsModule(settings, scriptSettings, Collections.emptyList());
|
||||
IndicesModule indicesModule = new IndicesModule(Collections.emptyList()) {
|
||||
@Override
|
||||
protected void configure() {
|
||||
bindMapperExtension();
|
||||
}
|
||||
};
|
||||
SearchModule searchModule = new SearchModule(settings, false, emptyList()) {
|
||||
@Override
|
||||
protected void configureSearch() {
|
||||
// Skip me
|
||||
}
|
||||
};
|
||||
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
|
||||
entries.addAll(indicesModule.getNamedWriteables());
|
||||
entries.addAll(searchModule.getNamedWriteables());
|
||||
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(entries);
|
||||
return new ModulesBuilder().add(
|
||||
(b) -> {
|
||||
b.bind(Environment.class).toInstance(new Environment(settings));
|
||||
|
@ -145,20 +161,7 @@ public abstract class BaseAggregationTestCase<AB extends AbstractAggregationBuil
|
|||
b.bind(CircuitBreakerService.class).to(NoneCircuitBreakerService.class);
|
||||
b.bind(NamedWriteableRegistry.class).toInstance(namedWriteableRegistry);
|
||||
},
|
||||
settingsModule,
|
||||
new IndicesModule(namedWriteableRegistry, Collections.emptyList()) {
|
||||
@Override
|
||||
protected void configure() {
|
||||
bindMapperExtension();
|
||||
}
|
||||
},
|
||||
new SearchModule(settings, namedWriteableRegistry, false, emptyList()) {
|
||||
@Override
|
||||
protected void configureSearch() {
|
||||
// Skip me
|
||||
}
|
||||
},
|
||||
new IndexSettingsModule(index, settings)
|
||||
settingsModule, indicesModule, searchModule, new IndexSettingsModule(index, settings)
|
||||
).createInjector();
|
||||
}
|
||||
|
||||
|
|
|
@ -124,7 +124,7 @@ public class MissingValueIT extends ESIntegTestCase {
|
|||
assertSearchResponse(response);
|
||||
Histogram histogram = response.getAggregations().get("my_histogram");
|
||||
assertEquals(1, histogram.getBuckets().size());
|
||||
assertEquals(10L, histogram.getBuckets().get(0).getKey());
|
||||
assertEquals(10d, histogram.getBuckets().get(0).getKey());
|
||||
assertEquals(2, histogram.getBuckets().get(0).getDocCount());
|
||||
}
|
||||
|
||||
|
@ -133,16 +133,16 @@ public class MissingValueIT extends ESIntegTestCase {
|
|||
assertSearchResponse(response);
|
||||
Histogram histogram = response.getAggregations().get("my_histogram");
|
||||
assertEquals(2, histogram.getBuckets().size());
|
||||
assertEquals(0L, histogram.getBuckets().get(0).getKey());
|
||||
assertEquals(0d, histogram.getBuckets().get(0).getKey());
|
||||
assertEquals(1, histogram.getBuckets().get(0).getDocCount());
|
||||
assertEquals(5L, histogram.getBuckets().get(1).getKey());
|
||||
assertEquals(5d, histogram.getBuckets().get(1).getKey());
|
||||
assertEquals(1, histogram.getBuckets().get(1).getDocCount());
|
||||
|
||||
response = client().prepareSearch("idx").addAggregation(histogram("my_histogram").field("long").interval(5).missing(3)).get();
|
||||
assertSearchResponse(response);
|
||||
histogram = response.getAggregations().get("my_histogram");
|
||||
assertEquals(1, histogram.getBuckets().size());
|
||||
assertEquals(0L, histogram.getBuckets().get(0).getKey());
|
||||
assertEquals(0d, histogram.getBuckets().get(0).getKey());
|
||||
assertEquals(2, histogram.getBuckets().get(0).getDocCount());
|
||||
}
|
||||
|
||||
|
|
|
@ -841,7 +841,7 @@ public class DateHistogramIT extends ESIntegTestCase {
|
|||
|
||||
Histogram.Bucket bucket = buckets.get(1);
|
||||
assertThat(bucket, Matchers.notNullValue());
|
||||
assertThat(bucket.getKeyAsString(), equalTo("1"));
|
||||
assertThat(bucket.getKeyAsString(), equalTo("1.0"));
|
||||
|
||||
Histogram dateHisto = bucket.getAggregations().get("date_histo");
|
||||
assertThat(dateHisto, Matchers.notNullValue());
|
||||
|
|
|
@ -97,7 +97,7 @@ public class DateHistogramOffsetIT extends ESIntegTestCase {
|
|||
assertThat(response.getHits().getTotalHits(), equalTo(5L));
|
||||
|
||||
Histogram histo = response.getAggregations().get("date_histo");
|
||||
List<? extends Histogram.Bucket> buckets = histo.getBuckets();
|
||||
List<Histogram.Bucket> buckets = histo.getBuckets();
|
||||
assertThat(buckets.size(), equalTo(2));
|
||||
|
||||
checkBucketFor(buckets.get(0), new DateTime(2014, 3, 10, 2, 0, DateTimeZone.UTC), 2L);
|
||||
|
|
|
@ -20,11 +20,10 @@
|
|||
package org.elasticsearch.search.aggregations.bucket;
|
||||
|
||||
import org.elasticsearch.search.aggregations.BaseAggregationTestCase;
|
||||
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Order;
|
||||
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder;
|
||||
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
|
||||
import org.elasticsearch.search.aggregations.bucket.histogram.ExtendedBounds;
|
||||
import org.elasticsearch.search.aggregations.bucket.histogram.ExtendedBoundsTests;
|
||||
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Order;
|
||||
|
||||
public class DateHistogramTests extends BaseAggregationTestCase<DateHistogramAggregationBuilder> {
|
||||
|
||||
|
|
|
@ -27,7 +27,6 @@ import org.elasticsearch.script.MockScriptPlugin;
|
|||
import org.elasticsearch.script.Script;
|
||||
import org.elasticsearch.script.ScriptService.ScriptType;
|
||||
import org.elasticsearch.search.aggregations.bucket.filter.Filter;
|
||||
import org.elasticsearch.search.aggregations.bucket.histogram.ExtendedBounds;
|
||||
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
|
||||
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket;
|
||||
import org.elasticsearch.search.aggregations.metrics.max.Max;
|
||||
|
@ -84,7 +83,7 @@ public class HistogramIT extends ESIntegTestCase {
|
|||
Map<String, Function<Map<String, Object>, Object>> scripts = new HashMap<>();
|
||||
|
||||
scripts.put("_value + 1", vars -> {
|
||||
long value = (long) vars.get("_value");
|
||||
double value = (double) vars.get("_value");
|
||||
return value + 1L;
|
||||
});
|
||||
|
||||
|
@ -377,7 +376,7 @@ public class HistogramIT extends ESIntegTestCase {
|
|||
}
|
||||
}
|
||||
assertThat(sum.getValue(), equalTo((double) s));
|
||||
assertEquals(propertiesKeys[i], (long) i * interval);
|
||||
assertEquals(propertiesKeys[i], (double) i * interval);
|
||||
assertThat(propertiesDocCounts[i], equalTo(valueCounts[i]));
|
||||
assertThat(propertiesCounts[i], equalTo((double) s));
|
||||
}
|
||||
|
@ -762,7 +761,7 @@ public class HistogramIT extends ESIntegTestCase {
|
|||
histogram("histo")
|
||||
.field(SINGLE_VALUED_FIELD_NAME)
|
||||
.interval(interval)
|
||||
.extendedBounds(new ExtendedBounds((long) -1 * 2 * interval, (long) valueCounts.length * interval)))
|
||||
.extendedBounds(-1 * 2 * interval, valueCounts.length * interval))
|
||||
.get();
|
||||
|
||||
assertSearchResponse(response);
|
||||
|
@ -853,7 +852,7 @@ public class HistogramIT extends ESIntegTestCase {
|
|||
.field(SINGLE_VALUED_FIELD_NAME)
|
||||
.interval(interval)
|
||||
.minDocCount(0)
|
||||
.extendedBounds(new ExtendedBounds(boundsMin, boundsMax)))
|
||||
.extendedBounds(boundsMin, boundsMax))
|
||||
.execute().actionGet();
|
||||
|
||||
if (invalidBoundsError) {
|
||||
|
@ -930,7 +929,7 @@ public class HistogramIT extends ESIntegTestCase {
|
|||
.field(SINGLE_VALUED_FIELD_NAME)
|
||||
.interval(interval)
|
||||
.minDocCount(0)
|
||||
.extendedBounds(new ExtendedBounds(boundsMin, boundsMax)))
|
||||
.extendedBounds(boundsMin, boundsMax))
|
||||
.execute().actionGet();
|
||||
|
||||
if (invalidBoundsError) {
|
||||
|
@ -973,7 +972,27 @@ public class HistogramIT extends ESIntegTestCase {
|
|||
.addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(-1).minDocCount(0)).execute().actionGet();
|
||||
fail();
|
||||
} catch (IllegalArgumentException e) {
|
||||
assertThat(e.toString(), containsString("[interval] must be 1 or greater for histogram aggregation [histo]"));
|
||||
assertThat(e.toString(), containsString("[interval] must be >0 for histogram aggregation [histo]"));
|
||||
}
|
||||
}
|
||||
|
||||
public void testDecimalIntervalAndOffset() throws Exception {
|
||||
assertAcked(prepareCreate("decimal_values").addMapping("type", "d", "type=float").get());
|
||||
indexRandom(true,
|
||||
client().prepareIndex("decimal_values", "type", "1").setSource("d", -0.6),
|
||||
client().prepareIndex("decimal_values", "type", "2").setSource("d", 0.1));
|
||||
|
||||
SearchResponse r = client().prepareSearch("decimal_values")
|
||||
.addAggregation(histogram("histo").field("d").interval(0.7).offset(0.05))
|
||||
.get();
|
||||
assertSearchResponse(r);
|
||||
|
||||
Histogram histogram = r.getAggregations().get("histo");
|
||||
List<Bucket> buckets = histogram.getBuckets();
|
||||
assertEquals(2, buckets.size());
|
||||
assertEquals(-0.65, (double) buckets.get(0).getKey(), 0.01d);
|
||||
assertEquals(1, buckets.get(0).getDocCount());
|
||||
assertEquals(0.05, (double) buckets.get(1).getKey(), 0.01d);
|
||||
assertEquals(1, buckets.get(1).getDocCount());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -20,10 +20,8 @@
|
|||
package org.elasticsearch.search.aggregations.bucket;
|
||||
|
||||
import org.elasticsearch.search.aggregations.BaseAggregationTestCase;
|
||||
import org.elasticsearch.search.aggregations.bucket.histogram.ExtendedBounds;
|
||||
import org.elasticsearch.search.aggregations.bucket.histogram.ExtendedBoundsTests;
|
||||
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Order;
|
||||
import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder;
|
||||
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Order;
|
||||
|
||||
public class HistogramTests extends BaseAggregationTestCase<HistogramAggregationBuilder> {
|
||||
|
||||
|
@ -31,9 +29,9 @@ public class HistogramTests extends BaseAggregationTestCase<HistogramAggregation
|
|||
protected HistogramAggregationBuilder createTestAggregatorBuilder() {
|
||||
HistogramAggregationBuilder factory = new HistogramAggregationBuilder("foo");
|
||||
factory.field(INT_FIELD_NAME);
|
||||
factory.interval(randomIntBetween(1, 100000));
|
||||
factory.interval(randomDouble() * 1000);
|
||||
if (randomBoolean()) {
|
||||
factory.extendedBounds(ExtendedBoundsTests.randomExtendedBounds());
|
||||
factory.extendedBounds(randomDouble(), randomDouble());
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
factory.format("###.##");
|
||||
|
|
|
@ -31,6 +31,7 @@ import org.elasticsearch.common.xcontent.XContentParser;
|
|||
import org.elasticsearch.common.xcontent.json.JsonXContent;
|
||||
import org.elasticsearch.search.DocValueFormat;
|
||||
import org.elasticsearch.search.SearchParseException;
|
||||
import org.elasticsearch.search.aggregations.bucket.histogram.ExtendedBounds;
|
||||
import org.elasticsearch.search.internal.SearchContext;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.joda.time.DateTimeZone;
|
||||
|
|
|
@ -107,8 +107,8 @@ public class SignificanceHeuristicTests extends ESTestCase {
|
|||
// read
|
||||
ByteArrayInputStream inBuffer = new ByteArrayInputStream(outBuffer.toByteArray());
|
||||
StreamInput in = new InputStreamStreamInput(inBuffer);
|
||||
NamedWriteableRegistry registry = new NamedWriteableRegistry();
|
||||
new SearchModule(Settings.EMPTY, registry, false, emptyList()); // populates the registry through side effects
|
||||
SearchModule searchModule = new SearchModule(Settings.EMPTY, false, emptyList()); // populates the registry through side effects
|
||||
NamedWriteableRegistry registry = new NamedWriteableRegistry(searchModule.getNamedWriteables());
|
||||
in = new NamedWriteableAwareStreamInput(in, registry);
|
||||
in.setVersion(version);
|
||||
InternalMappedSignificantTerms<?, ?> read = (InternalMappedSignificantTerms<?, ?>) in.readNamedWriteable(InternalAggregation.class);
|
||||
|
@ -217,7 +217,7 @@ public class SignificanceHeuristicTests extends ESTestCase {
|
|||
// 1. The output of the builders can actually be parsed
|
||||
// 2. The parser does not swallow parameters after a significance heuristic was defined
|
||||
public void testBuilderAndParser() throws Exception {
|
||||
SearchModule searchModule = new SearchModule(Settings.EMPTY, new NamedWriteableRegistry(), false, emptyList());
|
||||
SearchModule searchModule = new SearchModule(Settings.EMPTY, false, emptyList());
|
||||
ParseFieldRegistry<SignificanceHeuristicParser> heuristicParserMapper = searchModule.getSignificanceHeuristicParserRegistry();
|
||||
SearchContext searchContext = new SignificantTermsTestSearchContext();
|
||||
|
||||
|
|
|
@ -1,24 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Aggregations module
|
||||
*/
|
||||
package org.elasticsearch.search.aggregations;
|
||||
|
|
@ -21,7 +21,6 @@ package org.elasticsearch.search.aggregations.pipeline;
|
|||
|
||||
import org.elasticsearch.action.index.IndexRequestBuilder;
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.search.aggregations.bucket.histogram.ExtendedBounds;
|
||||
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
|
||||
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket;
|
||||
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
|
||||
|
@ -95,7 +94,7 @@ public class AvgBucketIT extends ESIntegTestCase {
|
|||
public void testDocCountTopLevel() throws Exception {
|
||||
SearchResponse response = client().prepareSearch("idx")
|
||||
.addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
|
||||
.extendedBounds(new ExtendedBounds((long) minRandomValue, (long) maxRandomValue)))
|
||||
.extendedBounds(minRandomValue, maxRandomValue))
|
||||
.addAggregation(avgBucket("avg_bucket", "histo>_count")).execute().actionGet();
|
||||
|
||||
assertSearchResponse(response);
|
||||
|
@ -133,7 +132,7 @@ public class AvgBucketIT extends ESIntegTestCase {
|
|||
.order(Order.term(true))
|
||||
.subAggregation(
|
||||
histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
|
||||
.extendedBounds(new ExtendedBounds((long) minRandomValue, (long) maxRandomValue)))
|
||||
.extendedBounds(minRandomValue, maxRandomValue))
|
||||
.subAggregation(avgBucket("avg_bucket", "histo>_count"))).execute().actionGet();
|
||||
|
||||
assertSearchResponse(response);
|
||||
|
@ -215,7 +214,7 @@ public class AvgBucketIT extends ESIntegTestCase {
|
|||
.order(Order.term(true))
|
||||
.subAggregation(
|
||||
histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
|
||||
.extendedBounds(new ExtendedBounds((long) minRandomValue, (long) maxRandomValue))
|
||||
.extendedBounds(minRandomValue, maxRandomValue)
|
||||
.subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)))
|
||||
.subAggregation(avgBucket("avg_bucket", "histo>sum"))).execute().actionGet();
|
||||
|
||||
|
@ -268,7 +267,7 @@ public class AvgBucketIT extends ESIntegTestCase {
|
|||
.order(Order.term(true))
|
||||
.subAggregation(
|
||||
histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
|
||||
.extendedBounds(new ExtendedBounds((long) minRandomValue, (long) maxRandomValue))
|
||||
.extendedBounds(minRandomValue, maxRandomValue)
|
||||
.subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)))
|
||||
.subAggregation(avgBucket("avg_bucket", "histo>sum").gapPolicy(GapPolicy.INSERT_ZEROS)))
|
||||
.execute().actionGet();
|
||||
|
@ -341,7 +340,7 @@ public class AvgBucketIT extends ESIntegTestCase {
|
|||
.order(Order.term(true))
|
||||
.subAggregation(
|
||||
histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
|
||||
.extendedBounds(new ExtendedBounds((long) minRandomValue, (long) maxRandomValue)))
|
||||
.extendedBounds(minRandomValue, maxRandomValue))
|
||||
.subAggregation(avgBucket("avg_histo_bucket", "histo>_count")))
|
||||
.addAggregation(avgBucket("avg_terms_bucket", "terms>avg_histo_bucket")).execute().actionGet();
|
||||
|
||||
|
|
|
@@ -28,8 +28,6 @@ import org.elasticsearch.script.MockScriptPlugin;
 import org.elasticsearch.script.Script;
 import org.elasticsearch.script.ScriptService.ScriptType;
 import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
-import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram;
-import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram.Bucket;
 import org.elasticsearch.search.aggregations.metrics.sum.Sum;
 import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy;
 import org.elasticsearch.test.ESIntegTestCase;

@@ -166,10 +164,10 @@ public class BucketScriptIT extends ESIntegTestCase {

         assertSearchResponse(response);

-        InternalHistogram<Bucket> histo = response.getAggregations().get("histo");
+        Histogram histo = response.getAggregations().get("histo");
         assertThat(histo, notNullValue());
         assertThat(histo.getName(), equalTo("histo"));
-        List<? extends Bucket> buckets = histo.getBuckets();
+        List<? extends Histogram.Bucket> buckets = histo.getBuckets();

         for (int i = 0; i < buckets.size(); ++i) {
             Histogram.Bucket bucket = buckets.get(i);

@@ -212,10 +210,10 @@ public class BucketScriptIT extends ESIntegTestCase {

         assertSearchResponse(response);

-        InternalHistogram<Bucket> histo = response.getAggregations().get("histo");
+        Histogram histo = response.getAggregations().get("histo");
         assertThat(histo, notNullValue());
         assertThat(histo.getName(), equalTo("histo"));
-        List<? extends Bucket> buckets = histo.getBuckets();
+        List<? extends Histogram.Bucket> buckets = histo.getBuckets();

         for (int i = 0; i < buckets.size(); ++i) {
             Histogram.Bucket bucket = buckets.get(i);

@@ -256,10 +254,10 @@ public class BucketScriptIT extends ESIntegTestCase {

         assertSearchResponse(response);

-        InternalHistogram<Bucket> histo = response.getAggregations().get("histo");
+        Histogram histo = response.getAggregations().get("histo");
         assertThat(histo, notNullValue());
         assertThat(histo.getName(), equalTo("histo"));
-        List<? extends Bucket> buckets = histo.getBuckets();
+        List<? extends Histogram.Bucket> buckets = histo.getBuckets();

         for (int i = 0; i < buckets.size(); ++i) {
             Histogram.Bucket bucket = buckets.get(i);

@@ -299,10 +297,10 @@ public class BucketScriptIT extends ESIntegTestCase {

         assertSearchResponse(response);

-        InternalHistogram<Bucket> histo = response.getAggregations().get("histo");
+        Histogram histo = response.getAggregations().get("histo");
         assertThat(histo, notNullValue());
         assertThat(histo.getName(), equalTo("histo"));
-        List<? extends Bucket> buckets = histo.getBuckets();
+        List<? extends Histogram.Bucket> buckets = histo.getBuckets();

         for (int i = 0; i < buckets.size(); ++i) {
             Histogram.Bucket bucket = buckets.get(i);

@@ -347,10 +345,10 @@ public class BucketScriptIT extends ESIntegTestCase {

         assertSearchResponse(response);

-        InternalHistogram<Bucket> histo = response.getAggregations().get("histo");
+        Histogram histo = response.getAggregations().get("histo");
         assertThat(histo, notNullValue());
         assertThat(histo.getName(), equalTo("histo"));
-        List<? extends Bucket> buckets = histo.getBuckets();
+        List<? extends Histogram.Bucket> buckets = histo.getBuckets();

         for (int i = 0; i < buckets.size(); ++i) {
             Histogram.Bucket bucket = buckets.get(i);

@@ -393,10 +391,10 @@ public class BucketScriptIT extends ESIntegTestCase {

         assertSearchResponse(response);

-        InternalHistogram<Bucket> histo = response.getAggregations().get("histo");
+        Histogram histo = response.getAggregations().get("histo");
         assertThat(histo, notNullValue());
         assertThat(histo.getName(), equalTo("histo"));
-        List<? extends Bucket> buckets = histo.getBuckets();
+        List<? extends Histogram.Bucket> buckets = histo.getBuckets();

         for (int i = 0; i < buckets.size(); ++i) {
             Histogram.Bucket bucket = buckets.get(i);

@@ -446,10 +444,10 @@ public class BucketScriptIT extends ESIntegTestCase {

         assertSearchResponse(response);

-        InternalHistogram<Bucket> histo = response.getAggregations().get("histo");
+        Histogram histo = response.getAggregations().get("histo");
         assertThat(histo, notNullValue());
         assertThat(histo.getName(), equalTo("histo"));
-        List<? extends Bucket> buckets = histo.getBuckets();
+        List<? extends Histogram.Bucket> buckets = histo.getBuckets();

         for (int i = 0; i < buckets.size(); ++i) {
             Histogram.Bucket bucket = buckets.get(i);

@@ -492,7 +490,7 @@ public class BucketScriptIT extends ESIntegTestCase {

         assertSearchResponse(response);

-        InternalHistogram<Bucket> deriv = response.getAggregations().get("histo");
+        Histogram deriv = response.getAggregations().get("histo");
         assertThat(deriv, notNullValue());
         assertThat(deriv.getName(), equalTo("histo"));
         assertThat(deriv.getBuckets().size(), equalTo(0));

@@ -515,10 +513,10 @@ public class BucketScriptIT extends ESIntegTestCase {

         assertSearchResponse(response);

-        InternalHistogram<Bucket> histo = response.getAggregations().get("histo");
+        Histogram histo = response.getAggregations().get("histo");
         assertThat(histo, notNullValue());
         assertThat(histo.getName(), equalTo("histo"));
-        List<? extends Bucket> buckets = histo.getBuckets();
+        List<? extends Histogram.Bucket> buckets = histo.getBuckets();

         for (int i = 0; i < buckets.size(); ++i) {
             Histogram.Bucket bucket = buckets.get(i);

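Every BucketScriptIT hunk applies the same read-side change: results are fetched through the Histogram interface and iterated as Histogram.Bucket, so the test no longer references InternalHistogram at all. The sketch below captures that access pattern; the class and method names (HistogramReads, bucketsOf) are hypothetical and not part of the patch.

import java.util.List;

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;

// Sketch only: reads a histogram aggregation through its interface, matching the
// InternalHistogram<Bucket> -> Histogram replacement made in the hunks above.
final class HistogramReads {
    static List<? extends Histogram.Bucket> bucketsOf(SearchResponse response, String aggName) {
        Histogram histo = response.getAggregations().get(aggName); // the interface type is sufficient
        return histo.getBuckets();
    }
}
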
@@ -29,8 +29,7 @@ import org.elasticsearch.script.Script;
 import org.elasticsearch.script.ScriptService.ScriptType;
 import org.elasticsearch.search.aggregations.bucket.histogram.ExtendedBounds;
 import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
-import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram;
-import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram.Bucket;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket;
 import org.elasticsearch.search.aggregations.metrics.sum.Sum;
 import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy;
 import org.elasticsearch.test.ESIntegTestCase;

@@ -189,7 +188,7 @@ public class BucketSelectorIT extends ESIntegTestCase {

         assertSearchResponse(response);

-        InternalHistogram<Bucket> histo = response.getAggregations().get("histo");
+        Histogram histo = response.getAggregations().get("histo");
         assertThat(histo, notNullValue());
         assertThat(histo.getName(), equalTo("histo"));
         List<? extends Bucket> buckets = histo.getBuckets();

@@ -222,7 +221,7 @@ public class BucketSelectorIT extends ESIntegTestCase {

         assertSearchResponse(response);

-        InternalHistogram<Bucket> histo = response.getAggregations().get("histo");
+        Histogram histo = response.getAggregations().get("histo");
         assertThat(histo, notNullValue());
         assertThat(histo.getName(), equalTo("histo"));
         List<? extends Bucket> buckets = histo.getBuckets();

@@ -255,7 +254,7 @@ public class BucketSelectorIT extends ESIntegTestCase {

         assertSearchResponse(response);

-        InternalHistogram<Bucket> histo = response.getAggregations().get("histo");
+        Histogram histo = response.getAggregations().get("histo");
         assertThat(histo, notNullValue());
         assertThat(histo.getName(), equalTo("histo"));
         List<? extends Bucket> buckets = histo.getBuckets();

@@ -278,7 +277,7 @@ public class BucketSelectorIT extends ESIntegTestCase {

         assertSearchResponse(response);

-        InternalHistogram<Bucket> histo = response.getAggregations().get("histo");
+        Histogram histo = response.getAggregations().get("histo");
         assertThat(histo, notNullValue());
         assertThat(histo.getName(), equalTo("histo"));
         List<? extends Bucket> buckets = histo.getBuckets();

@@ -310,7 +309,7 @@ public class BucketSelectorIT extends ESIntegTestCase {

         assertSearchResponse(response);

-        InternalHistogram<Bucket> histo = response.getAggregations().get("histo");
+        Histogram histo = response.getAggregations().get("histo");
         assertThat(histo, notNullValue());
         assertThat(histo.getName(), equalTo("histo"));
         List<? extends Bucket> buckets = histo.getBuckets();

@@ -344,7 +343,7 @@ public class BucketSelectorIT extends ESIntegTestCase {

         assertSearchResponse(response);

-        InternalHistogram<Bucket> histo = response.getAggregations().get("histo");
+        Histogram histo = response.getAggregations().get("histo");
         assertThat(histo, notNullValue());
         assertThat(histo.getName(), equalTo("histo"));
         List<? extends Bucket> buckets = histo.getBuckets();

@@ -377,7 +376,7 @@ public class BucketSelectorIT extends ESIntegTestCase {

         assertSearchResponse(response);

-        InternalHistogram<Bucket> histo = response.getAggregations().get("histo");
+        Histogram histo = response.getAggregations().get("histo");
         assertThat(histo, notNullValue());
         assertThat(histo.getName(), equalTo("histo"));
         List<? extends Bucket> buckets = histo.getBuckets();

@@ -410,7 +409,7 @@ public class BucketSelectorIT extends ESIntegTestCase {

         assertSearchResponse(response);

-        InternalHistogram<Bucket> histo = response.getAggregations().get("histo");
+        Histogram histo = response.getAggregations().get("histo");
         assertThat(histo, notNullValue());
         assertThat(histo.getName(), equalTo("histo"));
         List<? extends Bucket> buckets = histo.getBuckets();

@@ -449,7 +448,7 @@ public class BucketSelectorIT extends ESIntegTestCase {

         assertSearchResponse(response);

-        InternalHistogram<Bucket> histo = response.getAggregations().get("histo");
+        Histogram histo = response.getAggregations().get("histo");
         assertThat(histo, notNullValue());
         assertThat(histo.getName(), equalTo("histo"));
         List<? extends Bucket> buckets = histo.getBuckets();

@@ -482,7 +481,7 @@ public class BucketSelectorIT extends ESIntegTestCase {

         assertSearchResponse(response);

-        InternalHistogram<Bucket> deriv = response.getAggregations().get("histo");
+        Histogram deriv = response.getAggregations().get("histo");
         assertThat(deriv, notNullValue());
         assertThat(deriv.getName(), equalTo("histo"));
         assertThat(deriv.getBuckets().size(), equalTo(0));

@@ -504,7 +503,7 @@ public class BucketSelectorIT extends ESIntegTestCase {

         assertSearchResponse(response);

-        InternalHistogram<Bucket> histo = response.getAggregations().get("histo");
+        Histogram histo = response.getAggregations().get("histo");
         assertThat(histo, notNullValue());
         assertThat(histo.getName(), equalTo("histo"));
         List<? extends Bucket> buckets = histo.getBuckets();

@@ -531,7 +530,7 @@ public class BucketSelectorIT extends ESIntegTestCase {
                         histogram("inner_histo")
                                 .field(FIELD_1_NAME)
                                 .interval(1)
-                                .extendedBounds(new ExtendedBounds(1L, 4L))
+                                .extendedBounds(1L, 4L)
                                 .minDocCount(0)
                                 .subAggregation(derivative("derivative", "_count")
                                         .gapPolicy(GapPolicy.INSERT_ZEROS))))

@@ -539,7 +538,7 @@ public class BucketSelectorIT extends ESIntegTestCase {

         assertSearchResponse(response);

-        InternalHistogram<Bucket> histo = response.getAggregations().get("histo");
+        Histogram histo = response.getAggregations().get("histo");
         assertThat(histo, notNullValue());
         assertThat(histo.getName(), equalTo("histo"));
         List<? extends Bucket> buckets = histo.getBuckets();

@@ -547,7 +546,7 @@ public class BucketSelectorIT extends ESIntegTestCase {

         Histogram.Bucket bucket = buckets.get(0);
         assertThat(bucket, notNullValue());
-        assertThat(bucket.getKeyAsString(), equalTo("1"));
+        assertThat(bucket.getKeyAsString(), equalTo("1.0"));
         Histogram innerHisto = bucket.getAggregations().get("inner_histo");
         assertThat(innerHisto, notNullValue());
         List<? extends Histogram.Bucket> innerBuckets = innerHisto.getBuckets();

@@ -564,7 +563,7 @@ public class BucketSelectorIT extends ESIntegTestCase {

         bucket = buckets.get(1);
         assertThat(bucket, notNullValue());
-        assertThat(bucket.getKeyAsString(), equalTo("2"));
+        assertThat(bucket.getKeyAsString(), equalTo("2.0"));
         innerHisto = bucket.getAggregations().get("inner_histo");
         assertThat(innerHisto, notNullValue());
         innerBuckets = innerHisto.getBuckets();

@@ -580,7 +579,7 @@ public class BucketSelectorIT extends ESIntegTestCase {
         }
         bucket = buckets.get(2);
         assertThat(bucket, notNullValue());
-        assertThat(bucket.getKeyAsString(), equalTo("3"));
+        assertThat(bucket.getKeyAsString(), equalTo("3.0"));
         innerHisto = bucket.getAggregations().get("inner_histo");
         assertThat(innerHisto, notNullValue());
         innerBuckets = innerHisto.getBuckets();

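The "1" to "1.0" (and "2" to "2.0", "3" to "3.0") assertion changes follow from plain histograms now keying their buckets numerically, so getKeyAsString() renders the key as a double. A tiny sketch of that assumption follows; HistogramKeys and hasKey are hypothetical names, not part of the patch.

import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;

// Sketch only: assumes a numeric bucket key of 1 is rendered as "1.0", which is what
// the updated BucketSelectorIT assertions expect.
final class HistogramKeys {
    static boolean hasKey(Histogram.Bucket bucket, double expectedKey) {
        return String.valueOf(expectedKey).equals(bucket.getKeyAsString());
    }
}
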
@@ -89,7 +89,7 @@ public class CumulativeSumIT extends ESIntegTestCase {
     public void testDocCount() throws Exception {
         SearchResponse response = client().prepareSearch("idx")
                 .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
-                        .extendedBounds(new ExtendedBounds((long) minRandomValue, (long) maxRandomValue))
+                        .extendedBounds(minRandomValue, maxRandomValue)
                         .subAggregation(cumulativeSum("cumulative_sum", "_count"))).execute().actionGet();

         assertSearchResponse(response);

@@ -120,7 +120,7 @@ public class CumulativeSumIT extends ESIntegTestCase {
                 .prepareSearch("idx")
                 .addAggregation(
                         histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
-                                .extendedBounds(new ExtendedBounds((long) minRandomValue, (long) maxRandomValue))
+                                .extendedBounds(minRandomValue, maxRandomValue)
                                 .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))
                                 .subAggregation(cumulativeSum("cumulative_sum", "sum"))).execute().actionGet();

@@ -25,7 +25,6 @@ import org.elasticsearch.index.mapper.core.DateFieldMapper;
 import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
 import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
 import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket;
-import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram;
 import org.elasticsearch.search.aggregations.metrics.sum.Sum;
 import org.elasticsearch.search.aggregations.pipeline.derivative.Derivative;
 import org.elasticsearch.search.aggregations.support.AggregationPath;

@@ -118,7 +117,7 @@ public class DateDerivativeIT extends ESIntegTestCase {

         assertSearchResponse(response);

-        InternalHistogram deriv = response.getAggregations().get("histo");
+        Histogram deriv = response.getAggregations().get("histo");
         assertThat(deriv, notNullValue());
         assertThat(deriv.getName(), equalTo("histo"));
         List<? extends Bucket> buckets = deriv.getBuckets();

@@ -161,7 +160,7 @@ public class DateDerivativeIT extends ESIntegTestCase {

         assertSearchResponse(response);

-        InternalHistogram deriv = response.getAggregations().get("histo");
+        Histogram deriv = response.getAggregations().get("histo");
         assertThat(deriv, notNullValue());
         assertThat(deriv.getName(), equalTo("histo"));
         List<? extends Bucket> buckets = deriv.getBuckets();

@@ -221,7 +220,7 @@ public class DateDerivativeIT extends ESIntegTestCase {

         assertSearchResponse(response);

-        InternalHistogram deriv = response.getAggregations().get("histo");
+        Histogram deriv = response.getAggregations().get("histo");
         assertThat(deriv, notNullValue());
         assertThat(deriv.getName(), equalTo("histo"));
         List<? extends Bucket> buckets = deriv.getBuckets();

@@ -259,7 +258,7 @@ public class DateDerivativeIT extends ESIntegTestCase {

         assertSearchResponse(response);

-        InternalHistogram deriv = response.getAggregations().get("histo");
+        Histogram deriv = response.getAggregations().get("histo");
         assertThat(deriv, notNullValue());
         assertThat(deriv.getName(), equalTo("histo"));
         List<? extends Bucket> buckets = deriv.getBuckets();

@@ -299,7 +298,7 @@ public class DateDerivativeIT extends ESIntegTestCase {

         assertSearchResponse(response);

-        InternalHistogram deriv = response.getAggregations().get("histo");
+        Histogram deriv = response.getAggregations().get("histo");
         assertThat(deriv, notNullValue());
         assertThat(deriv.getName(), equalTo("histo"));
         List<? extends Bucket> buckets = deriv.getBuckets();

@@ -345,7 +344,7 @@ public class DateDerivativeIT extends ESIntegTestCase {

         assertSearchResponse(response);

-        InternalHistogram histo = response.getAggregations().get("histo");
+        Histogram histo = response.getAggregations().get("histo");
         assertThat(histo, notNullValue());
         assertThat(histo.getName(), equalTo("histo"));
         List<? extends Bucket> buckets = histo.getBuckets();

@@ -413,7 +412,7 @@ public class DateDerivativeIT extends ESIntegTestCase {

         assertSearchResponse(response);

-        InternalHistogram deriv = response.getAggregations().get("histo");
+        Histogram deriv = response.getAggregations().get("histo");
         assertThat(deriv, notNullValue());
         assertThat(deriv.getName(), equalTo("histo"));
         List<? extends Bucket> buckets = deriv.getBuckets();

@@ -468,7 +467,7 @@ public class DateDerivativeIT extends ESIntegTestCase {

         assertSearchResponse(response);

-        InternalHistogram deriv = response.getAggregations().get("histo");
+        Histogram deriv = response.getAggregations().get("histo");
         assertThat(deriv, notNullValue());
         assertThat(deriv.getName(), equalTo("histo"));
         assertThat(deriv.getBuckets().size(), equalTo(0));

@@ -483,7 +482,7 @@ public class DateDerivativeIT extends ESIntegTestCase {

         assertSearchResponse(response);

-        InternalHistogram deriv = response.getAggregations().get("histo");
+        Histogram deriv = response.getAggregations().get("histo");
         assertThat(deriv, notNullValue());
         assertThat(deriv.getName(), equalTo("histo"));
         List<? extends Bucket> buckets = deriv.getBuckets();

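DateDerivativeIT needs only the InternalHistogram to Histogram substitution because, even with date histograms split from plain ones, both result types are still read through the shared Histogram interface shown in its import list. A short sketch of that read path follows; DateHistogramReads and bucketCount are hypothetical names, not part of the patch.

import java.util.List;

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;

// Sketch only: a date histogram named "histo" is read through the same Histogram
// interface the other tests use, so no implementation class is referenced.
final class DateHistogramReads {
    static int bucketCount(SearchResponse response) {
        Histogram deriv = response.getAggregations().get("histo");
        List<? extends Histogram.Bucket> buckets = deriv.getBuckets();
        return buckets.size();
    }
}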