Merge branch 'master' into ccr

* master:
  Stop skipping REST test after backport of #27056
  Fix default value of ignore_unavailable for snapshot REST API (#27056)
  Add composite aggregator (#26800)
  Fix `ShardSplittingQuery` to respect nested documents. (#27398)
  [Docs] Restore section about multi-level parent/child relation in parent-join (#27392)
  Add TcpChannel to unify Transport implementations (#27132)
  Add note on plugin distributions in plugins folder
  Remove implementations of `TransportChannel` (#27388)
  Update Google SDK to version 1.23 (#27381)
  Fix Gradle 4.3.1 compatibility for logging (#27382)
  [Test] Change Elasticsearch startup timeout to 120s in packaging tests
  Docs/windows installer (#27369)
Jason Tedor 2017-11-16 11:06:09 -05:00
commit c1e22572b3
146 changed files with 6836 additions and 1271 deletions

View File

@ -242,6 +242,7 @@ subprojects {
"org.elasticsearch.plugin:parent-join-client:${version}": ':modules:parent-join',
"org.elasticsearch.plugin:aggs-matrix-stats-client:${version}": ':modules:aggs-matrix-stats',
"org.elasticsearch.plugin:percolator-client:${version}": ':modules:percolator',
"org.elasticsearch.plugin:aggs-composite-client:${version}": ':modules:aggs-composite',
]
if (indexCompatVersions[-1].snapshot) {
/* The last and second to last versions can be snapshots. Rather than use

View File

@ -19,6 +19,8 @@
import java.nio.file.Files
import org.gradle.util.GradleVersion
apply plugin: 'groovy'
group = 'org.elasticsearch.gradle'
@ -99,9 +101,11 @@ dependencies {
// Gradle 2.14+ removed ProgressLogger(-Factory) classes from the public APIs
// Use logging dependency instead
// Gradle 4.3.1 stopped releasing the logging jars to jcenter, just use the last available one
GradleVersion logVersion = GradleVersion.current() > GradleVersion.version('4.3') ? GradleVersion.version('4.3') : GradleVersion.current()
dependencies {
compileOnly "org.gradle:gradle-logging:${GradleVersion.current().getVersion()}"
compileOnly "org.gradle:gradle-logging:${logVersion.getVersion()}"
compile 'ru.vyarus:gradle-animalsniffer-plugin:1.2.0' // Gradle 2.14 requires a version > 1.0.1
}

View File

@ -39,7 +39,8 @@ dependencies {
compile "org.elasticsearch.client:elasticsearch-rest-client:${version}"
compile "org.elasticsearch.plugin:parent-join-client:${version}"
compile "org.elasticsearch.plugin:aggs-matrix-stats-client:${version}"
compile "org.elasticsearch.plugin:aggs-composite-client:${version}"
testCompile "org.elasticsearch.client:test:${version}"
testCompile "org.elasticsearch.test:framework:${version}"
testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"

View File

@ -62,6 +62,7 @@ import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.aggregations.Aggregation;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.composite.CompositeAggregationBuilder;
import org.elasticsearch.search.aggregations.matrix.stats.MatrixStatsAggregationBuilder;
import org.elasticsearch.search.suggest.Suggest;
import org.elasticsearch.test.ESTestCase;
@ -647,7 +648,7 @@ public class RestHighLevelClientTests extends ESTestCase {
public void testProvidedNamedXContents() {
List<NamedXContentRegistry.Entry> namedXContents = RestHighLevelClient.getProvidedNamedXContents();
assertEquals(2, namedXContents.size());
assertEquals(3, namedXContents.size());
Map<Class<?>, Integer> categories = new HashMap<>();
List<String> names = new ArrayList<>();
for (NamedXContentRegistry.Entry namedXContent : namedXContents) {
@ -658,9 +659,10 @@ public class RestHighLevelClientTests extends ESTestCase {
}
}
assertEquals(1, categories.size());
assertEquals(Integer.valueOf(2), categories.get(Aggregation.class));
assertEquals(Integer.valueOf(3), categories.get(Aggregation.class));
assertTrue(names.contains(ChildrenAggregationBuilder.NAME));
assertTrue(names.contains(MatrixStatsAggregationBuilder.NAME));
assertTrue(names.contains(CompositeAggregationBuilder.NAME));
}
private static class TrackingActionListener implements ActionListener<Integer> {

View File

@ -32,6 +32,7 @@ dependencies {
compile "org.elasticsearch.plugin:lang-mustache-client:${version}"
compile "org.elasticsearch.plugin:percolator-client:${version}"
compile "org.elasticsearch.plugin:parent-join-client:${version}"
compile "org.elasticsearch.plugin:aggs-composite-client:${version}"
testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
testCompile "junit:junit:${versions.junit}"
testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}"

View File

@ -30,6 +30,7 @@ import org.elasticsearch.join.ParentJoinPlugin;
import org.elasticsearch.percolator.PercolatorPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.mustache.MustachePlugin;
import org.elasticsearch.search.aggregations.composite.CompositeAggregationPlugin;
import org.elasticsearch.transport.Netty4Plugin;
import java.util.Arrays;
@ -44,6 +45,7 @@ import java.util.concurrent.TimeUnit;
* {@link PercolatorPlugin},
* {@link MustachePlugin},
 * {@link ParentJoinPlugin},
* {@link CompositeAggregationPlugin}
* plugins for the client. These plugins are all the required modules for Elasticsearch.
*/
@SuppressWarnings({"unchecked","varargs"})
@ -88,7 +90,8 @@ public class PreBuiltTransportClient extends TransportClient {
ReindexPlugin.class,
PercolatorPlugin.class,
MustachePlugin.class,
ParentJoinPlugin.class));
ParentJoinPlugin.class,
CompositeAggregationPlugin.class));
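Note: with CompositeAggregationPlugin in the pre-installed list, a client built this way can parse composite aggregation responses out of the box. A minimal construction sketch (host and port are illustrative, not part of this change):

import java.net.InetAddress;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.transport.client.PreBuiltTransportClient;

// all pre-installed plugins, including the composite aggregation module, are registered automatically
TransportClient client = new PreBuiltTransportClient(Settings.EMPTY)
        .addTransportAddress(new TransportAddress(InetAddress.getByName("localhost"), 9300));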
/**
* Creates a new transport client with pre-installed plugins.

View File

@ -30,6 +30,7 @@ import org.elasticsearch.percolator.PercolatorPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.mustache.MustachePlugin;
import org.elasticsearch.transport.Netty4Plugin;
import org.elasticsearch.search.aggregations.composite.CompositeAggregationPlugin;
import org.junit.Test;
import java.util.Arrays;
@ -52,7 +53,8 @@ public class PreBuiltTransportClientTests extends RandomizedTest {
@Test
public void testInstallPluginTwice() {
for (Class<? extends Plugin> plugin :
Arrays.asList(ParentJoinPlugin.class, ReindexPlugin.class, PercolatorPlugin.class, MustachePlugin.class)) {
Arrays.asList(ParentJoinPlugin.class, ReindexPlugin.class, PercolatorPlugin.class,
MustachePlugin.class, CompositeAggregationPlugin.class)) {
try {
new PreBuiltTransportClient(Settings.EMPTY, plugin);
fail("exception expected");

View File

@ -24,6 +24,7 @@ import org.elasticsearch.common.CheckedConsumer;
import java.util.ArrayList;
import java.util.List;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
/**
@ -69,6 +70,42 @@ public interface ActionListener<Response> {
};
}
/**
* Creates a listener that listens for a response (or failure) and executes the
* corresponding runnable when the response (or failure) is received.
*
* @param runnable the runnable that will be called in event of success or failure
* @param <Response> the type of the response
* @return a listener that listens for responses and invokes the runnable when received
*/
static <Response> ActionListener<Response> wrap(Runnable runnable) {
return wrap(r -> runnable.run(), e -> runnable.run());
}
/**
* Converts a listener to a {@link BiConsumer} for compatibility with the {@link java.util.concurrent.CompletableFuture}
* api.
*
* @param listener that will be wrapped
* @param <Response> the type of the response
* @return a bi consumer that will complete the wrapped listener
*/
static <Response> BiConsumer<Response, Throwable> toBiConsumer(ActionListener<Response> listener) {
return (response, throwable) -> {
if (throwable == null) {
listener.onResponse(response);
} else {
if (throwable instanceof Exception) {
listener.onFailure((Exception) throwable);
} else if (throwable instanceof Error) {
throw (Error) throwable;
} else {
throw new AssertionError("Should have been either Error or Exception", throwable);
}
}
};
}
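Note: a short usage sketch for the two new helpers; the future and listener names below are illustrative:

import java.util.concurrent.CompletableFuture;
import org.elasticsearch.action.ActionListener;

// run the same cleanup on success or failure
ActionListener<String> cleanup = ActionListener.wrap(() -> System.out.println("done"));

// bridge the listener onto a CompletableFuture via the new BiConsumer adapter
CompletableFuture<String> future = new CompletableFuture<>();
future.whenComplete(ActionListener.toBiConsumer(cleanup));
future.complete("ok"); // completes the future and invokes cleanup.onResponse("ok")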
/**
* Notifies every given listener with the response passed to {@link #onResponse(Object)}. If a listener itself throws an exception
* the exception is forwarded to {@link #onFailure(Exception)}. If in turn {@link #onFailure(Exception)} fails all remaining

View File

@ -380,8 +380,9 @@ public class CreateSnapshotRequest extends MasterNodeRequest<CreateSnapshotReque
* @param source snapshot definition
* @return this request
*/
@SuppressWarnings("unchecked")
public CreateSnapshotRequest source(Map<String, Object> source) {
for (Map.Entry<String, Object> entry : ((Map<String, Object>) source).entrySet()) {
for (Map.Entry<String, Object> entry : source.entrySet()) {
String name = entry.getKey();
if (name.equals("indices")) {
if (entry.getValue() instanceof String) {
@ -402,7 +403,7 @@ public class CreateSnapshotRequest extends MasterNodeRequest<CreateSnapshotReque
includeGlobalState = nodeBooleanValue(entry.getValue(), "include_global_state");
}
}
indicesOptions(IndicesOptions.fromMap((Map<String, Object>) source, IndicesOptions.lenientExpandOpen()));
indicesOptions(IndicesOptions.fromMap(source, indicesOptions));
return this;
}
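Note: the behavioral fix here (#27056) is that a source map without explicit flags no longer overwrites the request's options with lenientExpandOpen. A hedged sketch of the effect, assuming the request's default options:

import java.util.HashMap;
import java.util.Map;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest;

CreateSnapshotRequest request = new CreateSnapshotRequest("repo", "snapshot_1");
Map<String, Object> source = new HashMap<>();
source.put("indices", "index_1,index_2"); // no ignore_unavailable key in the map
request.source(source);
// the request keeps its own default (ignore_unavailable=false) instead of
// silently switching to the lenient default as before
assert request.indicesOptions().ignoreUnavailable() == false;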

View File

@ -505,6 +505,7 @@ public class RestoreSnapshotRequest extends MasterNodeRequest<RestoreSnapshotReq
* @param source restore definition
* @return this request
*/
@SuppressWarnings("unchecked")
public RestoreSnapshotRequest source(Map<String, Object> source) {
for (Map.Entry<String, Object> entry : source.entrySet()) {
String name = entry.getKey();
@ -558,7 +559,7 @@ public class RestoreSnapshotRequest extends MasterNodeRequest<RestoreSnapshotReq
}
}
}
indicesOptions(IndicesOptions.fromMap((Map<String, Object>) source, IndicesOptions.lenientExpandOpen()));
indicesOptions(IndicesOptions.fromMap(source, indicesOptions));
return this;
}

View File

@ -235,7 +235,7 @@ public final class IndexSortConfig {
SortField.Type.FLOAT
);
static SortField.Type getSortFieldType(SortField sortField) {
public static SortField.Type getSortFieldType(SortField sortField) {
if (sortField instanceof SortedSetSortField) {
return SortField.Type.STRING;
} else if (sortField instanceof SortedNumericSortField) {

View File

@ -19,9 +19,11 @@
package org.elasticsearch.index.shard;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.index.StoredFieldVisitor;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
@ -33,19 +35,23 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.join.BitSetProducer;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.BitSetIterator;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.OperationRouting;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.index.mapper.IdFieldMapper;
import org.elasticsearch.index.mapper.RoutingFieldMapper;
import org.elasticsearch.index.mapper.Uid;
import java.io.IOException;
import java.util.function.Function;
import java.util.function.IntConsumer;
import java.util.function.IntPredicate;
import java.util.function.Predicate;
/**
@ -56,16 +62,17 @@ import java.util.function.Predicate;
final class ShardSplittingQuery extends Query {
private final IndexMetaData indexMetaData;
private final int shardId;
private final BitSetProducer nestedParentBitSetProducer;
ShardSplittingQuery(IndexMetaData indexMetaData, int shardId) {
ShardSplittingQuery(IndexMetaData indexMetaData, int shardId, boolean hasNested) {
if (indexMetaData.getCreationVersion().before(Version.V_6_0_0_rc2)) {
throw new IllegalArgumentException("Splitting query can only be executed on an index created with version "
+ Version.V_6_0_0_rc2 + " or higher");
}
this.indexMetaData = indexMetaData;
this.shardId = shardId;
this.nestedParentBitSetProducer = hasNested ? newParentDocBitSetProducer() : null;
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) {
return new ConstantScoreWeight(this, boost) {
@ -84,44 +91,87 @@ final class ShardSplittingQuery extends Query {
Uid.decodeId(ref.bytes, ref.offset, ref.length), null);
return shardId == targetShardId;
};
if (terms == null) { // this is the common case - no partitioning and no _routing values
if (terms == null) {
// this is the common case - no partitioning and no _routing values
// in this case we also don't do anything special with regards to nested docs since we basically delete
// by ID and parent and nested all have the same id.
assert indexMetaData.isRoutingPartitionedIndex() == false;
findSplitDocs(IdFieldMapper.NAME, includeInShard, leafReader, bitSet::set);
} else {
final BitSet parentBitSet;
if (nestedParentBitSetProducer == null) {
parentBitSet = null;
} else {
parentBitSet = nestedParentBitSetProducer.getBitSet(context);
if (parentBitSet == null) {
return null; // no matches
}
}
if (indexMetaData.isRoutingPartitionedIndex()) {
// this is the heaviest invariant. Here we have to visit all docs' stored fields to extract _id and _routing
// since this index is routing partitioned.
Visitor visitor = new Visitor();
return new ConstantScoreScorer(this, score(),
new RoutingPartitionedDocIdSetIterator(leafReader, visitor));
Visitor visitor = new Visitor(leafReader);
TwoPhaseIterator twoPhaseIterator =
parentBitSet == null ? new RoutingPartitionedDocIdSetIterator(visitor) :
new NestedRoutingPartitionedDocIdSetIterator(visitor, parentBitSet);
return new ConstantScoreScorer(this, score(), twoPhaseIterator);
} else {
// here we potentially guard the docID consumers with our parent bitset if we have one.
// this ensures that we are only marking root documents in the nested case and if necessary
// we do a second pass to mark the corresponding children in markChildDocs
Function<IntConsumer, IntConsumer> maybeWrapConsumer = consumer -> {
if (parentBitSet != null) {
return docId -> {
if (parentBitSet.get(docId)) {
consumer.accept(docId);
}
};
}
return consumer;
};
// in the _routing case we first go and find all docs that have a routing value and mark the ones we have to delete
findSplitDocs(RoutingFieldMapper.NAME, ref -> {
int targetShardId = OperationRouting.generateShardId(indexMetaData, null, ref.utf8ToString());
return shardId == targetShardId;
}, leafReader, bitSet::set);
}, leafReader, maybeWrapConsumer.apply(bitSet::set));
// now if we have a mixed index where some docs have a _routing value and some don't we have to exclude the ones
// with a routing value from the next iteration and delete / select based on the ID.
if (terms.getDocCount() != leafReader.maxDoc()) {
// this is a special case where some of the docs have no routing values. This sucks but it's possible today
FixedBitSet hasRoutingValue = new FixedBitSet(leafReader.maxDoc());
findSplitDocs(RoutingFieldMapper.NAME, ref -> false, leafReader,
hasRoutingValue::set);
findSplitDocs(RoutingFieldMapper.NAME, ref -> false, leafReader, maybeWrapConsumer.apply(hasRoutingValue::set));
IntConsumer bitSetConsumer = maybeWrapConsumer.apply(bitSet::set);
findSplitDocs(IdFieldMapper.NAME, includeInShard, leafReader, docId -> {
if (hasRoutingValue.get(docId) == false) {
bitSet.set(docId);
bitSetConsumer.accept(docId);
}
});
}
}
if (parentBitSet != null) {
// if nested docs are involved we also need to mark all child docs that belong to a matching parent doc.
markChildDocs(parentBitSet, bitSet);
}
}
return new ConstantScoreScorer(this, score(), new BitSetIterator(bitSet, bitSet.length()));
}
};
}
private void markChildDocs(BitSet parentDocs, BitSet matchingDocs) {
int currentDeleted = 0;
while (currentDeleted < matchingDocs.length() &&
(currentDeleted = matchingDocs.nextSetBit(currentDeleted)) != DocIdSetIterator.NO_MORE_DOCS) {
int previousParent = parentDocs.prevSetBit(Math.max(0, currentDeleted-1));
for (int i = previousParent + 1; i < currentDeleted; i++) {
matchingDocs.set(i);
}
currentDeleted++;
}
}
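Note: markChildDocs relies on Lucene's block layout, where nested children are stored immediately before their parent. A small worked example with made-up doc ids, repeating the same walk the method performs:

import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.FixedBitSet;

// docs 0..5: children at 0,1 belong to parent 2; children at 3,4 belong to parent 5
FixedBitSet parents = new FixedBitSet(6);
parents.set(2);
parents.set(5);
FixedBitSet matches = new FixedBitSet(6);
matches.set(5); // the split query matched parent doc 5

int current = 0;
while (current < matches.length() && (current = matches.nextSetBit(current)) != DocIdSetIterator.NO_MORE_DOCS) {
    // walk back to the previous parent and mark everything in between, i.e. the children
    int previousParent = parents.prevSetBit(Math.max(0, current - 1));
    for (int i = previousParent + 1; i < current; i++) {
        matches.set(i);
    }
    current++;
}
// matches now holds 3, 4 and 5: the whole nested block is deleted together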
@Override
public String toString(String field) {
return "shard_splitting_query";
@ -145,8 +195,8 @@ final class ShardSplittingQuery extends Query {
return classHash() ^ result;
}
private static void findSplitDocs(String idField, Predicate<BytesRef> includeInShard,
LeafReader leafReader, IntConsumer consumer) throws IOException {
private static void findSplitDocs(String idField, Predicate<BytesRef> includeInShard, LeafReader leafReader,
IntConsumer consumer) throws IOException {
Terms terms = leafReader.terms(idField);
TermsEnum iterator = terms.iterator();
BytesRef idTerm;
@ -162,15 +212,17 @@ final class ShardSplittingQuery extends Query {
}
}
private static final class Visitor extends StoredFieldVisitor {
int leftToVisit = 2;
final BytesRef spare = new BytesRef();
String routing;
String id;
/* this class is a stored fields visitor that reads _id and/or _routing from the stored fields, which is necessary in the case
of a routing partitioned index since otherwise we would need to un-invert the _id and _routing field which is memory heavy */
private final class Visitor extends StoredFieldVisitor {
final LeafReader leafReader;
private int leftToVisit = 2;
private final BytesRef spare = new BytesRef();
private String routing;
private String id;
void reset() {
routing = id = null;
leftToVisit = 2;
Visitor(LeafReader leafReader) {
this.leafReader = leafReader;
}
@Override
@ -210,29 +262,32 @@ final class ShardSplittingQuery extends Query {
return leftToVisit == 0 ? Status.STOP : Status.NO;
}
}
boolean matches(int doc) throws IOException {
routing = id = null;
leftToVisit = 2;
leafReader.document(doc, this);
assert id != null : "docID must not be null - we might have hit a nested document";
int targetShardId = OperationRouting.generateShardId(indexMetaData, id, routing);
return targetShardId != shardId;
}
}
/**
* This two phase iterator visits every live doc and selects all docs that don't belong to this
* shard based on their id and routing value. This is only used in a routing partitioned index.
*/
private final class RoutingPartitionedDocIdSetIterator extends TwoPhaseIterator {
private final LeafReader leafReader;
private static final class RoutingPartitionedDocIdSetIterator extends TwoPhaseIterator {
private final Visitor visitor;
RoutingPartitionedDocIdSetIterator(LeafReader leafReader, Visitor visitor) {
super(DocIdSetIterator.all(leafReader.maxDoc())); // we iterate all live-docs
this.leafReader = leafReader;
RoutingPartitionedDocIdSetIterator(Visitor visitor) {
super(DocIdSetIterator.all(visitor.leafReader.maxDoc())); // we iterate all live-docs
this.visitor = visitor;
}
@Override
public boolean matches() throws IOException {
int doc = approximation.docID();
visitor.reset();
leafReader.document(doc, visitor);
int targetShardId = OperationRouting.generateShardId(indexMetaData, visitor.id, visitor.routing);
return targetShardId != shardId;
return visitor.matches(approximation.docID());
}
@Override
@ -240,6 +295,58 @@ final class ShardSplittingQuery extends Query {
return 42; // that's obvious, right?
}
}
/**
* This TwoPhaseIterator marks all nested docs of matching parents as matches as well.
*/
private static final class NestedRoutingPartitionedDocIdSetIterator extends TwoPhaseIterator {
private final Visitor visitor;
private final BitSet parentDocs;
private int nextParent = -1;
private boolean nextParentMatches;
NestedRoutingPartitionedDocIdSetIterator(Visitor visitor, BitSet parentDocs) {
super(DocIdSetIterator.all(visitor.leafReader.maxDoc())); // we iterate all live-docs
this.parentDocs = parentDocs;
this.visitor = visitor;
}
@Override
public boolean matches() throws IOException {
// the educated reader might ask why this works; it does because all live doc ids (root docs and nested docs) are evaluated in
// order and that way we don't need to seek backwards as we do in other nested docs cases.
int doc = approximation.docID();
if (doc > nextParent) {
// we only check once per nested/parent set
nextParent = parentDocs.nextSetBit(doc);
// never check a child document against the visitor, they neither have _id nor _routing as stored fields
nextParentMatches = visitor.matches(nextParent);
}
return nextParentMatches;
}
@Override
public float matchCost() {
return 42; // that's obvious, right?
}
}
/*
* this is used internally to obtain a bitset for parent documents. We don't cache this since we never access the same reader more
* than once. There is no point in using BitsetFilterCache#BitSetProducerWarmer since we use this only as a delete by query which is
* executed on a recovery-private index writer. There is no point in caching it and it won't have a cache hit either.
*/
private static BitSetProducer newParentDocBitSetProducer() {
return context -> {
Query query = Queries.newNonNestedFilter();
final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context);
final IndexSearcher searcher = new IndexSearcher(topLevelContext);
searcher.setQueryCache(null);
final Weight weight = searcher.createNormalizedWeight(query, false);
Scorer s = weight.scorer(context);
return s == null ? null : BitSet.of(s.iterator(), context.reader().maxDoc());
};
}
}

View File

@ -115,6 +115,7 @@ final class StoreRecovery {
indexShard.mapperService().merge(sourceMetaData, MapperService.MergeReason.MAPPING_RECOVERY, true);
// now that the mapping is merged we can validate the index sort configuration.
Sort indexSort = indexShard.getIndexSort();
final boolean hasNested = indexShard.mapperService().hasNested();
final boolean isSplit = sourceMetaData.getNumberOfShards() < indexShard.indexSettings().getNumberOfShards();
assert isSplit == false || sourceMetaData.getCreationVersion().onOrAfter(Version.V_6_0_0_alpha1) : "for split we require a " +
"single type but the index is created before 6.0.0";
@ -127,7 +128,7 @@ final class StoreRecovery {
final long maxUnsafeAutoIdTimestamp =
shards.stream().mapToLong(LocalShardSnapshot::maxUnsafeAutoIdTimestamp).max().getAsLong();
addIndices(indexShard.recoveryState().getIndex(), directory, indexSort, sources, maxSeqNo, maxUnsafeAutoIdTimestamp,
indexShard.indexSettings().getIndexMetaData(), indexShard.shardId().id(), isSplit);
indexShard.indexSettings().getIndexMetaData(), indexShard.shardId().id(), isSplit, hasNested);
internalRecoverFromStore(indexShard);
// just trigger a merge to do housekeeping on the
// copied segments - we will also see them in stats etc.
@ -142,8 +143,8 @@ final class StoreRecovery {
}
void addIndices(final RecoveryState.Index indexRecoveryStats, final Directory target, final Sort indexSort, final Directory[] sources,
final long maxSeqNo, final long maxUnsafeAutoIdTimestamp, IndexMetaData indexMetaData, int shardId, boolean split)
throws IOException {
final long maxSeqNo, final long maxUnsafeAutoIdTimestamp, IndexMetaData indexMetaData, int shardId, boolean split,
boolean hasNested) throws IOException {
final Directory hardLinkOrCopyTarget = new org.apache.lucene.store.HardlinkCopyDirectoryWrapper(target);
IndexWriterConfig iwc = new IndexWriterConfig(null)
.setCommitOnClose(false)
@ -158,9 +159,8 @@ final class StoreRecovery {
try (IndexWriter writer = new IndexWriter(new StatsDirectoryWrapper(hardLinkOrCopyTarget, indexRecoveryStats), iwc)) {
writer.addIndexes(sources);
if (split) {
writer.deleteDocuments(new ShardSplittingQuery(indexMetaData, shardId));
writer.deleteDocuments(new ShardSplittingQuery(indexMetaData, shardId, hasNested));
}
/*
* We set the maximum sequence number and the local checkpoint on the target to the maximum of the maximum sequence numbers on

View File

@ -171,8 +171,12 @@ public abstract class ParsedMultiBucketAggregation<B extends ParsedMultiBucketAg
bucket.setDocCount(parser.longValue());
}
} else if (token == XContentParser.Token.START_OBJECT) {
XContentParserUtils.parseTypedKeysObject(parser, Aggregation.TYPED_KEYS_DELIMITER, Aggregation.class,
if (CommonFields.KEY.getPreferredName().equals(currentFieldName)) {
keyConsumer.accept(parser, bucket);
} else {
XContentParserUtils.parseTypedKeysObject(parser, Aggregation.TYPED_KEYS_DELIMITER, Aggregation.class,
aggregations::add);
}
}
}
bucket.setAggregations(new Aggregations(aggregations));

View File

@ -51,7 +51,7 @@ public enum SortOrder implements Writeable {
}
};
static SortOrder readFromStream(StreamInput in) throws IOException {
public static SortOrder readFromStream(StreamInput in) throws IOException {
return in.readEnum(SortOrder.class);
}

View File

@ -208,12 +208,12 @@ public final class ConnectionProfile {
* Returns one of the channels configured for this handle. The channel is selected in a round-robin
* fashion.
*/
<T> T getChannel(T[] channels) {
<T> T getChannel(List<T> channels) {
if (length == 0) {
throw new IllegalStateException("can't select channel size is 0 for types: " + types);
}
assert channels.length >= offset + length : "illegal size: " + channels.length + " expected >= " + (offset + length);
return channels[offset + Math.floorMod(counter.incrementAndGet(), length)];
assert channels.size() >= offset + length : "illegal size: " + channels.size() + " expected >= " + (offset + length);
return channels.get(offset + Math.floorMod(counter.incrementAndGet(), length));
}
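Note: a hedged illustration of the selection arithmetic with made-up values; the handle owns the slice [offset, offset + length) of the channel list:

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

List<String> channels = Arrays.asList("c0", "c1", "c2", "c3", "c4");
int offset = 2;
int length = 3; // this handle owns c2, c3 and c4
AtomicInteger counter = new AtomicInteger();
for (int i = 0; i < 4; i++) {
    // floorMod keeps the index inside [0, length) even if the counter wraps around
    System.out.println(channels.get(offset + Math.floorMod(counter.incrementAndGet(), length)));
}
// prints c3, c4, c2, c3 - a round-robin over the handle's slice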
/**
@ -223,5 +223,4 @@ public final class ConnectionProfile {
return types;
}
}
}

View File

@ -25,7 +25,6 @@ import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskManager;
import java.io.IOException;
import java.util.function.Supplier;
public class RequestHandlerRegistry<Request extends TransportRequest> {
@ -64,7 +63,7 @@ public class RequestHandlerRegistry<Request extends TransportRequest> {
} else {
boolean success = false;
try {
handler.messageReceived(request, new TransportChannelWrapper(taskManager, task, channel), task);
handler.messageReceived(request, new TaskTransportChannel(taskManager, task, channel), task);
success = true;
} finally {
if (success == false) {
@ -91,38 +90,4 @@ public class RequestHandlerRegistry<Request extends TransportRequest> {
return handler.toString();
}
private static class TransportChannelWrapper extends DelegatingTransportChannel {
private final Task task;
private final TaskManager taskManager;
TransportChannelWrapper(TaskManager taskManager, Task task, TransportChannel channel) {
super(channel);
this.task = task;
this.taskManager = taskManager;
}
@Override
public void sendResponse(TransportResponse response) throws IOException {
endTask();
super.sendResponse(response);
}
@Override
public void sendResponse(TransportResponse response, TransportResponseOptions options) throws IOException {
endTask();
super.sendResponse(response, options);
}
@Override
public void sendResponse(Exception exception) throws IOException {
endTask();
super.sendResponse(exception);
}
private void endTask() {
taskManager.unregister(task);
}
}
}

View File

@ -20,24 +20,22 @@
package org.elasticsearch.transport;
import org.elasticsearch.Version;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskManager;
import java.io.IOException;
/**
* Wrapper around transport channel that delegates all requests to the
* underlying channel
*/
public class DelegatingTransportChannel implements TransportChannel {
public class TaskTransportChannel implements TransportChannel {
private final Task task;
private final TaskManager taskManager;
private final TransportChannel channel;
protected DelegatingTransportChannel(TransportChannel channel) {
TaskTransportChannel(TaskManager taskManager, Task task, TransportChannel channel) {
this.channel = channel;
}
@Override
public String action() {
return channel.action();
this.task = task;
this.taskManager = taskManager;
}
@Override
@ -45,11 +43,6 @@ public class DelegatingTransportChannel implements TransportChannel {
return channel.getProfileName();
}
@Override
public long getRequestId() {
return channel.getRequestId();
}
@Override
public String getChannelType() {
return channel.getChannelType();
@ -57,25 +50,32 @@ public class DelegatingTransportChannel implements TransportChannel {
@Override
public void sendResponse(TransportResponse response) throws IOException {
endTask();
channel.sendResponse(response);
}
@Override
public void sendResponse(TransportResponse response, TransportResponseOptions options) throws IOException {
endTask();
channel.sendResponse(response, options);
}
@Override
public void sendResponse(Exception exception) throws IOException {
endTask();
channel.sendResponse(exception);
}
public TransportChannel getChannel() {
return channel;
}
@Override
public Version getVersion() {
return channel.getVersion();
}
public TransportChannel getChannel() {
return channel;
}
private void endTask() {
taskManager.unregister(task);
}
}

View File

@ -0,0 +1,169 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.transport;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.unit.TimeValue;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
/**
* This is a tcp channel representing a single channel connection to another node. It is the base channel
* abstraction used by the {@link TcpTransport} and {@link TransportService}. All tcp transport
* implementations must return channels that adhere to the required method contracts.
*/
public interface TcpChannel extends Releasable {
/**
* Closes the channel. This might be an asynchronous process. There is no guarantee that the channel
* will be closed when this method returns. Use the {@link #addCloseListener(ActionListener)} method
* to implement logic that depends on knowing when the channel is closed.
*/
void close();
/**
* Adds a listener that will be executed when the channel is closed. If the channel is still open when
* this listener is added, the listener will be executed by the thread that eventually closes the
* channel. If the channel is already closed when the listener is added the listener will immediately be
* executed by the thread that is attempting to add the listener.
*
* @param listener to be executed
*/
void addCloseListener(ActionListener<TcpChannel> listener);
/**
* This sets the low level socket option {@link java.net.StandardSocketOptions} SO_LINGER on a channel.
*
* @param value to set for SO_LINGER
* @throws IOException that can be thrown by the low level socket implementation
*/
void setSoLinger(int value) throws IOException;
/**
* Indicates whether a channel is currently open
*
* @return boolean indicating if channel is open
*/
boolean isOpen();
/**
* Closes the channel.
*
* @param channel to close
* @param blocking indicates if we should block on channel close
*/
static <C extends TcpChannel> void closeChannel(C channel, boolean blocking) {
closeChannels(Collections.singletonList(channel), blocking);
}
/**
* Closes the channels.
*
* @param channels to close
* @param blocking indicates if we should block on channel close
*/
static <C extends TcpChannel> void closeChannels(List<C> channels, boolean blocking) {
if (blocking) {
ArrayList<ActionFuture<TcpChannel>> futures = new ArrayList<>(channels.size());
for (final C channel : channels) {
if (channel.isOpen()) {
PlainActionFuture<TcpChannel> closeFuture = PlainActionFuture.newFuture();
channel.addCloseListener(closeFuture);
channel.close();
futures.add(closeFuture);
}
}
blockOnFutures(futures);
} else {
Releasables.close(channels);
}
}
/**
* Waits for all of the pending connections to complete. Will throw an exception if at least one of the
* connections fails.
*
* @param discoveryNode the node for the pending connections
* @param connectionFutures representing the pending connections
* @param connectTimeout to wait for a connection
* @param <C> the type of channel
* @throws ConnectTransportException if one of the connections fails
*/
static <C extends TcpChannel> void awaitConnected(DiscoveryNode discoveryNode, List<ActionFuture<C>> connectionFutures,
TimeValue connectTimeout) throws ConnectTransportException {
Exception connectionException = null;
boolean allConnected = true;
for (ActionFuture<C> connectionFuture : connectionFutures) {
try {
connectionFuture.get(connectTimeout.getMillis(), TimeUnit.MILLISECONDS);
} catch (TimeoutException e) {
allConnected = false;
break;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new IllegalStateException(e);
} catch (ExecutionException e) {
allConnected = false;
connectionException = (Exception) e.getCause();
break;
}
}
if (allConnected == false) {
if (connectionException == null) {
throw new ConnectTransportException(discoveryNode, "connect_timeout[" + connectTimeout + "]");
} else {
throw new ConnectTransportException(discoveryNode, "connect_exception", connectionException);
}
}
}
static void blockOnFutures(List<ActionFuture<TcpChannel>> futures) {
for (ActionFuture<TcpChannel> future : futures) {
try {
future.get();
} catch (ExecutionException e) {
// Ignore as we are only interested in waiting for the close process to complete. Logging
// close exceptions happens elsewhere.
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new IllegalStateException("Future got interrupted", e);
}
}
}
}
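Note: a usage sketch for the close plumbing; channel stands for any concrete TcpChannel implementation and logger is assumed to be in scope:

// register the listener before closing so a close failure is reported exactly once
channel.addCloseListener(ActionListener.wrap(
        c -> logger.debug("channel closed"),
        e -> logger.warn("failed to close channel", e)));
// blocking close - per the javadoc above, never do this from a network thread
TcpChannel.closeChannel(channel, true);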

View File

@ -25,8 +25,10 @@ import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.NotifyOnceListener;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.CheckedBiConsumer;
import org.elasticsearch.common.Nullable;
@ -104,7 +106,6 @@ import java.util.function.Consumer;
import java.util.function.Function;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import static java.util.Collections.emptyList;
import static java.util.Collections.unmodifiableMap;
@ -117,7 +118,7 @@ import static org.elasticsearch.common.transport.NetworkExceptionHelper.isCloseC
import static org.elasticsearch.common.transport.NetworkExceptionHelper.isConnectException;
import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap;
public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent implements Transport {
public abstract class TcpTransport<Channel extends TcpChannel> extends AbstractLifecycleComponent implements Transport {
public static final String TRANSPORT_SERVER_WORKER_THREAD_NAME_PREFIX = "transport_server_worker";
public static final String TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX = "transport_client_boss";
@ -178,7 +179,7 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
public static final Setting.AffixSetting<List<String>> PUBLISH_HOST_PROFILE = affixKeySetting("transport.profiles.", "publish_host",
key -> listSetting(key, PUBLISH_HOST, Function.identity(), Setting.Property.NodeScope));
public static final Setting.AffixSetting<String> PORT_PROFILE = affixKeySetting("transport.profiles.", "port",
key -> new Setting(key, PORT, Function.identity(), Setting.Property.NodeScope));
key -> new Setting<>(key, PORT, Function.identity(), Setting.Property.NodeScope));
public static final Setting.AffixSetting<Integer> PUBLISH_PORT_PROFILE = affixKeySetting("transport.profiles.", "publish_port",
key -> intSetting(key, -1, -1, Setting.Property.NodeScope));
@ -197,8 +198,9 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
// node id to actual channel
protected final ConcurrentMap<DiscoveryNode, NodeChannels> connectedNodes = newConcurrentMap();
protected final Map<String, List<Channel>> serverChannels = newConcurrentMap();
protected final ConcurrentMap<String, BoundTransportAddress> profileBoundAddresses = newConcurrentMap();
private final Map<String, List<Channel>> serverChannels = newConcurrentMap();
private final Set<Channel> acceptedChannels = Collections.newSetFromMap(new ConcurrentHashMap<>());
protected final KeyedLock<String> connectionLock = new KeyedLock<>();
private final NamedWriteableRegistry namedWriteableRegistry;
@ -347,7 +349,7 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
@Override
protected void innerOnFailure(Exception e) {
if (isOpen(channel)) {
if (channel.isOpen()) {
logger.debug(
(Supplier<?>) () -> new ParameterizedMessage("[{}] failed to send ping transport message", node), e);
failedPings.inc();
@ -395,29 +397,22 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
public final class NodeChannels implements Connection {
private final Map<TransportRequestOptions.Type, ConnectionProfile.ConnectionTypeHandle> typeMapping;
private final Channel[] channels;
private final List<Channel> channels;
private final DiscoveryNode node;
private final AtomicBoolean closed = new AtomicBoolean(false);
private final Version version;
public NodeChannels(DiscoveryNode node, Channel[] channels, ConnectionProfile connectionProfile) {
NodeChannels(DiscoveryNode node, List<Channel> channels, ConnectionProfile connectionProfile, Version handshakeVersion) {
this.node = node;
this.channels = channels;
assert channels.length == connectionProfile.getNumConnections() : "expected channels size to be == "
+ connectionProfile.getNumConnections() + " but was: [" + channels.length + "]";
this.channels = Collections.unmodifiableList(channels);
assert channels.size() == connectionProfile.getNumConnections() : "expected channels size to be == "
+ connectionProfile.getNumConnections() + " but was: [" + channels.size() + "]";
typeMapping = new EnumMap<>(TransportRequestOptions.Type.class);
for (ConnectionProfile.ConnectionTypeHandle handle : connectionProfile.getHandles()) {
for (TransportRequestOptions.Type type : handle.getTypes())
typeMapping.put(type, handle);
}
version = node.getVersion();
}
NodeChannels(NodeChannels channels, Version handshakeVersion) {
this.node = channels.node;
this.channels = channels.channels;
this.typeMapping = channels.typeMapping;
this.version = handshakeVersion;
version = handshakeVersion;
}
@Override
@ -426,7 +421,7 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
}
public List<Channel> getChannels() {
return Arrays.asList(channels);
return channels;
}
public Channel channel(TransportRequestOptions.Type type) {
@ -437,12 +432,34 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
return connectionTypeHandle.getChannel(channels);
}
public boolean allChannelsOpen() {
return channels.stream().allMatch(TcpChannel::isOpen);
}
@Override
public void close() throws IOException {
if (closed.compareAndSet(false, true)) {
try {
closeChannels(Arrays.stream(channels).filter(Objects::nonNull).collect(Collectors.toList()), false,
lifecycle.stopped());
if (lifecycle.stopped()) {
/* We set SO_LINGER timeout to 0 to ensure that when we shutdown the node we don't
* have a gazillion connections sitting in TIME_WAIT to free up resources quickly.
* This is really the only part where we close the connection from the server side
* otherwise the client (node) initiates the TCP closing sequence which doesn't cause
* these issues. Setting this by default from the beginning can have unexpected
* side-effects and should be avoided; our protocol is designed in a way that clients
* close the connection, which is how it should be */
channels.forEach(c -> {
try {
c.setSoLinger(0);
} catch (IOException e) {
logger.warn(new ParameterizedMessage("unexpected exception when setting SO_LINGER on channel {}", c), e);
}
});
}
boolean block = lifecycle.stopped() && Transports.isTransportThread(Thread.currentThread()) == false;
TcpChannel.closeChannels(channels, block);
} finally {
transportService.onConnectionClosed(this);
}
@ -478,7 +495,7 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
public void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfile,
CheckedBiConsumer<Connection, ConnectionProfile, IOException> connectionValidator)
throws ConnectTransportException {
connectionProfile = resolveConnectionProfile(connectionProfile, defaultConnectionProfile);
connectionProfile = resolveConnectionProfile(connectionProfile);
if (node == null) {
throw new ConnectTransportException(null, "can't connect to a null node");
}
@ -559,6 +576,10 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
}
}
protected ConnectionProfile resolveConnectionProfile(ConnectionProfile connectionProfile) {
return resolveConnectionProfile(connectionProfile, defaultConnectionProfile);
}
@Override
public final NodeChannels openConnection(DiscoveryNode node, ConnectionProfile connectionProfile) throws IOException {
if (node == null) {
@ -566,40 +587,66 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
}
boolean success = false;
NodeChannels nodeChannels = null;
connectionProfile = resolveConnectionProfile(connectionProfile, defaultConnectionProfile);
connectionProfile = resolveConnectionProfile(connectionProfile);
closeLock.readLock().lock(); // ensure we don't open connections while we are closing
try {
ensureOpen();
try {
final AtomicBoolean runOnce = new AtomicBoolean(false);
final AtomicReference<NodeChannels> connectionRef = new AtomicReference<>();
Consumer<Channel> onClose = c -> {
assert isOpen(c) == false : "channel is still open when onClose is called";
int numConnections = connectionProfile.getNumConnections();
assert numConnections > 0 : "A connection profile must be configured with at least one connection";
List<Channel> channels = new ArrayList<>(numConnections);
List<ActionFuture<Channel>> connectionFutures = new ArrayList<>(numConnections);
for (int i = 0; i < numConnections; ++i) {
try {
onChannelClosed(c);
} finally {
// we only need to disconnect from the nodes once since all other channels
// will also try to run this we protect it from running multiple times.
if (runOnce.compareAndSet(false, true)) {
NodeChannels connection = connectionRef.get();
if (connection != null) {
disconnectFromNodeCloseAndNotify(node, connection);
}
}
PlainActionFuture<Channel> connectFuture = PlainActionFuture.newFuture();
connectionFutures.add(connectFuture);
Channel channel = initiateChannel(node, connectionProfile.getConnectTimeout(), connectFuture);
channels.add(channel);
} catch (Exception e) {
// If there was an exception when attempting to instantiate the raw channels, we close all of the channels
TcpChannel.closeChannels(channels, false);
throw e;
}
}
// If we make it past the block above, we successfully instantiated all of the channels
try {
TcpChannel.awaitConnected(node, connectionFutures, connectionProfile.getConnectTimeout());
} catch (Exception ex) {
TcpChannel.closeChannels(channels, false);
throw ex;
}
// If we make it past the block above, we have successfully established connections for all of the channels
final Channel handshakeChannel = channels.get(0); // one channel is guaranteed by the connection profile
handshakeChannel.addCloseListener(ActionListener.wrap(() -> cancelHandshakeForChannel(handshakeChannel)));
Version version;
try {
version = executeHandshake(node, handshakeChannel, connectionProfile.getHandshakeTimeout());
} catch (Exception ex) {
TcpChannel.closeChannels(channels, false);
throw ex;
}
// If we make it past the block above, we have successfully completed the handshake and the connection is now open.
// At this point we should construct the connection, notify the transport service, and attach close listeners to the
// underlying channels.
nodeChannels = new NodeChannels(node, channels, connectionProfile, version);
transportService.onConnectionOpened(nodeChannels);
final NodeChannels finalNodeChannels = nodeChannels;
final AtomicBoolean runOnce = new AtomicBoolean(false);
Consumer<Channel> onClose = c -> {
assert c.isOpen() == false : "channel is still open when onClose is called";
// we only need to disconnect from the nodes once since all other channels
// will also try to run this we protect it from running multiple times.
if (runOnce.compareAndSet(false, true)) {
disconnectFromNodeCloseAndNotify(node, finalNodeChannels);
}
};
nodeChannels = connectToChannels(node, connectionProfile, onClose);
final Channel channel = nodeChannels.getChannels().get(0); // one channel is guaranteed by the connection profile
final TimeValue connectTimeout = connectionProfile.getConnectTimeout() == null ?
defaultConnectionProfile.getConnectTimeout() :
connectionProfile.getConnectTimeout();
final TimeValue handshakeTimeout = connectionProfile.getHandshakeTimeout() == null ?
connectTimeout : connectionProfile.getHandshakeTimeout();
final Version version = executeHandshake(node, channel, handshakeTimeout);
nodeChannels = new NodeChannels(nodeChannels, version); // clone the channels - we now have the correct version
transportService.onConnectionOpened(nodeChannels);
connectionRef.set(nodeChannels);
if (Arrays.stream(nodeChannels.channels).allMatch(this::isOpen) == false) {
nodeChannels.channels.forEach(ch -> ch.addCloseListener(ActionListener.wrap(() -> onClose.accept(ch))));
if (nodeChannels.allChannelsOpen() == false) {
throw new ConnectTransportException(node, "a channel closed while connecting");
}
success = true;
@ -637,19 +684,6 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
}
}
/**
* Disconnects from a node if a channel is found as part of that node's channels.
*/
protected final void closeChannelWhileHandlingExceptions(final Channel channel) {
if (isOpen(channel)) {
try {
closeChannels(Collections.singletonList(channel), false, false);
} catch (IOException e) {
logger.warn("failed to close channel", e);
}
}
}
@Override
public NodeChannels getConnection(DiscoveryNode node) {
NodeChannels nodeChannels = connectedNodes.get(node);
@ -904,12 +938,20 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
try {
// first stop to accept any incoming connections so nobody can connect to this transport
for (Map.Entry<String, List<Channel>> entry : serverChannels.entrySet()) {
try {
closeChannels(entry.getValue(), true, false);
} catch (Exception e) {
logger.warn(new ParameterizedMessage("Error closing serverChannel for profile [{}]", entry.getKey()), e);
}
String profile = entry.getKey();
List<Channel> channels = entry.getValue();
ActionListener<TcpChannel> closeFailLogger = ActionListener.wrap(c -> {},
e -> logger.warn(() -> new ParameterizedMessage("Error closing serverChannel for profile [{}]", profile), e));
channels.forEach(c -> c.addCloseListener(closeFailLogger));
TcpChannel.closeChannels(channels, true);
}
serverChannels.clear();
// close all of the incoming channels. The closeChannels method takes a list so we must convert the set.
TcpChannel.closeChannels(new ArrayList<>(acceptedChannels), true);
acceptedChannels.clear();
// we are holding a write lock so nobody modifies the connectedNodes / openConnections map - it's safe to first close
// all instances and then clear the maps
Iterator<Map.Entry<DiscoveryNode, NodeChannels>> iterator = connectedNodes.entrySet().iterator();
@ -940,7 +982,7 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
protected void onException(Channel channel, Exception e) {
if (!lifecycle.started()) {
// just close and ignore - we are already stopped and just need to make sure we release all resources
closeChannelWhileHandlingExceptions(channel);
TcpChannel.closeChannel(channel, false);
return;
}
@ -951,15 +993,15 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
channel),
e);
// close the channel, which will cause a node to be disconnected if relevant
closeChannelWhileHandlingExceptions(channel);
TcpChannel.closeChannel(channel, false);
} else if (isConnectException(e)) {
logger.trace((Supplier<?>) () -> new ParameterizedMessage("connect exception caught on transport layer [{}]", channel), e);
// close the channel as safe measure, which will cause a node to be disconnected if relevant
closeChannelWhileHandlingExceptions(channel);
TcpChannel.closeChannel(channel, false);
} else if (e instanceof BindException) {
logger.trace((Supplier<?>) () -> new ParameterizedMessage("bind exception caught on transport layer [{}]", channel), e);
// close the channel as safe measure, which will cause a node to be disconnected if relevant
closeChannelWhileHandlingExceptions(channel);
TcpChannel.closeChannel(channel, false);
} else if (e instanceof CancelledKeyException) {
logger.trace(
(Supplier<?>) () -> new ParameterizedMessage(
@ -967,29 +1009,21 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
channel),
e);
// close the channel as safe measure, which will cause a node to be disconnected if relevant
closeChannelWhileHandlingExceptions(channel);
TcpChannel.closeChannel(channel, false);
} else if (e instanceof TcpTransport.HttpOnTransportException) {
// in case we are able to return data, serialize the exception content and send it back to the client
if (isOpen(channel)) {
if (channel.isOpen()) {
BytesArray message = new BytesArray(e.getMessage().getBytes(StandardCharsets.UTF_8));
final SendMetricListener<Channel> closeChannel = new SendMetricListener<Channel>(message.length()) {
@Override
protected void innerInnerOnResponse(Channel channel) {
try {
closeChannels(Collections.singletonList(channel), false, false);
} catch (IOException e1) {
logger.debug("failed to close httpOnTransport channel", e1);
}
TcpChannel.closeChannel(channel, false);
}
@Override
protected void innerOnFailure(Exception e) {
try {
closeChannels(Collections.singletonList(channel), false, false);
} catch (IOException e1) {
e.addSuppressed(e1);
logger.debug("failed to close httpOnTransport channel", e1);
}
logger.debug("failed to send message to httpOnTransport channel", e);
TcpChannel.closeChannel(channel, false);
}
};
internalSendMessage(channel, message, closeChannel);
@ -998,10 +1032,16 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
logger.warn(
(Supplier<?>) () -> new ParameterizedMessage("exception caught on transport layer [{}], closing connection", channel), e);
// close the channel, which will cause a node to be disconnected if relevant
closeChannelWhileHandlingExceptions(channel);
TcpChannel.closeChannel(channel, false);
}
}
protected void serverAcceptedChannel(Channel channel) {
boolean addedOnThisCall = acceptedChannels.add(channel);
assert addedOnThisCall : "Channel should only be added to accept channel set once";
channel.addCloseListener(ActionListener.wrap(() -> acceptedChannels.remove(channel)));
}
/**
* Returns the channels local address
*/
@ -1015,44 +1055,34 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
*/
protected abstract Channel bind(String name, InetSocketAddress address) throws IOException;
/**
* Closes all channels in this list. If the blocking boolean is set to true, the channels must be
* closed before the method returns. This should never be called with blocking set to true from a
* network thread.
*
* @param channels the channels to close
* @param blocking whether the channels should be closed synchronously
* @param doNotLinger whether we abort the connection on RST instead of FIN
*/
protected abstract void closeChannels(List<Channel> channels, boolean blocking, boolean doNotLinger) throws IOException;
/**
* Sends message to channel. The listener's onResponse method will be called when the send is complete unless an exception
* is thrown during the send. If an exception is thrown, the listener's onException method will be called.
* @param channel the destination channel
*
* @param channel the destination channel
* @param reference the byte reference for the message
* @param listener the listener to call when the operation has completed
* @param listener the listener to call when the operation has completed
*/
protected abstract void sendMessage(Channel channel, BytesReference reference, ActionListener<Channel> listener);
/**
* Connect to the node with channels as defined by the specified connection profile. Implementations must invoke the specified channel
* close callback when a channel is closed.
* Initiate a single tcp socket channel to a node. Implementations do not have to observe the connectTimeout.
* It is provided for synchronous connection implementations.
*
* @param node the node to connect to
* @param connectionProfile the connection profile
* @param onChannelClose callback to invoke when a channel is closed
* @return the channels
* @throws IOException if an I/O exception occurs while opening channels
* @param node the node
* @param connectTimeout the connection timeout
* @param connectListener listener to be called when connection complete
* @return the pending connection
* @throws IOException if an I/O exception occurs while opening the channel
*/
protected abstract NodeChannels connectToChannels(DiscoveryNode node,
ConnectionProfile connectionProfile,
Consumer<Channel> onChannelClose) throws IOException;
protected abstract Channel initiateChannel(DiscoveryNode node, TimeValue connectTimeout, ActionListener<Channel> connectListener)
throws IOException;
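Note: a minimal blocking sketch of this contract, assuming a hypothetical SocketTcpChannel wrapper around java.net.Socket (not part of this commit); asynchronous transports would instead complete the listener from an event loop:

@Override
protected SocketTcpChannel initiateChannel(DiscoveryNode node, TimeValue connectTimeout,
                                           ActionListener<SocketTcpChannel> connectListener) throws IOException {
    Socket socket = new Socket();
    // a synchronous implementation can honor the timeout itself
    socket.connect(node.getAddress().address(), Math.toIntExact(connectTimeout.millis()));
    SocketTcpChannel channel = new SocketTcpChannel(socket);
    connectListener.onResponse(channel); // the connection is already established
    return channel;
}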
/**
* Called to tear down internal resources
*/
protected void stopInternal() {}
protected void stopInternal() {
}
public boolean canCompress(TransportRequest request) {
return compress && (!(request instanceof BytesTransportRequest));
@ -1118,10 +1148,10 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
* Sends back an error response to the caller via the given channel
*
* @param nodeVersion the caller node version
* @param channel the channel to send the response to
* @param error the error to return
* @param requestId the request ID this response replies to
* @param action the action this response replies to
* @param channel the channel to send the response to
* @param error the error to return
* @param requestId the request ID this response replies to
* @param action the action this response replies to
*/
public void sendErrorResponse(Version nodeVersion, Channel channel, final Exception error, final long requestId,
final String action) throws IOException {
@ -1146,7 +1176,7 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
/**
* Sends the response to the given channel. This method should be used to send {@link TransportResponse} objects back to the caller.
*
* @see #sendErrorResponse(Version, Object, Exception, long, String) for sending back errors to the caller
* @see #sendErrorResponse(Version, TcpChannel, Exception, long, String) for sending back errors to the caller
*/
public void sendResponse(Version nodeVersion, Channel channel, final TransportResponse response, final long requestId,
final String action, TransportResponseOptions options) throws IOException {
@ -1154,7 +1184,7 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
}
private void sendResponse(Version nodeVersion, Channel channel, final TransportResponse response, final long requestId,
final String action, TransportResponseOptions options, byte status) throws IOException {
if (compress) {
options = TransportResponseOptions.builder(options).withCompress(true).build();
}
@ -1232,10 +1262,10 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
* Validates the first N bytes of the message header and returns <code>false</code> if the message is
 * a ping message and has no payload, i.e. isn't a real user-level message.
 *
 * @throws IllegalStateException if the message is too short: less than the header, or less than the header plus the message size
 * @throws HttpOnTransportException if the message has no valid header and appears to be an HTTP message
 * @throws IllegalArgumentException if the message is greater than the maximum allowed frame size. This is dependent on the available
 *                                  memory.
*/
public static boolean validateMessageHeader(BytesReference buffer) throws IOException {
final int sizeHeaderLength = TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE;
@ -1246,23 +1276,23 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
if (buffer.get(offset) != 'E' || buffer.get(offset + 1) != 'S') {
// special handling for what is probably HTTP
if (bufferStartsWith(buffer, offset, "GET ") ||
bufferStartsWith(buffer, offset, "POST ") ||
bufferStartsWith(buffer, offset, "PUT ") ||
bufferStartsWith(buffer, offset, "HEAD ") ||
bufferStartsWith(buffer, offset, "DELETE ") ||
bufferStartsWith(buffer, offset, "OPTIONS ") ||
bufferStartsWith(buffer, offset, "PATCH ") ||
bufferStartsWith(buffer, offset, "TRACE ")) {
bufferStartsWith(buffer, offset, "POST ") ||
bufferStartsWith(buffer, offset, "PUT ") ||
bufferStartsWith(buffer, offset, "HEAD ") ||
bufferStartsWith(buffer, offset, "DELETE ") ||
bufferStartsWith(buffer, offset, "OPTIONS ") ||
bufferStartsWith(buffer, offset, "PATCH ") ||
bufferStartsWith(buffer, offset, "TRACE ")) {
throw new HttpOnTransportException("This is not a HTTP port");
}
// we have 6 readable bytes, show 4 (should be enough)
throw new StreamCorruptedException("invalid internal transport message format, got ("
+ Integer.toHexString(buffer.get(offset) & 0xFF) + ","
+ Integer.toHexString(buffer.get(offset + 1) & 0xFF) + ","
+ Integer.toHexString(buffer.get(offset + 2) & 0xFF) + ","
+ Integer.toHexString(buffer.get(offset + 3) & 0xFF) + ")");
+ Integer.toHexString(buffer.get(offset) & 0xFF) + ","
+ Integer.toHexString(buffer.get(offset + 1) & 0xFF) + ","
+ Integer.toHexString(buffer.get(offset + 2) & 0xFF) + ","
+ Integer.toHexString(buffer.get(offset + 3) & 0xFF) + ")");
}
final int dataLen;
@ -1322,8 +1352,6 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
}
}
/**
* This method handles the message receive part for both request and responses
*/
@ -1410,7 +1438,7 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
final Version compatibilityVersion = isHandshake ? currentVersion.minimumCompatibilityVersion() : currentVersion;
if (version.isCompatible(compatibilityVersion) == false) {
final Version minCompatibilityVersion = isHandshake ? compatibilityVersion : compatibilityVersion.minimumCompatibilityVersion();
String msg = "Received " + (isHandshake? "handshake " : "") + "message from unsupported version: [";
String msg = "Received " + (isHandshake ? "handshake " : "") + "message from unsupported version: [";
throw new IllegalStateException(msg + version + "] minimal compatible version is: [" + minCompatibilityVersion + "]");
}
}
@ -1566,7 +1594,8 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
this.version = version;
}
private VersionHandshakeResponse() {
}
@Override
public void readFrom(StreamInput in) throws IOException {
@ -1591,7 +1620,7 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
pendingHandshakes.put(requestId, handler);
boolean success = false;
try {
if (channel.isOpen() == false) {
// we have to protect us here since sendRequestToChannel won't barf if the channel is closed.
// it's weird but to change it will cause a lot of impact on the exception handling code all over the codebase.
// yet, if we don't check the state here we might have registered a pending handshake handler but the close
@ -1642,9 +1671,9 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
/**
* Called once the channel is closed for instance due to a disconnect or a closed socket etc.
*/
private void cancelHandshakeForChannel(Channel channel) {
final Optional<Long> first = pendingHandshakes.entrySet().stream()
.filter((entry) -> entry.getValue().channel == channel).map(Map.Entry::getKey).findFirst();
if (first.isPresent()) {
final Long requestId = first.get();
final HandshakeResponseHandler handler = pendingHandshakes.remove(requestId);
@ -1778,5 +1807,4 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
PUBLISH_PORT_PROFILE.getConcreteSettingForNamespace(profileName).get(settings);
}
}
}

View File

@ -23,19 +23,19 @@ import org.elasticsearch.Version;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;
public final class TcpTransportChannel<Channel extends TcpChannel> implements TransportChannel {
private final TcpTransport<Channel> transport;
private final Version version;
private final String action;
private final long requestId;
private final String profileName;
private final long reservedBytes;
private final AtomicBoolean released = new AtomicBoolean();
private final String channelType;
private final Channel channel;
TcpTransportChannel(TcpTransport<Channel> transport, Channel channel, String channelType, String action,
long requestId, Version version, String profileName, long reservedBytes) {
this.version = version;
this.channel = channel;
this.transport = transport;
@ -51,11 +51,6 @@ public final class TcpTransportChannel<Channel> implements TransportChannel {
return profileName;
}
@Override
public void sendResponse(TransportResponse response) throws IOException {
sendResponse(response, TransportResponseOptions.EMPTY);
@ -78,6 +73,7 @@ public final class TcpTransportChannel<Channel> implements TransportChannel {
release(true);
}
}
private Exception releaseBy;
private void release(boolean isExceptionResponse) {
@ -91,23 +87,18 @@ public final class TcpTransportChannel<Channel> implements TransportChannel {
}
}
@Override
public String getChannelType() {
return channelType;
}
@Override
public Version getVersion() {
return version;
}
public Channel getChannel() {
return channel;
}
}

View File

@ -28,12 +28,8 @@ import java.io.IOException;
*/
public interface TransportChannel {
String getProfileName();
String getChannelType();
void sendResponse(TransportResponse response) throws IOException;

View File

@ -1117,8 +1117,8 @@ public class TransportService extends AbstractLifecycleComponent {
final TransportService service;
final ThreadPool threadPool;
DirectResponseChannel(Logger logger, DiscoveryNode localNode, String action, long requestId, TransportService service,
ThreadPool threadPool) {
this.logger = logger;
this.localNode = localNode;
this.action = action;
@ -1127,11 +1127,6 @@ public class TransportService extends AbstractLifecycleComponent {
this.threadPool = threadPool;
}
@Override
public String getProfileName() {
return DIRECT_RESPONSE_PROFILE;
@ -1177,13 +1172,7 @@ public class TransportService extends AbstractLifecycleComponent {
if (ThreadPool.Names.SAME.equals(executor)) {
processException(handler, rtx);
} else {
threadPool.executor(handler.executor()).execute(() -> processException(handler, rtx));
}
}
}
@ -1205,11 +1194,6 @@ public class TransportService extends AbstractLifecycleComponent {
}
}
@Override
public String getChannelType() {
return "direct";

View File

@ -23,6 +23,7 @@ import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.SortedSetSelector;
import org.apache.lucene.search.SortedSetSortField;
import org.apache.lucene.search.join.ScoreMode;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
@ -57,15 +58,23 @@ import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.InternalSettingsPlugin;
import org.elasticsearch.test.VersionUtils;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.function.BiFunction;
import java.util.function.IntFunction;
import java.util.stream.IntStream;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.nestedQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
@ -78,13 +87,14 @@ public class SplitIndexIT extends ESIntegTestCase {
return Arrays.asList(InternalSettingsPlugin.class);
}
public void testCreateSplitIndexToN() throws IOException {
int[][] possibleShardSplits = new int[][] {{2,4,8}, {3, 6, 12}, {1, 2, 4}};
int[] shardSplits = randomFrom(possibleShardSplits);
assertEquals(shardSplits[0], (shardSplits[0] * shardSplits[1]) / shardSplits[1]);
assertEquals(shardSplits[1], (shardSplits[1] * shardSplits[2]) / shardSplits[2]);
internalCluster().ensureAtLeastNumDataNodes(2);
final boolean useRouting = randomBoolean();
final boolean useNested = randomBoolean();
final boolean useMixedRouting = useRouting ? randomBoolean() : false;
CreateIndexRequestBuilder createInitialIndex = prepareCreate("source");
final int routingShards = shardSplits[2] * randomIntBetween(1, 10);
@ -93,16 +103,43 @@ public class SplitIndexIT extends ESIntegTestCase {
.put("index.number_of_routing_shards", routingShards);
if (useRouting && useMixedRouting == false && randomBoolean()) {
settings.put("index.routing_partition_size", randomIntBetween(1, routingShards - 1));
createInitialIndex.addMapping("t1", "_routing", "required=true");
if (useNested) {
createInitialIndex.addMapping("t1", "_routing", "required=true", "nested1", "type=nested");
} else {
createInitialIndex.addMapping("t1", "_routing", "required=true");
}
} else if (useNested) {
createInitialIndex.addMapping("t1", "nested1", "type=nested");
}
logger.info("use routing {} use mixed routing {}", useRouting, useMixedRouting);
logger.info("use routing {} use mixed routing {} use nested {}", useRouting, useMixedRouting, useNested);
createInitialIndex.setSettings(settings).get();
int numDocs = randomIntBetween(10, 50);
String[] routingValue = new String[numDocs];
BiFunction<String, Integer, IndexRequestBuilder> indexFunc = (index, id) -> {
try {
return client().prepareIndex(index, "t1", Integer.toString(id))
.setSource(jsonBuilder().startObject()
.field("foo", "bar")
.field("i", id)
.startArray("nested1")
.startObject()
.field("n_field1", "n_value1_1")
.field("n_field2", "n_value2_1")
.endObject()
.startObject()
.field("n_field1", "n_value1_2")
.field("n_field2", "n_value2_2")
.endObject()
.endArray()
.endObject());
} catch (IOException e) {
throw new UncheckedIOException(e);
}
};
for (int i = 0; i < numDocs; i++) {
IndexRequestBuilder builder = client().prepareIndex("source", "t1", Integer.toString(i))
.setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON);
IndexRequestBuilder builder = indexFunc.apply("source", i);
if (useRouting) {
String routing = randomRealisticUnicodeOfCodepointLengthBetween(1, 10);
if (useMixedRouting && randomBoolean()) {
@ -118,8 +155,7 @@ public class SplitIndexIT extends ESIntegTestCase {
if (randomBoolean()) {
for (int i = 0; i < numDocs; i++) { // let's introduce some updates / deletes on the index
if (randomBoolean()) {
IndexRequestBuilder builder = client().prepareIndex("source", "t1", Integer.toString(i))
.setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON);
IndexRequestBuilder builder = indexFunc.apply("source", i);
if (useRouting) {
builder.setRouting(routingValue[i]);
}
@ -145,8 +181,7 @@ public class SplitIndexIT extends ESIntegTestCase {
assertHitCount(client().prepareSearch("first_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs);
for (int i = 0; i < numDocs; i++) { // now update
IndexRequestBuilder builder = client().prepareIndex("first_split", "t1", Integer.toString(i))
.setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON);
IndexRequestBuilder builder = indexFunc.apply("first_split", i);
if (useRouting) {
builder.setRouting(routingValue[i]);
}
@ -180,8 +215,7 @@ public class SplitIndexIT extends ESIntegTestCase {
assertHitCount(client().prepareSearch("second_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs);
for (int i = 0; i < numDocs; i++) { // now update
IndexRequestBuilder builder = client().prepareIndex("second_split", "t1", Integer.toString(i))
.setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON);
IndexRequestBuilder builder = indexFunc.apply("second_split", i);
if (useRouting) {
builder.setRouting(routingValue[i]);
}
@ -195,14 +229,25 @@ public class SplitIndexIT extends ESIntegTestCase {
assertHitCount(client().prepareSearch("second_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs);
assertHitCount(client().prepareSearch("first_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs);
assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs);
if (useNested) {
assertNested("source", numDocs);
assertNested("first_split", numDocs);
assertNested("second_split", numDocs);
}
assertAllUniqueDocs(client().prepareSearch("second_split").setSize(100)
.setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs);
assertAllUniqueDocs(client().prepareSearch("first_split").setSize(100)
.setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs);
assertAllUniqueDocs(client().prepareSearch("source").setSize(100)
.setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs);
}
public void assertNested(String index, int numDocs) {
// now, do a nested query
SearchResponse searchResponse = client().prepareSearch(index).setQuery(nestedQuery("nested1", termQuery("nested1.n_field1",
"n_value1_1"), ScoreMode.Avg)).get();
assertNoFailures(searchResponse);
assertThat(searchResponse.getHits().getTotalHits(), equalTo((long)numDocs));
}
public void assertAllUniqueDocs(SearchResponse response, int numDocs) {

View File

@ -471,11 +471,6 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase {
return capturedResponse;
}
@Override
public String action() {
return null;
}
@Override
public String getProfileName() {
return "";
@ -494,11 +489,6 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase {
public void sendResponse(Exception exception) throws IOException {
}
@Override
public long getRequestId() {
return 0;
}
@Override
public String getChannelType() {
return "test";

View File

@ -1237,11 +1237,6 @@ public class TransportReplicationActionTests extends ESTestCase {
public TransportChannel createTransportChannel(final PlainActionFuture<TestResponse> listener) {
return new TransportChannel() {
@Override
public String action() {
return null;
}
@Override
public String getProfileName() {
return "";
@ -1262,11 +1257,6 @@ public class TransportReplicationActionTests extends ESTestCase {
listener.onFailure(exception);
}
@Override
public long getRequestId() {
return 0;
}
@Override
public String getChannelType() {
return "replica_test";

View File

@ -914,11 +914,6 @@ public class PublishClusterStateActionTests extends ESTestCase {
error.set(null);
}
@Override
public String action() {
return "_noop_";
}
@Override
public String getProfileName() {
return "_noop_";
@ -942,11 +937,6 @@ public class PublishClusterStateActionTests extends ESTestCase {
assertThat(response.get(), nullValue());
}
@Override
public long getRequestId() {
return 0;
}
@Override
public String getChannelType() {
return "capturing";

View File

@ -47,7 +47,6 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.zen.PublishClusterStateActionTests.AssertingAckListener;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.plugins.ClusterPlugin;
import org.elasticsearch.test.ClusterServiceUtils;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.VersionUtils;
@ -378,21 +377,12 @@ public class ZenDiscoveryUnitTests extends ESTestCase {
} else {
AtomicBoolean sendResponse = new AtomicBoolean(false);
request.messageReceived(new MembershipAction.ValidateJoinRequest(stateBuilder.build()), new TransportChannel() {
@Override
public String action() {
return null;
}
@Override
public String getProfileName() {
return null;
}
@Override
public long getRequestId() {
return 0;
}
@Override
public String getChannelType() {
return null;

View File

@ -23,6 +23,7 @@ import org.apache.lucene.document.SortedNumericDocValuesField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.SortedNumericDocValues;
@ -38,10 +39,12 @@ import org.elasticsearch.cluster.routing.OperationRouting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.mapper.IdFieldMapper;
import org.elasticsearch.index.mapper.RoutingFieldMapper;
import org.elasticsearch.index.mapper.TypeFieldMapper;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
@ -58,18 +61,36 @@ public class ShardSplittingQueryTests extends ESTestCase {
.setRoutingNumShards(numShards * 1000000)
.numberOfReplicas(0).build();
int targetShardId = randomIntBetween(0, numShards-1);
boolean hasNested = randomBoolean();
for (int j = 0; j < numDocs; j++) {
int shardId = OperationRouting.generateShardId(metaData, Integer.toString(j), null);
writer.addDocument(Arrays.asList(
new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES),
new SortedNumericDocValuesField("shard_id", shardId)
));
if (hasNested) {
List<Iterable<IndexableField>> docs = new ArrayList<>();
int numNested = randomIntBetween(0, 10);
for (int i = 0; i < numNested; i++) {
docs.add(Arrays.asList(
new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES),
new StringField(TypeFieldMapper.NAME, "__nested", Field.Store.YES),
new SortedNumericDocValuesField("shard_id", shardId)
));
}
docs.add(Arrays.asList(
new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES),
new SortedNumericDocValuesField("shard_id", shardId)
));
writer.addDocuments(docs);
} else {
writer.addDocument(Arrays.asList(
new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES),
new SortedNumericDocValuesField("shard_id", shardId)
));
}
}
writer.commit();
writer.close();
assertSplit(dir, metaData, targetShardId);
assertSplit(dir, metaData, targetShardId, hasNested);
dir.close();
}
@ -83,19 +104,38 @@ public class ShardSplittingQueryTests extends ESTestCase {
.numberOfShards(numShards)
.setRoutingNumShards(numShards * 1000000)
.numberOfReplicas(0).build();
boolean hasNested = randomBoolean();
int targetShardId = randomIntBetween(0, numShards-1);
for (int j = 0; j < numDocs; j++) {
String routing = randomRealisticUnicodeOfCodepointLengthBetween(1, 5);
final int shardId = OperationRouting.generateShardId(metaData, null, routing);
writer.addDocument(Arrays.asList(
new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES),
new StringField(RoutingFieldMapper.NAME, routing, Field.Store.YES),
new SortedNumericDocValuesField("shard_id", shardId)
));
if (hasNested) {
List<Iterable<IndexableField>> docs = new ArrayList<>();
int numNested = randomIntBetween(0, 10);
for (int i = 0; i < numNested; i++) {
docs.add(Arrays.asList(
new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES),
new StringField(TypeFieldMapper.NAME, "__nested", Field.Store.YES),
new SortedNumericDocValuesField("shard_id", shardId)
));
}
docs.add(Arrays.asList(
new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES),
new StringField(RoutingFieldMapper.NAME, routing, Field.Store.YES),
new SortedNumericDocValuesField("shard_id", shardId)
));
writer.addDocuments(docs);
} else {
writer.addDocument(Arrays.asList(
new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES),
new StringField(RoutingFieldMapper.NAME, routing, Field.Store.YES),
new SortedNumericDocValuesField("shard_id", shardId)
));
}
}
writer.commit();
writer.close();
assertSplit(dir, metaData, targetShardId);
assertSplit(dir, metaData, targetShardId, hasNested);
dir.close();
}
@ -103,33 +143,52 @@ public class ShardSplittingQueryTests extends ESTestCase {
Directory dir = newFSDirectory(createTempDir());
final int numDocs = randomIntBetween(50, 100);
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
int numShards = randomIntBetween(2, 10);
IndexMetaData metaData = IndexMetaData.builder("test")
.settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
.numberOfShards(numShards)
.setRoutingNumShards(numShards * 1000000)
.numberOfReplicas(0).build();
boolean hasNested = randomBoolean();
int targetShardId = randomIntBetween(0, numShards-1);
for (int j = 0; j < numDocs; j++) {
Iterable<IndexableField> rootDoc;
final int shardId;
if (randomBoolean()) {
String routing = randomRealisticUnicodeOfCodepointLengthBetween(1, 5);
shardId = OperationRouting.generateShardId(metaData, null, routing);
rootDoc = Arrays.asList(
new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES),
new StringField(RoutingFieldMapper.NAME, routing, Field.Store.YES),
new SortedNumericDocValuesField("shard_id", shardId)
);
} else {
shardId = OperationRouting.generateShardId(metaData, Integer.toString(j), null);
rootDoc = Arrays.asList(
new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES),
new SortedNumericDocValuesField("shard_id", shardId)
);
}
if (hasNested) {
List<Iterable<IndexableField>> docs = new ArrayList<>();
int numNested = randomIntBetween(0, 10);
for (int i = 0; i < numNested; i++) {
docs.add(Arrays.asList(
new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES),
new StringField(TypeFieldMapper.NAME, "__nested", Field.Store.YES),
new SortedNumericDocValuesField("shard_id", shardId)
));
}
docs.add(rootDoc);
writer.addDocuments(docs);
} else {
writer.addDocument(rootDoc);
}
}
writer.commit();
writer.close();
assertSplit(dir, metaData, targetShardId);
assertSplit(dir, metaData, targetShardId, hasNested);
dir.close();
}
@ -145,47 +204,94 @@ public class ShardSplittingQueryTests extends ESTestCase {
.setRoutingNumShards(numShards * 1000000)
.routingPartitionSize(randomIntBetween(1, 10))
.numberOfReplicas(0).build();
boolean hasNested = randomBoolean();
int targetShardId = randomIntBetween(0, numShards-1);
for (int j = 0; j < numDocs; j++) {
String routing = randomRealisticUnicodeOfCodepointLengthBetween(1, 5);
final int shardId = OperationRouting.generateShardId(metaData, Integer.toString(j), routing);
writer.addDocument(Arrays.asList(
new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES),
new StringField(RoutingFieldMapper.NAME, routing, Field.Store.YES),
new SortedNumericDocValuesField("shard_id", shardId)
));
if (hasNested) {
List<Iterable<IndexableField>> docs = new ArrayList<>();
int numNested = randomIntBetween(0, 10);
for (int i = 0; i < numNested; i++) {
docs.add(Arrays.asList(
new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES),
new StringField(TypeFieldMapper.NAME, "__nested", Field.Store.YES),
new SortedNumericDocValuesField("shard_id", shardId)
));
}
docs.add(Arrays.asList(
new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES),
new StringField(RoutingFieldMapper.NAME, routing, Field.Store.YES),
new SortedNumericDocValuesField("shard_id", shardId)
));
writer.addDocuments(docs);
} else {
writer.addDocument(Arrays.asList(
new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES),
new StringField(RoutingFieldMapper.NAME, routing, Field.Store.YES),
new SortedNumericDocValuesField("shard_id", shardId)
));
}
}
writer.commit();
writer.close();
assertSplit(dir, metaData, targetShardId);
assertSplit(dir, metaData, targetShardId, hasNested);
dir.close();
}
void assertSplit(Directory dir, IndexMetaData metaData, int targetShardId, boolean hasNested) throws IOException {
try (IndexReader reader = DirectoryReader.open(dir)) {
IndexSearcher searcher = new IndexSearcher(reader);
searcher.setQueryCache(null);
final boolean needsScores = false;
final Weight splitWeight = searcher.createNormalizedWeight(new ShardSplittingQuery(metaData, targetShardId, hasNested),
needsScores);
final List<LeafReaderContext> leaves = reader.leaves();
for (final LeafReaderContext ctx : leaves) {
Scorer scorer = splitWeight.scorer(ctx);
DocIdSetIterator iterator = scorer.iterator();
SortedNumericDocValues shard_id = ctx.reader().getSortedNumericDocValues("shard_id");
int doc;
int numExpected = 0;
while (shard_id.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
if (targetShardId == shard_id.nextValue()) {
numExpected++;
}
}
if (numExpected == ctx.reader().maxDoc()) {
// all docs belong in this shard
assertEquals(DocIdSetIterator.NO_MORE_DOCS, iterator.nextDoc());
} else {
shard_id = ctx.reader().getSortedNumericDocValues("shard_id");
int doc;
int numActual = 0;
int lastDoc = 0;
while ((doc = iterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
lastDoc = doc;
while (shard_id.nextDoc() < doc) {
long shardID = shard_id.nextValue();
assertEquals(shardID, targetShardId);
numActual++;
}
assertEquals(shard_id.docID(), doc);
long shardID = shard_id.nextValue();
BytesRef id = reader.document(doc).getBinaryValue("_id");
String actualId = Uid.decodeId(id.bytes, id.offset, id.length);
assertNotEquals(ctx.reader() + " docID: " + doc + " actualID: " + actualId, shardID, targetShardId);
}
if (lastDoc < ctx.reader().maxDoc()) {
// check the last docs in the segment and make sure they all have the right shard id
while (shard_id.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
long shardID = shard_id.nextValue();
assertEquals(shardID, targetShardId);
numActual++;
}
}
assertEquals(numExpected, numActual);
}
}
}

View File

@ -100,7 +100,7 @@ public class StoreRecoveryTests extends ESTestCase {
Directory target = newFSDirectory(createTempDir());
final long maxSeqNo = randomNonNegativeLong();
final long maxUnsafeAutoIdTimestamp = randomNonNegativeLong();
storeRecovery.addIndices(indexStats, target, indexSort, dirs, maxSeqNo, maxUnsafeAutoIdTimestamp, null, 0, false, false);
int numFiles = 0;
Predicate<String> filesFilter = (f) -> f.startsWith("segments") == false && f.equals("write.lock") == false
&& f.startsWith("extra") == false;
@ -174,7 +174,7 @@ public class StoreRecoveryTests extends ESTestCase {
.setRoutingNumShards(numShards * 1000000)
.numberOfReplicas(0).build();
storeRecovery.addIndices(indexStats, target, indexSort, new Directory[] {dir}, maxSeqNo, maxUnsafeAutoIdTimestamp, metaData,
targetShardId, true, false);
SegmentInfos segmentCommitInfos = SegmentInfos.readLatestCommit(target);

View File

@ -75,6 +75,7 @@ import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.extend
import org.elasticsearch.search.aggregations.pipeline.derivative.InternalDerivativeTests;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.InternalAggregationTestCase;
import org.elasticsearch.test.InternalMultiBucketAggregationTestCase;
import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
import org.junit.After;
import org.junit.Before;
@ -157,7 +158,7 @@ public class AggregationsTests extends ESTestCase {
if (aggsTest instanceof InternalMultiBucketAggregationTestCase) {
// Lower down the number of buckets generated by multi bucket aggregation tests in
// order to avoid too many aggregations to be created.
((InternalMultiBucketAggregationTestCase) aggsTest).setMaxNumberOfBuckets(3);
}
aggsTest.setUp();
}
@ -266,9 +267,13 @@ public class AggregationsTests extends ESTestCase {
if (testCase instanceof InternalMultiBucketAggregationTestCase) {
InternalMultiBucketAggregationTestCase multiBucketAggTestCase = (InternalMultiBucketAggregationTestCase) testCase;
if (currentDepth < maxDepth) {
multiBucketAggTestCase.setSubAggregationsSupplier(
() -> createTestInstance(0, currentDepth + 1, maxDepth)
);
} else {
multiBucketAggTestCase.setSubAggregationsSupplier(
() -> InternalAggregations.EMPTY
);
}
} else if (testCase instanceof InternalSingleBucketAggregationTestCase) {
InternalSingleBucketAggregationTestCase singleBucketAggTestCase = (InternalSingleBucketAggregationTestCase) testCase;

View File

@ -21,7 +21,7 @@ package org.elasticsearch.search.aggregations.bucket.adjacency;
import org.elasticsearch.common.io.stream.Writeable.Reader;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.test.InternalMultiBucketAggregationTestCase;
import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;

View File

@ -21,7 +21,7 @@ package org.elasticsearch.search.aggregations.bucket.filter;
import org.elasticsearch.common.io.stream.Writeable.Reader;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.test.InternalMultiBucketAggregationTestCase;
import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation;
import org.elasticsearch.search.aggregations.bucket.filter.InternalFilters.InternalBucket;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;

View File

@ -22,7 +22,7 @@ import org.apache.lucene.index.IndexWriter;
import org.elasticsearch.common.geo.GeoHashUtils;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.test.InternalMultiBucketAggregationTestCase;
import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation;
import org.elasticsearch.search.aggregations.bucket.geogrid.InternalGeoHashGrid.Bucket;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
@ -109,7 +109,7 @@ public class InternalGeoHashGridTests extends InternalMultiBucketAggregationTest
protected Class<? extends ParsedMultiBucketAggregation> implementationClass() {
return ParsedGeoHashGrid.class;
}
@Override
protected InternalGeoHashGrid mutateInstance(InternalGeoHashGrid instance) {
String name = instance.getName();

View File

@ -23,7 +23,7 @@ import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.test.InternalMultiBucketAggregationTestCase;
import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.joda.time.DateTime;

View File

@ -26,7 +26,7 @@ import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.test.InternalMultiBucketAggregationTestCase;
import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;

View File

@ -22,7 +22,7 @@ package org.elasticsearch.search.aggregations.bucket.range;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;
import org.elasticsearch.test.InternalMultiBucketAggregationTestCase;
import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;

View File

@ -20,7 +20,7 @@
package org.elasticsearch.search.aggregations.bucket.significant;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.test.InternalMultiBucketAggregationTestCase;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
import org.elasticsearch.search.aggregations.bucket.significant.heuristics.ChiSquare;
import org.elasticsearch.search.aggregations.bucket.significant.heuristics.GND;

View File

@ -20,10 +20,9 @@
package org.elasticsearch.search.aggregations.bucket.terms;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.test.InternalMultiBucketAggregationTestCase;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.junit.Before;
import org.elasticsearch.test.InternalAggregationTestCase;
import java.util.HashMap;
import java.util.List;

View File

@ -37,7 +37,7 @@ public class SnapshotRequestsTests extends ESTestCase {
XContentBuilder builder = jsonBuilder().startObject();
if (randomBoolean()) {
builder.field("indices", "foo,bar,baz");
} else {
builder.startArray("indices");
@ -76,6 +76,10 @@ public class SnapshotRequestsTests extends ESTestCase {
builder.value("set3");
builder.endArray();
}
boolean includeIgnoreUnavailable = randomBoolean();
if (includeIgnoreUnavailable) {
builder.field("ignore_unavailable", indicesOptions.ignoreUnavailable());
}
BytesReference bytes = builder.endObject().bytes();
@ -89,7 +93,10 @@ public class SnapshotRequestsTests extends ESTestCase {
assertEquals(partial, request.partial());
assertEquals("val1", request.settings().get("set1"));
assertArrayEquals(request.ignoreIndexSettings(), new String[]{"set2", "set3"});
boolean expectedIgnoreAvailable = includeIgnoreUnavailable
? indicesOptions.ignoreUnavailable()
: IndicesOptions.strictExpandOpen().ignoreUnavailable();
assertEquals(expectedIgnoreAvailable, request.indicesOptions().ignoreUnavailable());
}
public void testCreateSnapshotRequestParsing() throws IOException {
@ -97,7 +104,7 @@ public class SnapshotRequestsTests extends ESTestCase {
XContentBuilder builder = jsonBuilder().startObject();
if (randomBoolean()) {
builder.field("indices", "foo,bar,baz");
} else {
builder.startArray("indices");
@ -134,6 +141,10 @@ public class SnapshotRequestsTests extends ESTestCase {
builder.value("set3");
builder.endArray();
}
boolean includeIgnoreUnavailable = randomBoolean();
if (includeIgnoreUnavailable) {
builder.field("ignore_unavailable", indicesOptions.ignoreUnavailable());
}
BytesReference bytes = builder.endObject().bytes();
@ -144,6 +155,10 @@ public class SnapshotRequestsTests extends ESTestCase {
assertArrayEquals(request.indices(), new String[]{"foo", "bar", "baz"});
assertEquals(partial, request.partial());
assertEquals("val1", request.settings().get("set1"));
boolean expectedIgnoreAvailable = includeIgnoreUnavailable
? indicesOptions.ignoreUnavailable()
: IndicesOptions.strictExpandOpen().ignoreUnavailable();
assertEquals(expectedIgnoreAvailable, request.indicesOptions().ignoreUnavailable());
}
}

View File

@ -22,7 +22,11 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.test.ESTestCase;
import org.hamcrest.Matchers;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.EnumSet;
import java.util.List;
public class ConnectionProfileTests extends ESTestCase {
@ -65,16 +69,16 @@ public class ConnectionProfileTests extends ESTestCase {
assertNull(build.getHandshakeTimeout());
}
List<Integer> list = new ArrayList<>(10);
for (int i = 0; i < 10; i++) {
list.add(i);
}
final int numIters = randomIntBetween(5, 10);
assertEquals(4, build.getHandles().size());
assertEquals(0, build.getHandles().get(0).offset);
assertEquals(1, build.getHandles().get(0).length);
assertEquals(EnumSet.of(TransportRequestOptions.Type.BULK), build.getHandles().get(0).getTypes());
Integer channel = build.getHandles().get(0).getChannel(list);
for (int i = 0; i < numIters; i++) {
assertEquals(0, channel.intValue());
}
@ -83,7 +87,7 @@ public class ConnectionProfileTests extends ESTestCase {
assertEquals(2, build.getHandles().get(1).length);
assertEquals(EnumSet.of(TransportRequestOptions.Type.STATE, TransportRequestOptions.Type.RECOVERY),
build.getHandles().get(1).getTypes());
channel = build.getHandles().get(1).getChannel(list);
for (int i = 0; i < numIters; i++) {
assertThat(channel, Matchers.anyOf(Matchers.is(1), Matchers.is(2)));
}
@ -91,7 +95,7 @@ public class ConnectionProfileTests extends ESTestCase {
assertEquals(3, build.getHandles().get(2).offset);
assertEquals(3, build.getHandles().get(2).length);
assertEquals(EnumSet.of(TransportRequestOptions.Type.PING), build.getHandles().get(2).getTypes());
channel = build.getHandles().get(2).getChannel(list);
for (int i = 0; i < numIters; i++) {
assertThat(channel, Matchers.anyOf(Matchers.is(3), Matchers.is(4), Matchers.is(5)));
}
@ -99,7 +103,7 @@ public class ConnectionProfileTests extends ESTestCase {
assertEquals(6, build.getHandles().get(3).offset);
assertEquals(4, build.getHandles().get(3).length);
assertEquals(EnumSet.of(TransportRequestOptions.Type.REG), build.getHandles().get(3).getTypes());
channel = build.getHandles().get(3).getChannel(list);
for (int i = 0; i < numIters; i++) {
assertThat(channel, Matchers.anyOf(Matchers.is(6), Matchers.is(7), Matchers.is(8), Matchers.is(9)));
}
@ -119,7 +123,7 @@ public class ConnectionProfileTests extends ESTestCase {
TransportRequestOptions.Type.REG);
builder.addConnections(0, TransportRequestOptions.Type.PING);
ConnectionProfile build = builder.build();
List<Integer> array = Collections.singletonList(0);
assertEquals(Integer.valueOf(0), build.getHandles().get(0).getChannel(array));
expectThrows(IllegalStateException.class, () -> build.getHandles().get(1).getChannel(array));
}

View File

@ -37,11 +37,10 @@ import org.elasticsearch.threadpool.ThreadPool;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.List;
import java.util.ArrayList;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import static org.hamcrest.Matchers.equalTo;
@ -178,25 +177,21 @@ public class TcpTransportTests extends ESTestCase {
ThreadPool threadPool = new TestThreadPool(TcpTransportTests.class.getName());
AtomicReference<IOException> exceptionReference = new AtomicReference<>();
try {
TcpTransport transport = new TcpTransport("test", Settings.builder().put("transport.tcp.compress", compressed).build(),
threadPool, new BigArrays(Settings.EMPTY, null), null, null, null) {
TcpTransport<FakeChannel> transport = new TcpTransport<FakeChannel>(
"test", Settings.builder().put("transport.tcp.compress", compressed).build(), threadPool,
new BigArrays(Settings.EMPTY, null), null, null, null) {
@Override
protected InetSocketAddress getLocalAddress(FakeChannel o) {
return null;
}
@Override
protected FakeChannel bind(String name, InetSocketAddress address) throws IOException {
return null;
}
@Override
protected void sendMessage(FakeChannel o, BytesReference reference, ActionListener listener) {
try {
StreamInput streamIn = reference.streamInput();
streamIn.skip(TcpHeader.MARKER_BYTES_SIZE);
@ -224,14 +219,10 @@ public class TcpTransportTests extends ESTestCase {
}
@Override
protected FakeChannel initiateChannel(DiscoveryNode node, TimeValue connectTimeout,
ActionListener<FakeChannel> connectListener) throws IOException {
FakeChannel fakeChannel = new FakeChannel();
return fakeChannel;
}
@Override
@ -241,8 +232,12 @@ public class TcpTransportTests extends ESTestCase {
@Override
public NodeChannels getConnection(DiscoveryNode node) {
int numConnections = MockTcpTransport.LIGHT_PROFILE.getNumConnections();
ArrayList<FakeChannel> fakeChannels = new ArrayList<>(numConnections);
for (int i = 0; i < numConnections; ++i) {
fakeChannels.add(new FakeChannel());
}
return new NodeChannels(node, fakeChannels, MockTcpTransport.LIGHT_PROFILE, Version.CURRENT);
}
};
DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Version.CURRENT);
@ -255,6 +250,26 @@ public class TcpTransportTests extends ESTestCase {
}
}
private static final class FakeChannel implements TcpChannel {
@Override
public void close() {
}
@Override
public void addCloseListener(ActionListener<TcpChannel> listener) {
}
@Override
public void setSoLinger(int value) throws IOException {
}
@Override
public boolean isOpen() {
return false;
}
}
private static final class Req extends TransportRequest {
public String value;

View File

@ -82,6 +82,10 @@ bin\elasticsearch-plugin install file:///C:/path/to/plugin.zip
-----------------------------------
+
NOTE: Any path that contains spaces must be wrapped in quotes!
+
NOTE: If you are installing a plugin from the filesystem, the plugin distribution
must not be located in the `plugins` directory of the node you are installing
the plugin to, or installation will fail.
HTTP::
To install a plugin from a HTTP URL:

View File

@ -53,3 +53,5 @@ include::bucket/significanttext-aggregation.asciidoc[]
include::bucket/terms-aggregation.asciidoc[]
include::bucket/composite-aggregation.asciidoc[]

View File

@ -0,0 +1,581 @@
[[search-aggregations-bucket-composite-aggregation]]
=== Composite Aggregation
experimental[]
A multi-bucket aggregation that creates composite buckets from different sources.
Unlike the other `multi-bucket` aggregations, the `composite` aggregation can be used
to paginate **all** buckets from a multi-level aggregation efficiently. This aggregation
provides a way to stream **all** buckets of a specific aggregation, similar to what
<<search-request-scroll, scroll>> does for documents.
The composite buckets are built from the combinations of the
values extracted/created for each document, and each combination is considered
a composite bucket.
//////////////////////////
[source,js]
--------------------------------------------------
PUT /sales
{
"mappings": {
"docs": {
"properties": {
"product": {
"type": "keyword"
},
"timestamp": {
"type": "date"
},
"price": {
"type": "long"
},
"shop": {
"type": "keyword"
}
}
}
}
}
POST /sales/docs/_bulk?refresh
{"index":{"_id":0}}
{"product": "mad max", "price": "20", "timestamp": "2017-05-09T14:35"}
{"index":{"_id":1}}
{"product": "mad max", "price": "25", "timestamp": "2017-05-09T12:35"}
{"index":{"_id":2}}
{"product": "rocky", "price": "10", "timestamp": "2017-05-08T09:10"}
{"index":{"_id":3}}
{"product": "mad max", "price": "27", "timestamp": "2017-05-10T07:07"}
{"index":{"_id":4}}
{"product": "apocalypse now", "price": "10", "timestamp": "2017-05-11T08:35"}
-------------------------------------------------
// NOTCONSOLE
// TESTSETUP
//////////////////////////
For instance the following document:
```
{
"keyword": ["foo", "bar"],
"number": [23, 65, 76]
}
```
\... creates the following composite buckets when `keyword` and `number` are used as value sources
for the aggregation:
```
{ "keyword": "foo", "number": 23 }
{ "keyword": "foo", "number": 65 }
{ "keyword": "foo", "number": 76 }
{ "keyword": "bar", "number": 23 }
{ "keyword": "bar", "number": 65 }
{ "keyword": "bar", "number": 76 }
```
==== Values source
The `sources` parameter controls the sources that should be used to build the composite buckets.
There are three different types of values source:
===== Terms
The `terms` value source is equivalent to a simple `terms` aggregation.
The values are extracted from a field or a script exactly like the `terms` aggregation.
Example:
[source,js]
--------------------------------------------------
GET /_search
{
"aggs" : {
"my_buckets": {
"composite" : {
"sources" : [
{ "product": { "terms" : { "field": "product" } } }
]
}
}
}
}
--------------------------------------------------
// CONSOLE
Like the `terms` aggregation it is also possible to use a script to create the values for the composite buckets:
[source,js]
--------------------------------------------------
GET /_search
{
"aggs" : {
"my_buckets": {
"composite" : {
"sources" : [
{
"product": {
"terms" : {
"script" : {
"source": "doc['product'].value",
"lang": "painless"
}
}
}
}
]
}
}
}
}
--------------------------------------------------
// CONSOLE
===== Histogram
The `histogram` value source can be applied to numeric values to build fixed-size
intervals over the values. The `interval` parameter defines how the numeric values should be
transformed. For instance an `interval` set to 5 will translate any numeric value to its closest interval:
a value of `101` would be translated to `100`, which is the key for the interval between 100 and 105.
Example:
[source,js]
--------------------------------------------------
GET /_search
{
"aggs" : {
"my_buckets": {
"composite" : {
"sources" : [
{ "histo": { "histogram" : { "field": "price", "interval": 5 } } }
]
}
}
}
}
--------------------------------------------------
// CONSOLE
The values are built from a numeric field or a script that returns numerical values:
[source,js]
--------------------------------------------------
GET /_search
{
"aggs" : {
"my_buckets": {
"composite" : {
"sources" : [
{
"histo": {
"histogram" : {
"interval": 5,
"script" : {
"source": "doc['price'].value",
"lang": "painless"
}
}
}
}
]
}
}
}
}
--------------------------------------------------
// CONSOLE
===== Date Histogram
The `date_histogram` is similar to the `histogram` value source except that the interval
is specified by a date/time expression:
[source,js]
--------------------------------------------------
GET /_search
{
"aggs" : {
"my_buckets": {
"composite" : {
"sources" : [
{ "date": { "date_histogram" : { "field": "timestamp", "interval": "1d" } } }
]
}
}
}
}
--------------------------------------------------
// CONSOLE
The example above creates an interval per day and translates all `timestamp` values to the start of their closest interval.
Available expressions for interval: `year`, `quarter`, `month`, `week`, `day`, `hour`, `minute`, `second`
Time values can also be specified via abbreviations supported by <<time-units,time units>> parsing.
Note that fractional time values are not supported, but you can address this by shifting to another
time unit (e.g., `1.5h` could instead be specified as `90m`).
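For example, a 90-minute interval (rather than the unsupported `1.5h`) can be expressed as follows; this request is illustrative and not part of the original examples:
[source,js]
--------------------------------------------------
GET /_search
{
    "aggs" : {
        "my_buckets": {
            "composite" : {
                "sources" : [
                    { "date": { "date_histogram" : { "field": "timestamp", "interval": "90m" } } }
                ]
            }
        }
    }
}
--------------------------------------------------
// CONSOLE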
====== Time Zone
Date-times are stored in Elasticsearch in UTC. By default, all bucketing and
rounding is also done in UTC. The `time_zone` parameter can be used to indicate
that bucketing should use a different time zone.
Time zones may either be specified as an ISO 8601 UTC offset (e.g. `+01:00` or
`-08:00`) or as a timezone id, an identifier used in the TZ database like
`America/Los_Angeles`.
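For instance, to bucket days in the `America/Los_Angeles` time zone instead of UTC (an illustrative request following the syntax above):
[source,js]
--------------------------------------------------
GET /_search
{
    "aggs" : {
        "my_buckets": {
            "composite" : {
                "sources" : [
                    { "date": { "date_histogram" : { "field": "timestamp", "interval": "1d", "time_zone": "America/Los_Angeles" } } }
                ]
            }
        }
    }
}
--------------------------------------------------
// CONSOLE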
===== Mixing different values source
The `sources` parameter accepts an array of values sources.
It is possible to mix different values sources to create composite buckets.
For example:
[source,js]
--------------------------------------------------
GET /_search
{
"aggs" : {
"my_buckets": {
"composite" : {
"sources" : [
{ "date": { "date_histogram": { "field": "timestamp", "interval": "1d" } } },
{ "product": { "terms": {"field": "product" } } }
]
}
}
}
}
--------------------------------------------------
// CONSOLE
This will create composite buckets from the values created by two values sources, a `date_histogram` and a `terms`.
Each bucket is composed of two values, one for each value source defined in the aggregation.
Any type of combination is allowed and the order in the array is preserved
in the composite buckets.
[source,js]
--------------------------------------------------
GET /_search
{
"aggs" : {
"my_buckets": {
"composite" : {
"sources" : [
{ "shop": { "terms": {"field": "shop" } } },
{ "product": { "terms": { "field": "product" } } },
{ "date": { "date_histogram": { "field": "timestamp", "interval": "1d" } } }
]
}
}
}
}
--------------------------------------------------
// CONSOLE
==== Order
By default the composite buckets are sorted by their natural ordering. Values are sorted
in ascending order of their values. When multiple value sources are requested, the ordering is done per value
source: the first value of one composite bucket is compared to the first value of the other composite bucket, and if they
are equal the next values in the composite buckets are used for tie-breaking. This means that the composite bucket
`[foo, 100]` is considered smaller than `[foobar, 0]` because `foo` is considered smaller than `foobar`.
It is possible to define the direction of the sort for each value source by setting `order` to `asc` (default value)
or `desc` (descending order) directly in the value source definition.
For example:
[source,js]
--------------------------------------------------
GET /_search
{
"aggs" : {
"my_buckets": {
"composite" : {
"sources" : [
{ "date": { "date_histogram": { "field": "timestamp", "interval": "1d", "order": "desc" } } },
{ "product": { "terms": {"field": "product", "order": "asc" } } }
]
}
}
}
}
--------------------------------------------------
// CONSOLE
\... will sort the composite bucket in descending order when comparing values from the `date_histogram` source
and in ascending order when comparing values from the `terms` source.
==== Size
The `size` parameter can be set to define how many composite buckets should be returned.
Each composite bucket is considered a single bucket, so setting a size of 10 will return the
first 10 composite buckets created from the values sources.
The response contains the values for each composite bucket in an array containing the values extracted
from each value source.
==== After
If the number of composite buckets is too high (or unknown) to be returned in a single response,
it is possible to split the retrieval into multiple requests.
Since the composite buckets are flat by nature, the requested `size` is exactly the number of composite buckets
that will be returned in the response (assuming that there are at least `size` composite buckets to return).
If all composite buckets should be retrieved it is preferable to use a small size (`100` or `1000` for instance)
and then use the `after` parameter to retrieve the next results.
For example:
[source,js]
--------------------------------------------------
GET /_search
{
"aggs" : {
"my_buckets": {
"composite" : {
"size": 2,
"sources" : [
{ "date": { "date_histogram": { "field": "timestamp", "interval": "1d" } } },
{ "product": { "terms": {"field": "product" } } }
]
}
}
}
}
--------------------------------------------------
// CONSOLE
// TEST[s/_search/_search\?filter_path=aggregations/]
\... returns:
[source,js]
--------------------------------------------------
{
...
"aggregations": {
"my_buckets": {
"buckets": [
{
"key": {
"date": 1494201600000,
"product": "rocky"
},
"doc_count": 1
},
{
"key": { <1>
"date": 1494288000000,
"product": "mad max"
},
"doc_count": 2
}
]
}
}
}
--------------------------------------------------
// TESTRESPONSE[s/\.\.\.//]
<1> The last composite bucket returned by the query.
The `after` parameter can be used to retrieve the composite buckets that are **after**
the last composite bucket returned in a previous round.
In the example above the last bucket returned is `"key": { "date": 1494288000000, "product": "mad max" }`, so the next
round of results can be retrieved with:
[source,js]
--------------------------------------------------
GET /_search
{
"aggs" : {
"my_buckets": {
"composite" : {
"size": 2,
"sources" : [
{ "date": { "date_histogram": { "field": "timestamp", "interval": "1d", "order": "desc" } } },
{ "product": { "terms": {"field": "product", "order": "asc" } } }
],
"after": { "date": 1494288000000, "product": "mad max" } <1>
}
}
}
}
--------------------------------------------------
// CONSOLE
<1> Restricts the aggregation to buckets that sort **after** the provided values.
==== Sub-aggregations
Like any `multi-bucket` aggregation, the `composite` aggregation can hold sub-aggregations.
These sub-aggregations can be used to compute other buckets or statistics on each composite bucket created by this
parent aggregation.
For instance the following example computes the average value of a field
per composite bucket:
[source,js]
--------------------------------------------------
GET /_search
{
"aggs" : {
"my_buckets": {
"composite" : {
"sources" : [
{ "date": { "date_histogram": { "field": "timestamp", "interval": "1d", "order": "desc" } } },
{ "product": { "terms": {"field": "product" } } }
]
},
"aggregations": {
"the_avg": {
"avg": { "field": "price" }
}
}
}
}
}
--------------------------------------------------
// CONSOLE
// TEST[s/_search/_search\?filter_path=aggregations/]
\... returns:
[source,js]
--------------------------------------------------
{
...
"aggregations": {
"my_buckets": {
"buckets": [
{
"key": {
"date": 1494460800000,
"product": "apocalypse now"
},
"doc_count": 1,
"the_avg": {
"value": 10.0
}
},
{
"key": {
"date": 1494374400000,
"product": "mad max"
},
"doc_count": 1,
"the_avg": {
"value": 27.0
}
},
{
"key": {
"date": 1494288000000,
"product" : "mad max"
},
"doc_count": 2,
"the_avg": {
"value": 22.5
}
},
{
"key": {
"date": 1494201600000,
"product": "rocky"
},
"doc_count": 1,
"the_avg": {
"value": 10.0
}
}
]
}
}
}
--------------------------------------------------
// TESTRESPONSE[s/\.\.\.//]
==== Index sorting
By default this aggregation runs on every document that matches the query.
However, if the index sort matches the composite sort, this aggregation can optimize
the execution and skip documents that contain composite buckets that would not
be part of the response.
For instance the following aggregation:
[source,js]
--------------------------------------------------
GET /_search
{
"aggs" : {
"my_buckets": {
"composite" : {
"size": 2,
"sources" : [
{ "date": { "date_histogram": { "field": "timestamp", "interval": "1d", "order": "asc" } } },
{ "product": { "terms": { "field": "product", "order": "asc" } } }
]
}
}
}
}
--------------------------------------------------
// CONSOLE
\... is much faster on an index that uses the following sort:
[source,js]
--------------------------------------------------
PUT twitter
{
"settings" : {
"index" : {
"sort.field" : ["timestamp", "product"],
"sort.order" : ["asc", "asc"]
}
},
"mappings": {
"sales": {
"properties": {
"timestamp": {
"type": "date"
},
"product": {
"type": "keyword"
}
}
}
}
}
--------------------------------------------------
// CONSOLE
WARNING: The optimization takes effect only if the fields used for sorting are single-valued and follow
the same order as the aggregation (`desc` or `asc`).
If only the aggregation results are needed, it is also better to set the query `size` to `0`
and `track_total_hits` to `false` in order to remove other slowing factors:
[source,js]
--------------------------------------------------
GET /_search
{
"size": 0,
"track_total_hits": false,
"aggs" : {
"my_buckets": {
"composite" : {
"size": 2,
"sources" : [
{ "date": { "date_histogram": { "field": "timestamp", "interval": "1d" } } },
{ "product": { "terms": { "field": "product" } } }
]
}
}
}
}
--------------------------------------------------
// CONSOLE
See <<index-modules-index-sorting, index sorting>> for more details.

13 binary image files not shown (installer screenshots: 11 updated, 2 added).

View File

@ -374,3 +374,66 @@ PUT my_index
// CONSOLE
<1> `question` is parent of `answer` and `comment`.
==== Multiple levels of parent join
WARNING: Using multiple levels of relations to replicate a relational model is not recommended.
Each level of relation adds overhead at query time in terms of memory and computation.
You should de-normalize your data if you care about performance.
Multiple levels of parent/child:
[source,js]
--------------------------------------------------
PUT my_index
{
"mappings": {
"doc": {
"properties": {
"my_join_field": {
"type": "join",
"relations": {
"question": ["answer", "comment"], <1>
"answer": "vote" <2>
}
}
}
}
}
}
--------------------------------------------------
// CONSOLE
<1> `question` is parent of `answer` and `comment`
<2> `answer` is parent of `vote`
The mapping above represents the following tree:

    question
     /    \
    /      \
 comment  answer
            |
            |
          vote
Indexing a grand-child document requires a `routing` value equal
to the grand-parent (the greater parent of the lineage):
[source,js]
--------------------------------------------------
PUT my_index/doc/3?routing=1&refresh <1>
{
"text": "This is a vote",
"my_join_field": {
"name": "vote",
"parent": "2" <2>
}
}
--------------------------------------------------
// CONSOLE
// TEST[continued]
<1> This child document must be on the same shard as its grand-parent and parent
<2> The parent id of this document (must point to an `answer` document)
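For completeness, a sketch of how the intermediate `answer` document (id `2`) would itself be indexed, again routed to the grand-parent question (id `1`); the `text` value is illustrative:
[source,js]
--------------------------------------------------
PUT my_index/doc/2?routing=1&refresh
{
  "text": "This is an answer",
  "my_join_field": {
    "name": "answer",
    "parent": "1"
  }
}
--------------------------------------------------
// CONSOLE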

View File

@ -67,15 +67,28 @@ name, node name and roles to be set, in addition to memory and network settings:
[[msi-installer-configuration]]
image::images/msi_installer/msi_installer_configuration.png[]
Finally, the installer provides a list of common plugins that can be downloaded and installed as
part of the installation:
A list of common plugins that can be downloaded and installed as
part of the installation, with the option to configure an HTTPS proxy through which to download them:
[[msi-installer-selected-plugins]]
image::images/msi_installer/msi_installer_selected_plugins.png[]
NOTE: X-Pack includes a trial license for 30 days. After that, you can obtain one of the https://www.elastic.co/subscriptions[available subscriptions] or {ref}/security-settings.html[disable Security]. The Basic license is free and includes the https://www.elastic.co/products/x-pack/monitoring[Monitoring] extension.
Upon choosing to install the X-Pack plugin, an additional step allows a choice of the type of X-Pack
license to install, in addition to X-Pack Security configuration and built-in user configuration:
After clicking the install button, Elasticsearch will be installed:
[[msi-installer-xpack]]
image::images/msi_installer/msi_installer_xpack.png[]
NOTE: X-Pack includes a choice of a Trial or Basic license for 30 days. After that, you can obtain one of the
https://www.elastic.co/subscriptions[available subscriptions] or {ref}/security-settings.html[disable Security].
The Basic license is free and includes the https://www.elastic.co/products/x-pack/monitoring[Monitoring] extension.
After clicking the install button, the installer will begin installation:
[[msi-installer-installing]]
image::images/msi_installer/msi_installer_installing.png[]
and will indicate when it has been successfully installed:
[[msi-installer-success]]
image::images/msi_installer/msi_installer_success.png[]
@ -128,12 +141,14 @@ as _properties_ within Windows Installer documentation) that can be passed to ms
[horizontal]
`INSTALLDIR`::
The installation directory. Defaults to `%ProgramW6432%\Elastic\Elasticsearch`
The installation directory. The final directory in the path **must**
be the version of Elasticsearch.
Defaults to ++%ProgramW6432%\Elastic\Elasticsearch{backslash}{version}++.
`DATADIRECTORY`::
The directory in which to store your data.
Defaults to `%ALLUSERSPROFILE%\Elastic\Elasticsearch\data`
`CONFIGDIRECTORY`::
@ -211,8 +226,8 @@ Defaults to `%ALLUSERSPROFILE%\Elastic\Elasticsearch\data`
`SELECTEDMEMORY`::
The amount of memory to allocate to the JVM heap for Elasticsearch.
Defaults to `2048`. If the target machine has less than 4GB in total, defaults
to 50% of total memory.
Defaults to `2048` unless the target machine has less than 4GB in total, in which case
it defaults to 50% of total memory.
`LOCKMEMORY`::
@ -245,7 +260,60 @@ Defaults to `%ALLUSERSPROFILE%\Elastic\Elasticsearch\data`
`PLUGINS`::
A comma separated list of the plugins to download and install as part of the installation. Defaults `""`
A comma separated list of the plugins to download and install as part of the installation. Defaults to `""`
`HTTPSPROXYHOST`::
The proxy host to use to download plugins over HTTPS. Defaults to `""`
`HTTPSPROXYPORT`::
The proxy port to use to download plugins over HTTPS. Defaults to `443`
`HTTPPROXYHOST`::
The proxy host to use to download plugins over HTTP. Defaults to `""`
`HTTPPROXYPORT`::
The proxy port to use to download plugins over HTTP. Defaults to `80`
`XPACKLICENSE`::
When installing X-Pack plugin, the type of license to install,
either `Basic` or `Trial`. Defaults to `Basic`
`XPACKSECURITYENABLED`::
When installing X-Pack plugin with a `Trial` license, whether X-Pack Security should be enabled.
Defaults to `true`
`BOOTSTRAPPASSWORD`::
When installing X-Pack plugin with a `Trial` license and X-Pack Security enabled, the password
used to bootstrap the cluster and persisted as the `bootstrap.password` setting in the keystore.
Defaults to a randomized value.
`SKIPSETTINGPASSWORDS`::
When installing X-Pack plugin with a `Trial` license and X-Pack Security enabled, whether the
installation should skip setting up the built-in users `elastic`, `kibana` and `logstash_system`.
Defaults to `false`
`ELASTICUSERPASSWORD`::
When installing X-Pack plugin with a `Trial` license and X-Pack Security enabled, the password
to use for the built-in user `elastic`. Defaults to `""`
`KIBANAUSERPASSWORD`::
When installing X-Pack plugin with a `Trial` license and X-Pack Security enabled, the password
to use for the built-in user `kibana`. Defaults to `""`
`LOGSTASHSYSTEMUSERPASSWORD`::
When installing X-Pack plugin with a `Trial` license and X-Pack Security enabled, the password
to use for the built-in user `logstash_system`. Defaults to `""`
To pass a value, simply append the property name and value using the format `<PROPERTYNAME>="<VALUE>"` to
the installation command. For example, to use a different installation directory from the default one and to install https://www.elastic.co/products/x-pack[X-Pack]:
@ -280,7 +348,7 @@ to the `<cluster name>.log` file within `LOGSDIRECTORY`, and can be stopped by p
[[msi-installer-command-line-configuration]]
==== Configuring Elasticsearch on the command line
Elasticsearch loads its configuration from the `%ES_HOME%\config\elasticsearch.yml`
Elasticsearch loads its configuration from the `%ES_PATH_CONF%\elasticsearch.yml`
file by default. The format of this config file is explained in
<<settings>>.
@ -335,7 +403,7 @@ Get-Service Elasticsearch | Stop-Service | Start-Service
Changes can be made to jvm.options and elasticsearch.yml configuration files to configure the
service after installation. Most changes (like JVM settings) will require a restart of the
service in order to take affect.
service in order to take effect.
[[upgrade-msi-gui]]
==== Upgrade using the graphical user interface (GUI)
@ -369,11 +437,17 @@ The `.msi` can also upgrade Elasticsearch using the command line.
[IMPORTANT]
===========================================
A command line upgrade requires passing the **same** command line properties as
used at first install time; the Windows Installer does not remember these properties.
For example, if you originally installed with the command line options `PLUGINS="x-pack"` and
`LOCKMEMORY="true"`, then you must pass these same values when performing an
upgrade from the command line.
The **exception** to this is `INSTALLDIR` (if originally specified), which must be a different directory to the
current installation.
If setting `INSTALLDIR`, the final directory in the path **must** be the version of Elasticsearch e.g.
++C:\Program Files\Elastic\Elasticsearch{backslash}{version}++
===========================================
The simplest upgrade, assuming Elasticsearch was installed using all defaults,
@ -397,8 +471,8 @@ start /wait msiexec.exe /i elasticsearch-{version}.msi /qn /l upgrade.log
The `.msi` package handles uninstallation of all directories and files added as part of installation.
WARNING: Uninstallation will remove all directories and their contents created as part of
installation, including data within the data directory. If you wish to retain your data upon
WARNING: Uninstallation will remove **all** directories and their contents created as part of
installation, **including data within the data directory**. If you wish to retain your data upon
uninstallation, it is recommended that you make a copy of the data directory before uninstallation.
MSI installer packages do not provide a GUI for uninstallation. An installed program can be uninstalled

View File

@ -0,0 +1,27 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
esplugin {
description 'A multi-bucket aggregation that can paginate buckets from different sources efficiently.'
classname 'org.elasticsearch.search.aggregations.composite.CompositeAggregationPlugin'
hasClientJar = true
}
compileJava.options.compilerArgs << "-Xlint:-deprecation"
compileTestJava.options.compilerArgs << "-Xlint:-deprecation"

View File

@ -0,0 +1,74 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.composite;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
import java.io.IOException;
import java.util.List;
import java.util.Map;
public interface CompositeAggregation extends MultiBucketsAggregation {
interface Bucket extends MultiBucketsAggregation.Bucket {
Map<String, Object> getKey();
}
@Override
List<? extends CompositeAggregation.Bucket> getBuckets();
/**
* Returns the last key in this aggregation. It can be used to retrieve the buckets that are after these values.
* See {@link CompositeAggregationBuilder#aggregateAfter}.
*/
Map<String, Object> afterKey();
static XContentBuilder bucketToXContent(CompositeAggregation.Bucket bucket,
XContentBuilder builder, Params params) throws IOException {
builder.startObject();
buildCompositeMap(CommonFields.KEY.getPreferredName(), bucket.getKey(), builder);
builder.field(CommonFields.DOC_COUNT.getPreferredName(), bucket.getDocCount());
bucket.getAggregations().toXContentInternal(builder, params);
builder.endObject();
return builder;
}
static XContentBuilder toXContentFragment(CompositeAggregation aggregation, XContentBuilder builder, Params params) throws IOException {
builder.startArray(CommonFields.BUCKETS.getPreferredName());
for (CompositeAggregation.Bucket bucket : aggregation.getBuckets()) {
bucketToXContent(bucket, builder, params);
}
builder.endArray();
return builder;
}
static void buildCompositeMap(String fieldName, Map<String, Object> composite, XContentBuilder builder) throws IOException {
builder.startObject(fieldName);
for (Map.Entry<String, Object> entry : composite.entrySet()) {
if (entry.getValue().getClass() == BytesRef.class) {
builder.field(entry.getKey(), ((BytesRef) entry.getValue()).utf8ToString());
} else {
builder.field(entry.getKey(), entry.getValue());
}
}
builder.endObject();
}
}

View File

@ -0,0 +1,218 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.composite;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.IndexSortConfig;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.search.aggregations.AbstractAggregationBuilder;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
public class CompositeAggregationBuilder extends AbstractAggregationBuilder<CompositeAggregationBuilder> {
public static final String NAME = "composite";
public static final ParseField AFTER_FIELD_NAME = new ParseField("after");
public static final ParseField SIZE_FIELD_NAME = new ParseField("size");
public static final ParseField SOURCES_FIELD_NAME = new ParseField("sources");
private static final ObjectParser<CompositeAggregationBuilder, Void> PARSER;
static {
PARSER = new ObjectParser<>(NAME);
PARSER.declareInt(CompositeAggregationBuilder::size, SIZE_FIELD_NAME);
PARSER.declareObject(CompositeAggregationBuilder::aggregateAfter, (parser, context) -> parser.map(), AFTER_FIELD_NAME);
PARSER.declareObjectArray(CompositeAggregationBuilder::setSources,
(p, c) -> CompositeValuesSourceParserHelper.fromXContent(p), SOURCES_FIELD_NAME);
}
public static CompositeAggregationBuilder parse(String aggregationName, XContentParser parser) throws IOException {
return PARSER.parse(parser, new CompositeAggregationBuilder(aggregationName), null);
}
private List<CompositeValuesSourceBuilder<?>> sources;
private Map<String, Object> after;
private int size = 10;
private CompositeAggregationBuilder(String name) {
this(name, null);
}
public CompositeAggregationBuilder(String name, List<CompositeValuesSourceBuilder<?>> sources) {
super(name);
this.sources = sources;
}
public CompositeAggregationBuilder(StreamInput in) throws IOException {
super(in);
int num = in.readVInt();
this.sources = new ArrayList<>(num);
for (int i = 0; i < num; i++) {
CompositeValuesSourceBuilder<?> builder = CompositeValuesSourceParserHelper.readFrom(in);
sources.add(builder);
}
this.size = in.readVInt();
if (in.readBoolean()) {
this.after = in.readMap();
}
}
@Override
protected void doWriteTo(StreamOutput out) throws IOException {
out.writeVInt(sources.size());
for (CompositeValuesSourceBuilder<?> builder : sources) {
CompositeValuesSourceParserHelper.writeTo(builder, out);
}
out.writeVInt(size);
out.writeBoolean(after != null);
if (after != null) {
out.writeMap(after);
}
}
@Override
public String getType() {
return NAME;
}
private CompositeAggregationBuilder setSources(List<CompositeValuesSourceBuilder<?>> sources) {
this.sources = sources;
return this;
}
/**
* Gets the list of {@link CompositeValuesSourceBuilder} for this aggregation.
*/
public List<CompositeValuesSourceBuilder<?>> sources() {
return sources;
}
/**
* Sets the values that indicate which composite bucket this request should "aggregate after".
* Defaults to <tt>null</tt>.
*/
public CompositeAggregationBuilder aggregateAfter(Map<String, Object> afterKey) {
this.after = afterKey;
return this;
}
/**
* The number of composite buckets to return. Defaults to <tt>10</tt>.
*/
public CompositeAggregationBuilder size(int size) {
this.size = size;
return this;
}
@Override
protected AggregatorFactory<?> doBuild(SearchContext context, AggregatorFactory<?> parent,
AggregatorFactories.Builder subfactoriesBuilder) throws IOException {
if (parent != null) {
throw new IllegalArgumentException("[composite] aggregation cannot be used with a parent aggregation");
}
final QueryShardContext shardContext = context.getQueryShardContext();
CompositeValuesSourceConfig[] configs = new CompositeValuesSourceConfig[sources.size()];
SortField[] sortFields = new SortField[configs.length];
IndexSortConfig indexSortConfig = shardContext.getIndexSettings().getIndexSortConfig();
if (indexSortConfig.hasIndexSort()) {
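// Copy the index sort fields so that each value source can check whether its
// order matches the index sort, which is what allows early termination.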
Sort sort = indexSortConfig.buildIndexSort(shardContext::fieldMapper, shardContext::getForField);
System.arraycopy(sort.getSort(), 0, sortFields, 0, sortFields.length);
}
List<String> sourceNames = new ArrayList<>();
for (int i = 0; i < configs.length; i++) {
configs[i] = sources.get(i).build(context, i, configs.length, sortFields[i]);
sourceNames.add(sources.get(i).name());
if (configs[i].valuesSource().needsScores()) {
throw new IllegalArgumentException("[sources] cannot access _score");
}
}
final CompositeKey afterKey;
if (after != null) {
if (after.size() != sources.size()) {
throw new IllegalArgumentException("[after] has " + after.size() +
" value(s) but [sources] has " + sources.size());
}
Comparable<?>[] values = new Comparable<?>[sources.size()];
for (int i = 0; i < sources.size(); i++) {
String sourceName = sources.get(i).name();
if (after.containsKey(sourceName) == false) {
throw new IllegalArgumentException("Missing value for [after." + sources.get(i).name() + "]");
}
Object obj = after.get(sourceName);
if (obj instanceof Comparable) {
values[i] = (Comparable<?>) obj;
} else {
throw new IllegalArgumentException("Invalid value for [after." + sources.get(i).name() +
"], expected comparable, got [" + (obj == null ? "null" : obj.getClass().getSimpleName()) + "]");
}
}
afterKey = new CompositeKey(values);
} else {
afterKey = null;
}
return new CompositeAggregationFactory(name, context, parent, subfactoriesBuilder, metaData, size, configs, sourceNames, afterKey);
}
@Override
protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(SIZE_FIELD_NAME.getPreferredName(), size);
builder.startArray(SOURCES_FIELD_NAME.getPreferredName());
for (CompositeValuesSourceBuilder<?> source: sources) {
builder.startObject();
builder.startObject(source.name());
source.toXContent(builder, params);
builder.endObject();
builder.endObject();
}
builder.endArray();
if (after != null) {
CompositeAggregation.buildCompositeMap(AFTER_FIELD_NAME.getPreferredName(), after, builder);
}
builder.endObject();
return builder;
}
@Override
protected int doHashCode() {
return Objects.hash(sources, size, after);
}
@Override
protected boolean doEquals(Object obj) {
CompositeAggregationBuilder other = (CompositeAggregationBuilder) obj;
return size == other.size &&
Objects.equals(sources, other.sources) &&
Objects.equals(after, other.after);
}
}

View File

@ -0,0 +1,55 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.composite;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException;
import java.util.List;
import java.util.Map;
class CompositeAggregationFactory extends AggregatorFactory<CompositeAggregationFactory> {
private final int size;
private final CompositeValuesSourceConfig[] sources;
private final List<String> sourceNames;
private final CompositeKey afterKey;
CompositeAggregationFactory(String name, SearchContext context, AggregatorFactory<?> parent,
AggregatorFactories.Builder subFactoriesBuilder, Map<String, Object> metaData,
int size, CompositeValuesSourceConfig[] sources,
List<String> sourceNames, CompositeKey afterKey) throws IOException {
super(name, context, parent, subFactoriesBuilder, metaData);
this.size = size;
this.sources = sources;
this.sourceNames = sourceNames;
this.afterKey = afterKey;
}
@Override
protected Aggregator createInternal(Aggregator parent, boolean collectsFromSingleBucket,
List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
return new CompositeAggregator(name, factories, context, parent, pipelineAggregators, metaData,
size, sources, sourceNames, afterKey);
}
}

View File

@ -0,0 +1,36 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.composite;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.SearchPlugin;
import java.util.Arrays;
import java.util.List;
public class CompositeAggregationPlugin extends Plugin implements SearchPlugin {
@Override
public List<AggregationSpec> getAggregations() {
return Arrays.asList(
new AggregationSpec(CompositeAggregationBuilder.NAME, CompositeAggregationBuilder::new, CompositeAggregationBuilder::parse)
.addResultReader(InternalComposite::new)
);
}
}

View File

@ -0,0 +1,237 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.composite;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.CollectionTerminatedException;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.RoaringDocIdSet;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.LeafBucketCollector;
import org.elasticsearch.search.aggregations.bucket.BucketsAggregator;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
final class CompositeAggregator extends BucketsAggregator {
private final int size;
private final CompositeValuesSourceConfig[] sources;
private final List<String> sourceNames;
private final boolean canEarlyTerminate;
private final TreeMap<Integer, Integer> keys;
private final CompositeValuesComparator array;
private final List<LeafContext> contexts = new ArrayList<>();
private LeafContext leaf;
private RoaringDocIdSet.Builder builder;
CompositeAggregator(String name, AggregatorFactories factories, SearchContext context, Aggregator parent,
List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData,
int size, CompositeValuesSourceConfig[] sources, List<String> sourceNames,
CompositeKey rawAfterKey) throws IOException {
super(name, factories, context, parent, pipelineAggregators, metaData);
this.size = size;
this.sources = sources;
this.sourceNames = sourceNames;
// Slot 0 is reserved for the current candidate document, hence size + 1 slots.
this.array = new CompositeValuesComparator(context.searcher().getIndexReader(), sources, size+1);
if (rawAfterKey != null) {
array.setTop(rawAfterKey.values());
}
this.keys = new TreeMap<>(array::compare);
this.canEarlyTerminate = Arrays.stream(sources)
.allMatch(CompositeValuesSourceConfig::canEarlyTerminate);
}
boolean canEarlyTerminate() {
return canEarlyTerminate;
}
private int[] getReverseMuls() {
return Arrays.stream(sources).mapToInt(CompositeValuesSourceConfig::reverseMul).toArray();
}
@Override
public InternalAggregation buildAggregation(long zeroBucket) throws IOException {
assert zeroBucket == 0L;
// Replay all documents that contain at least one top bucket (collected during the first pass).
grow(keys.size()+1);
for (LeafContext context : contexts) {
DocIdSetIterator docIdSetIterator = context.docIdSet.iterator();
if (docIdSetIterator == null) {
continue;
}
final CompositeValuesSource.Collector collector =
array.getLeafCollector(context.ctx, getSecondPassCollector(context.subCollector));
int docID;
while ((docID = docIdSetIterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
collector.collect(docID);
}
}
int num = Math.min(size, keys.size());
final InternalComposite.InternalBucket[] buckets = new InternalComposite.InternalBucket[num];
final int[] reverseMuls = getReverseMuls();
int pos = 0;
for (int slot : keys.keySet()) {
CompositeKey key = array.toCompositeKey(slot);
InternalAggregations aggs = bucketAggregations(slot);
int docCount = bucketDocCount(slot);
buckets[pos++] = new InternalComposite.InternalBucket(sourceNames, key, reverseMuls, docCount, aggs);
}
return new InternalComposite(name, size, sourceNames, Arrays.asList(buckets), reverseMuls, pipelineAggregators(), metaData());
}
@Override
public InternalAggregation buildEmptyAggregation() {
final int[] reverseMuls = getReverseMuls();
return new InternalComposite(name, size, sourceNames, Collections.emptyList(), reverseMuls, pipelineAggregators(), metaData());
}
@Override
protected LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException {
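// A new segment is visited: seal the doc id set recorded for the previous segment
// and start recording candidate documents for this one.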
if (leaf != null) {
leaf.docIdSet = builder.build();
contexts.add(leaf);
}
leaf = new LeafContext(ctx, sub);
builder = new RoaringDocIdSet.Builder(ctx.reader().maxDoc());
final CompositeValuesSource.Collector inner = array.getLeafCollector(ctx, getFirstPassCollector());
return new LeafBucketCollector() {
@Override
public void collect(int doc, long zeroBucket) throws IOException {
assert zeroBucket == 0L;
inner.collect(doc);
}
};
}
@Override
protected void doPostCollection() throws IOException {
if (leaf != null) {
leaf.docIdSet = builder.build();
contexts.add(leaf);
}
}
/**
* The first pass selects the top N composite buckets from all matching documents.
* It also records all doc ids that contain a top N composite bucket in a {@link RoaringDocIdSet} in order to be
* able to replay the collection filtered on the best buckets only.
*/
private CompositeValuesSource.Collector getFirstPassCollector() {
return new CompositeValuesSource.Collector() {
int lastDoc = -1;
@Override
public void collect(int doc) throws IOException {
// Checks if the candidate key in slot 0 is competitive.
if (keys.containsKey(0)) {
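// Note: the tree map is ordered by the composite values of each slot (array::compare),
// so containsKey(0) checks whether the candidate's values are already a top key.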
// This key is already in the top N, skip it for now.
if (doc != lastDoc) {
builder.add(doc);
lastDoc = doc;
}
return;
}
if (array.hasTop() && array.compareTop(0) <= 0) {
// This key sorts at or before the top value collected in the previous round, skip it.
if (canEarlyTerminate) {
// The index sort matches the composite sort, we can early terminate this segment.
throw new CollectionTerminatedException();
}
// just skip this key for now
return;
}
if (keys.size() >= size) {
// The tree map is full, check if the candidate key should be kept.
if (array.compare(0, keys.lastKey()) > 0) {
// The candidate key is not competitive
if (canEarlyTerminate) {
// The index sort matches the composite sort, we can early terminate this segment.
throw new CollectionTerminatedException();
}
// just skip this key
return;
}
}
// The candidate key is competitive
final int newSlot;
if (keys.size() >= size) {
// the tree map is full, we replace the last key with this candidate.
int slot = keys.pollLastEntry().getKey();
// and we recycle the deleted slot
newSlot = slot;
} else {
newSlot = keys.size() + 1;
}
// move the candidate key to its new slot.
array.move(0, newSlot);
keys.put(newSlot, newSlot);
if (doc != lastDoc) {
builder.add(doc);
lastDoc = doc;
}
}
};
}
/**
* The second pass delegates the collection to sub-aggregations but only if the collected composite bucket is a top bucket (selected
* in the first pass).
*/
private CompositeValuesSource.Collector getSecondPassCollector(LeafBucketCollector subCollector) throws IOException {
return doc -> {
Integer bucket = keys.get(0);
if (bucket != null) {
// The candidate key in slot 0 is a top bucket.
// We can defer the collection of this document/bucket to the sub collector
collectExistingBucket(subCollector, doc, bucket);
}
};
}
static class LeafContext {
final LeafReaderContext ctx;
final LeafBucketCollector subCollector;
DocIdSet docIdSet;
LeafContext(LeafReaderContext ctx, LeafBucketCollector subCollector) {
this.ctx = ctx;
this.subCollector = subCollector;
}
}
}

View File

@ -0,0 +1,72 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.composite;
import java.util.Arrays;
/**
* A key that is composed of multiple {@link Comparable} values.
*/
class CompositeKey {
private final Comparable<?>[] values;
CompositeKey(Comparable<?>... values) {
this.values = values;
}
Comparable<?>[] values() {
return values;
}
int size() {
return values.length;
}
Comparable<?> get(int pos) {
assert pos < values.length;
return values[pos];
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
CompositeKey that = (CompositeKey) o;
return Arrays.equals(values, that.values);
}
@Override
public int hashCode() {
return Arrays.hashCode(values);
}
}

View File

@ -0,0 +1,148 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.composite;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.elasticsearch.search.aggregations.LeafBucketCollector;
import java.io.IOException;
import static org.elasticsearch.search.aggregations.composite.CompositeValuesSource.wrapBinary;
import static org.elasticsearch.search.aggregations.composite.CompositeValuesSource.wrapDouble;
import static org.elasticsearch.search.aggregations.composite.CompositeValuesSource.wrapGlobalOrdinals;
import static org.elasticsearch.search.aggregations.composite.CompositeValuesSource.wrapLong;
import static org.elasticsearch.search.aggregations.support.ValuesSource.Numeric;
import static org.elasticsearch.search.aggregations.support.ValuesSource.Bytes;
import static org.elasticsearch.search.aggregations.support.ValuesSource.Bytes.WithOrdinals;
final class CompositeValuesComparator {
private final int size;
private final CompositeValuesSource<?, ?>[] arrays;
private boolean topValueSet = false;
/**
* @param reader The index reader, used to build per-segment and global value lookups.
* @param sources The list of {@link CompositeValuesSourceConfig} to build the composite buckets.
* @param size The number of composite buckets to keep.
*/
CompositeValuesComparator(IndexReader reader, CompositeValuesSourceConfig[] sources, int size) {
this.size = size;
this.arrays = new CompositeValuesSource<?, ?>[sources.length];
for (int i = 0; i < sources.length; i++) {
final int reverseMul = sources[i].reverseMul();
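// Global ordinals can only be used with a top-level DirectoryReader;
// otherwise the plain binary (BytesRef) comparator below is used.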
if (sources[i].valuesSource() instanceof WithOrdinals && reader instanceof DirectoryReader) {
WithOrdinals vs = (WithOrdinals) sources[i].valuesSource();
arrays[i] = wrapGlobalOrdinals(vs, size, reverseMul);
} else if (sources[i].valuesSource() instanceof Bytes) {
Bytes vs = (Bytes) sources[i].valuesSource();
arrays[i] = wrapBinary(vs, size, reverseMul);
} else if (sources[i].valuesSource() instanceof Numeric) {
final Numeric vs = (Numeric) sources[i].valuesSource();
if (vs.isFloatingPoint()) {
arrays[i] = wrapDouble(vs, size, reverseMul);
} else {
arrays[i] = wrapLong(vs, size, reverseMul);
}
}
}
}
/**
* Moves the values in <code>slot1</code> to <code>slot2</code>.
*/
void move(int slot1, int slot2) {
assert slot1 < size && slot2 < size;
for (int i = 0; i < arrays.length; i++) {
arrays[i].move(slot1, slot2);
}
}
/**
* Compares the values in <code>slot1</code> with <code>slot2</code>.
*/
int compare(int slot1, int slot2) {
assert slot1 < size && slot2 < size;
for (int i = 0; i < arrays.length; i++) {
int cmp = arrays[i].compare(slot1, slot2);
if (cmp != 0) {
return cmp;
}
}
return 0;
}
/**
* Returns true if a top value has been set for this comparator.
*/
boolean hasTop() {
return topValueSet;
}
/**
* Sets the top values for this comparator.
*/
void setTop(Comparable<?>[] values) {
assert values.length == arrays.length;
topValueSet = true;
for (int i = 0; i < arrays.length; i++) {
arrays[i].setTop(values[i]);
}
}
/**
* Compares the top values with the values in <code>slot</code>.
*/
int compareTop(int slot) {
assert slot < size;
for (int i = 0; i < arrays.length; i++) {
int cmp = arrays[i].compareTop(slot);
if (cmp != 0) {
return cmp;
}
}
return 0;
}
/**
* Builds the {@link CompositeKey} for <code>slot</code>.
*/
CompositeKey toCompositeKey(int slot) throws IOException {
assert slot < size;
Comparable<?>[] values = new Comparable<?>[arrays.length];
for (int i = 0; i < values.length; i++) {
values[i] = arrays[i].toComparable(slot);
}
return new CompositeKey(values);
}
/**
* Gets the {@link LeafBucketCollector} that will record the composite buckets of the visited documents.
*/
CompositeValuesSource.Collector getLeafCollector(LeafReaderContext context, CompositeValuesSource.Collector in) throws IOException {
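// Chain the per-source collectors with source 0 outermost: for each document, every value
// of source i writes its candidate into slot 0 and triggers source i+1, so the
// cross-product of the document's values is emitted before `in` is invoked.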
int last = arrays.length - 1;
CompositeValuesSource.Collector next = arrays[last].getLeafCollector(context, in);
for (int i = last - 1; i >= 0; i--) {
next = arrays[i].getLeafCollector(context, next);
}
return next;
}
}

View File

@ -0,0 +1,410 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.composite;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
import org.elasticsearch.index.fielddata.SortedNumericDoubleValues;
import org.elasticsearch.index.fielddata.ordinals.GlobalOrdinalMapping;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.sort.SortOrder;
import java.io.IOException;
import static org.apache.lucene.index.SortedSetDocValues.NO_MORE_ORDS;
/**
* A wrapper for {@link ValuesSource} that can record and compare values produced during a collection.
*/
abstract class CompositeValuesSource<VS extends ValuesSource, T extends Comparable<T>> {
interface Collector {
void collect(int doc) throws IOException;
}
protected final VS vs;
protected final int size;
protected final int reverseMul;
protected T topValue;
/**
* @param vs The original {@link ValuesSource}.
* @param size The number of values to record.
* @param reverseMul -1 if the natural order ({@link SortOrder#ASC}) should be reversed.
*/
CompositeValuesSource(VS vs, int size, int reverseMul) {
this.vs = vs;
this.size = size;
this.reverseMul = reverseMul;
}
/**
* The type of this source.
*/
abstract String type();
/**
* Moves the value in <code>from</code> to <code>to</code>.
* The value present in <code>to</code> is overwritten.
*/
abstract void move(int from, int to);
/**
* Compares the value in <code>from</code> with the value in <code>to</code>.
*/
abstract int compare(int from, int to);
/**
* Compares the value in <code>slot</code> with the top value in this source.
*/
abstract int compareTop(int slot);
/**
* Sets the top value for this source. Values that compare smaller should not be recorded.
*/
abstract void setTop(Comparable<?> value);
/**
* Transforms the value in <code>slot</code> to a {@link Comparable} object.
*/
abstract Comparable<T> toComparable(int slot) throws IOException;
/**
* Gets the {@link LeafCollector} that will record the values of the visited documents.
*/
abstract Collector getLeafCollector(LeafReaderContext context, Collector next) throws IOException;
/**
* Creates a {@link CompositeValuesSource} that generates long values.
*/
static CompositeValuesSource<ValuesSource.Numeric, Long> wrapLong(ValuesSource.Numeric vs, int size, int reverseMul) {
return new LongValuesSource(vs, size, reverseMul);
}
/**
* Creates a {@link CompositeValuesSource} that generates double values.
*/
static CompositeValuesSource<ValuesSource.Numeric, Double> wrapDouble(ValuesSource.Numeric vs, int size, int reverseMul) {
return new DoubleValuesSource(vs, size, reverseMul);
}
/**
* Creates a {@link CompositeValuesSource} that generates binary values.
*/
static CompositeValuesSource<ValuesSource.Bytes, BytesRef> wrapBinary(ValuesSource.Bytes vs, int size, int reverseMul) {
return new BinaryValuesSource(vs, size, reverseMul);
}
/**
* Creates a {@link CompositeValuesSource} that generates global ordinal values.
*/
static CompositeValuesSource<ValuesSource.Bytes.WithOrdinals, BytesRef> wrapGlobalOrdinals(ValuesSource.Bytes.WithOrdinals vs,
int size,
int reverseMul) {
return new GlobalOrdinalValuesSource(vs, size, reverseMul);
}
/**
* A {@link CompositeValuesSource} for global ordinals
*/
private static class GlobalOrdinalValuesSource extends CompositeValuesSource<ValuesSource.Bytes.WithOrdinals, BytesRef> {
private final long[] values;
private SortedSetDocValues lookup;
private Long topValueLong;
GlobalOrdinalValuesSource(ValuesSource.Bytes.WithOrdinals vs, int size, int reverseMul) {
super(vs, size, reverseMul);
this.values = new long[size];
}
@Override
String type() {
return "global_ordinals";
}
@Override
void move(int from, int to) {
values[to] = values[from];
}
@Override
int compare(int from, int to) {
return Long.compare(values[from], values[to]) * reverseMul;
}
@Override
int compareTop(int slot) {
return Long.compare(values[slot], topValueLong) * reverseMul;
}
@Override
void setTop(Comparable<?> value) {
if (value instanceof BytesRef) {
topValue = (BytesRef) value;
} else if (value instanceof String) {
topValue = new BytesRef(value.toString());
} else {
throw new IllegalArgumentException("invalid value, expected string, got " + value.getClass().getSimpleName());
}
}
@Override
Comparable<BytesRef> toComparable(int slot) throws IOException {
return BytesRef.deepCopyOf(lookup.lookupOrd(values[slot]));
}
@Override
Collector getLeafCollector(LeafReaderContext context, Collector next) throws IOException {
final SortedSetDocValues dvs = vs.globalOrdinalsValues(context);
if (lookup == null) {
lookup = dvs;
if (topValue != null && topValueLong == null) {
if (lookup instanceof GlobalOrdinalMapping) {
// Find the global ordinal (or the insertion point) for the provided top value.
topValueLong = lookupGlobalOrdinals((GlobalOrdinalMapping) lookup, topValue);
} else {
// Global ordinals are not needed, switch back to ordinals (single segment case).
topValueLong = lookup.lookupTerm(topValue);
if (topValueLong < 0) {
// convert negative insert position
topValueLong = -topValueLong - 2;
}
}
}
}
return doc -> {
if (dvs.advanceExact(doc)) {
long ord;
while ((ord = dvs.nextOrd()) != NO_MORE_ORDS) {
values[0] = ord;
next.collect(doc);
}
}
};
}
private static long lookupGlobalOrdinals(GlobalOrdinalMapping mapping, BytesRef key) throws IOException {
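// Binary search over the global ordinal space: returns the ordinal of `key` if present,
// otherwise the ordinal of the greatest term that sorts before `key` (-1 if none).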
long low = 0;
long high = mapping.getValueCount() - 1; // highest valid global ordinal
while (low <= high) {
long mid = (low + high) >>> 1;
BytesRef midVal = mapping.lookupOrd(mid);
int cmp = midVal.compareTo(key);
if (cmp < 0) {
low = mid + 1;
} else if (cmp > 0) {
high = mid - 1;
} else {
return mid;
}
}
return low-1;
}
}
/**
* A {@link CompositeValuesSource} for binary source ({@link BytesRef})
*/
private static class BinaryValuesSource extends CompositeValuesSource<ValuesSource.Bytes, BytesRef> {
private final BytesRef[] values;
private BytesRef topValue;
BinaryValuesSource(ValuesSource.Bytes vs, int size, int reverseMul) {
super(vs, size, reverseMul);
this.values = new BytesRef[size];
}
@Override
String type() {
return "binary";
}
@Override
public void move(int from, int to) {
values[to] = BytesRef.deepCopyOf(values[from]);
}
@Override
public int compare(int from, int to) {
return values[from].compareTo(values[to]) * reverseMul;
}
@Override
int compareTop(int slot) {
return values[slot].compareTo(topValue) * reverseMul;
}
@Override
void setTop(Comparable<?> value) {
if (value.getClass() == BytesRef.class) {
topValue = (BytesRef) value;
} else if (value.getClass() == String.class) {
topValue = new BytesRef((String) value);
} else {
throw new IllegalArgumentException("invalid value, expected string, got " + value.getClass().getSimpleName());
}
}
@Override
Comparable<BytesRef> toComparable(int slot) {
return values[slot];
}
@Override
Collector getLeafCollector(LeafReaderContext context, Collector next) throws IOException {
final SortedBinaryDocValues dvs = vs.bytesValues(context);
return doc -> {
if (dvs.advanceExact(doc)) {
int num = dvs.docValueCount();
for (int i = 0; i < num; i++) {
values[0] = dvs.nextValue();
next.collect(doc);
}
}
};
}
}
/**
* A {@link CompositeValuesSource} for longs.
*/
private static class LongValuesSource extends CompositeValuesSource<ValuesSource.Numeric, Long> {
private final long[] values;
private long topValue;
LongValuesSource(ValuesSource.Numeric vs, int size, int reverseMul) {
super(vs, size, reverseMul);
this.values = new long[size];
}
@Override
String type() {
return "long";
}
@Override
void move(int from, int to) {
values[to] = values[from];
}
@Override
int compare(int from, int to) {
return Long.compare(values[from], values[to]) * reverseMul;
}
@Override
int compareTop(int slot) {
return Long.compare(values[slot], topValue) * reverseMul;
}
@Override
void setTop(Comparable<?> value) {
if (value instanceof Number) {
topValue = ((Number) value).longValue();
} else {
topValue = Long.parseLong(value.toString());
}
}
@Override
Comparable<Long> toComparable(int slot) {
return values[slot];
}
@Override
Collector getLeafCollector(LeafReaderContext context, Collector next) throws IOException {
final SortedNumericDocValues dvs = vs.longValues(context);
return doc -> {
if (dvs.advanceExact(doc)) {
int num = dvs.docValueCount();
for (int i = 0; i < num; i++) {
values[0] = dvs.nextValue();
next.collect(doc);
}
}
};
}
}
/**
* A {@link CompositeValuesSource} for doubles.
*/
private static class DoubleValuesSource extends CompositeValuesSource<ValuesSource.Numeric, Double> {
private final double[] values;
private double topValue;
DoubleValuesSource(ValuesSource.Numeric vs, int size, int reverseMul) {
super(vs, size, reverseMul);
this.values = new double[size];
}
@Override
String type() {
return "long";
}
@Override
void move(int from, int to) {
values[to] = values[from];
}
@Override
int compare(int from, int to) {
return Double.compare(values[from], values[to]) * reverseMul;
}
@Override
int compareTop(int slot) {
return Double.compare(values[slot], topValue) * reverseMul;
}
@Override
void setTop(Comparable<?> value) {
if (value instanceof Number) {
topValue = ((Number) value).doubleValue();
} else {
topValue = Double.parseDouble(value.toString());
}
}
@Override
Comparable<Double> toComparable(int slot) {
return values[slot];
}
@Override
Collector getLeafCollector(LeafReaderContext context, Collector next) throws IOException {
final SortedNumericDoubleValues dvs = vs.doubleValues(context);
return doc -> {
if (dvs.advanceExact(doc)) {
int num = dvs.docValueCount();
for (int i = 0; i < num; i++) {
values[0] = dvs.nextValue();
next.collect(doc);
}
}
};
}
}
}

View File

@ -0,0 +1,304 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.composite;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.search.SortField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.IndexSortConfig;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.sort.SortOrder;
import java.io.IOException;
import java.util.Objects;
/**
* A {@link ValuesSource} builder for {@link CompositeAggregationBuilder}
*/
public abstract class CompositeValuesSourceBuilder<AB extends CompositeValuesSourceBuilder<AB>> implements Writeable, ToXContentFragment {
protected final String name;
private String field = null;
private Script script = null;
private ValueType valueType = null;
private Object missing = null;
private SortOrder order = SortOrder.ASC;
CompositeValuesSourceBuilder(String name) {
this(name, null);
}
CompositeValuesSourceBuilder(String name, ValueType valueType) {
this.name = name;
this.valueType = valueType;
}
CompositeValuesSourceBuilder(StreamInput in) throws IOException {
this.name = in.readString();
this.field = in.readOptionalString();
if (in.readBoolean()) {
this.script = new Script(in);
}
if (in.readBoolean()) {
this.valueType = ValueType.readFromStream(in);
}
this.missing = in.readGenericValue();
this.order = SortOrder.readFromStream(in);
}
@Override
public final void writeTo(StreamOutput out) throws IOException {
out.writeString(name);
out.writeOptionalString(field);
boolean hasScript = script != null;
out.writeBoolean(hasScript);
if (hasScript) {
script.writeTo(out);
}
boolean hasValueType = valueType != null;
out.writeBoolean(hasValueType);
if (hasValueType) {
valueType.writeTo(out);
}
out.writeGenericValue(missing);
order.writeTo(out);
innerWriteTo(out);
}
protected abstract void innerWriteTo(StreamOutput out) throws IOException;
protected abstract void doXContentBody(XContentBuilder builder, Params params) throws IOException;
@Override
public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(type());
if (field != null) {
builder.field("field", field);
}
if (script != null) {
builder.field("script", script);
}
if (missing != null) {
builder.field("missing", missing);
}
if (valueType != null) {
builder.field("value_type", valueType.getPreferredName());
}
builder.field("order", order);
doXContentBody(builder, params);
builder.endObject();
return builder;
}
@Override
public final int hashCode() {
return Objects.hash(field, missing, script, valueType, order, innerHashCode());
}
protected abstract int innerHashCode();
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
@SuppressWarnings("unchecked")
AB that = (AB) o;
return Objects.equals(field, that.field()) &&
Objects.equals(script, that.script()) &&
Objects.equals(valueType, that.valueType()) &&
Objects.equals(missing, that.missing()) &&
Objects.equals(order, that.order()) &&
innerEquals(that);
}
protected abstract boolean innerEquals(AB builder);
public String name() {
return name;
}
abstract String type();
/**
* Sets the field to use for this source
*/
@SuppressWarnings("unchecked")
public AB field(String field) {
if (field == null) {
throw new IllegalArgumentException("[field] must not be null");
}
this.field = field;
return (AB) this;
}
/**
* Gets the field to use for this source
*/
public String field() {
return field;
}
/**
* Sets the script to use for this source
*/
@SuppressWarnings("unchecked")
public AB script(Script script) {
if (script == null) {
throw new IllegalArgumentException("[script] must not be null");
}
this.script = script;
return (AB) this;
}
/**
* Gets the script to use for this source
*/
public Script script() {
return script;
}
/**
* Sets the {@link ValueType} for the value produced by this source
*/
@SuppressWarnings("unchecked")
public AB valueType(ValueType valueType) {
if (valueType == null) {
throw new IllegalArgumentException("[valueType] must not be null");
}
this.valueType = valueType;
return (AB) this;
}
/**
* Gets the {@link ValueType} for the value produced by this source
*/
public ValueType valueType() {
return valueType;
}
/**
* Sets the value to use when the source finds a missing value in a
* document
*/
@SuppressWarnings("unchecked")
public AB missing(Object missing) {
if (missing == null) {
throw new IllegalArgumentException("[missing] must not be null");
}
this.missing = missing;
return (AB) this;
}
public Object missing() {
return missing;
}
/**
* Sets the {@link SortOrder} to use to sort values produced by this source
*/
@SuppressWarnings("unchecked")
public AB order(String order) {
if (order == null) {
throw new IllegalArgumentException("[order] must not be null");
}
this.order = SortOrder.fromString(order);
return (AB) this;
}
/**
* Sets the {@link SortOrder} to use to sort values produced by this source
*/
@SuppressWarnings("unchecked")
public AB order(SortOrder order) {
if (order == null) {
throw new IllegalArgumentException("[order] must not be null");
}
this.order = order;
return (AB) this;
}
/**
* Gets the {@link SortOrder} to use to sort values produced by this source
*/
public SortOrder order() {
return order;
}
/**
* Creates a {@link CompositeValuesSourceConfig} for this source.
*
* @param context The search context for this source.
* @param config The {@link ValuesSourceConfig} for this source.
* @param pos The position of this source in the composite key.
* @param numPos The total number of positions in the composite key.
* @param sortField The {@link SortField} of the index sort at this position or null if not present.
*/
protected abstract CompositeValuesSourceConfig innerBuild(SearchContext context,
ValuesSourceConfig<?> config,
int pos,
int numPos,
SortField sortField) throws IOException;
public final CompositeValuesSourceConfig build(SearchContext context, int pos, int numPos, SortField sortField) throws IOException {
ValuesSourceConfig<?> config = ValuesSourceConfig.resolve(context.getQueryShardContext(),
valueType, field, script, missing, null, null);
return innerBuild(context, config, pos, numPos, sortField);
}
protected boolean checkCanEarlyTerminate(IndexReader reader,
String fieldName,
boolean reverse,
SortField sortField) throws IOException {
return sortField.getField().equals(fieldName) &&
sortField.getReverse() == reverse &&
isSingleValued(reader, sortField);
}
private static boolean isSingleValued(IndexReader reader, SortField field) throws IOException {
SortField.Type type = IndexSortConfig.getSortFieldType(field);
for (LeafReaderContext context : reader.leaves()) {
if (type == SortField.Type.STRING) {
final SortedSetDocValues values = DocValues.getSortedSet(context.reader(), field.getField());
if (values.cost() > 0 && DocValues.unwrapSingleton(values) == null) {
return false;
}
} else {
final SortedNumericDocValues values = DocValues.getSortedNumeric(context.reader(), field.getField());
if (values.cost() > 0 && DocValues.unwrapSingleton(values) == null) {
return false;
}
}
}
return true;
}
}
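Each fluent setter above rejects null and returns the concrete builder through the self-referential type parameter AB, so subclasses chain naturally. A hedged usage sketch; the field name and missing value are assumptions for illustration, and TermsValuesSourceBuilder is defined later in this change:

import org.elasticsearch.search.aggregations.composite.TermsValuesSourceBuilder;
import org.elasticsearch.search.sort.SortOrder;

class BuilderUsageSketch {
    static TermsValuesSourceBuilder byUser() {
        return new TermsValuesSourceBuilder("by_user")
                .field("user.keyword")   // assumed field name
                .missing("unknown")      // bucket for documents without the field
                .order(SortOrder.ASC);
    }
}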

View File

@ -0,0 +1,57 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.composite;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.sort.SortOrder;
class CompositeValuesSourceConfig {
private final String name;
private final ValuesSource vs;
private final int reverseMul;
private final boolean canEarlyTerminate;
CompositeValuesSourceConfig(String name, ValuesSource vs, SortOrder order, boolean canEarlyTerminate) {
this.name = name;
this.vs = vs;
this.canEarlyTerminate = canEarlyTerminate;
this.reverseMul = order == SortOrder.ASC ? 1 : -1;
}
String name() {
return name;
}
ValuesSource valuesSource() {
return vs;
}
/**
* The sort order for the values source (e.g. -1 for descending and 1 for ascending).
*/
int reverseMul() {
assert reverseMul == -1 || reverseMul == 1;
return reverseMul;
}
boolean canEarlyTerminate() {
return canEarlyTerminate;
}
}
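Encoding the sort order as a multiplier (1 for ascending, -1 for descending) lets one comparison routine serve both directions; the compare methods in CompositeValuesSource multiply by reverseMul exactly this way. A tiny self-contained illustration:

public class ReverseMulDemo {
    public static void main(String[] args) {
        int asc = 1, desc = -1;
        // negative result: 3 sorts before 7 when ascending
        System.out.println(Long.compare(3, 7) * asc);
        // positive result: 3 sorts after 7 when descending
        System.out.println(Long.compare(3, 7) * desc);
    }
}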

View File

@ -0,0 +1,122 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.composite;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.AbstractObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.aggregations.support.ValueType;
import java.io.IOException;
import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
class CompositeValuesSourceParserHelper {
static <VB extends CompositeValuesSourceBuilder<VB>, T> void declareValuesSourceFields(AbstractObjectParser<VB, T> objectParser,
ValueType targetValueType) {
objectParser.declareField(VB::field, XContentParser::text,
new ParseField("field"), ObjectParser.ValueType.STRING);
objectParser.declareField(VB::missing, XContentParser::objectText,
new ParseField("missing"), ObjectParser.ValueType.VALUE);
objectParser.declareField(VB::valueType, p -> {
ValueType valueType = ValueType.resolveForScript(p.text());
if (targetValueType != null && valueType.isNotA(targetValueType)) {
throw new ParsingException(p.getTokenLocation(),
"Aggregation [" + objectParser.getName() + "] was configured with an incompatible value type ["
+ valueType + "]. It can only work on value of type ["
+ targetValueType + "]");
}
return valueType;
}, new ParseField("value_type"), ObjectParser.ValueType.STRING);
objectParser.declareField(VB::script,
(parser, context) -> Script.parse(parser), Script.SCRIPT_PARSE_FIELD, ObjectParser.ValueType.OBJECT_OR_STRING);
objectParser.declareField(VB::order, XContentParser::text, new ParseField("order"), ObjectParser.ValueType.STRING);
}
static void writeTo(CompositeValuesSourceBuilder<?> builder, StreamOutput out) throws IOException {
final byte code;
if (builder.getClass() == TermsValuesSourceBuilder.class) {
code = 0;
} else if (builder.getClass() == DateHistogramValuesSourceBuilder.class) {
code = 1;
} else if (builder.getClass() == HistogramValuesSourceBuilder.class) {
code = 2;
} else {
throw new IOException("invalid builder type: " + builder.getClass().getSimpleName());
}
out.writeByte(code);
builder.writeTo(out);
}
static CompositeValuesSourceBuilder<?> readFrom(StreamInput in) throws IOException {
int code = in.readByte();
switch(code) {
case 0:
return new TermsValuesSourceBuilder(in);
case 1:
return new DateHistogramValuesSourceBuilder(in);
case 2:
return new HistogramValuesSourceBuilder(in);
default:
throw new IOException("Invalid code " + code);
}
}
static CompositeValuesSourceBuilder<?> fromXContent(XContentParser parser) throws IOException {
XContentParser.Token token = parser.currentToken();
ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser::getTokenLocation);
token = parser.nextToken();
ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation);
String name = parser.currentName();
token = parser.nextToken();
ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser::getTokenLocation);
token = parser.nextToken();
ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation);
String type = parser.currentName();
token = parser.nextToken();
ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser::getTokenLocation);
final CompositeValuesSourceBuilder<?> builder;
switch(type) {
case TermsValuesSourceBuilder.TYPE:
builder = TermsValuesSourceBuilder.parse(name, parser);
break;
case DateHistogramValuesSourceBuilder.TYPE:
builder = DateHistogramValuesSourceBuilder.parse(name, parser);
break;
case HistogramValuesSourceBuilder.TYPE:
builder = HistogramValuesSourceBuilder.parse(name, parser);
break;
default:
throw new ParsingException(parser.getTokenLocation(), "invalid source type: " + type);
}
parser.nextToken();
parser.nextToken();
return builder;
}
}
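fromXContent above walks a doubly nested single-entry object: the outer key names the source, the inner key selects its type, and the innermost object carries the type-specific options. A hedged sketch of the accepted shape (field values are illustrative only):

public class SourceShapeDemo {
    public static void main(String[] args) {
        // outer key = source name, inner key = source type, body = type-specific options
        String example = "{\"by_user\": {\"terms\": {\"field\": \"user\", \"order\": \"asc\"}}}";
        System.out.println(example);
    }
}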

View File

@ -0,0 +1,243 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.composite;
import org.apache.lucene.search.SortField;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.rounding.DateTimeUnit;
import org.elasticsearch.common.rounding.Rounding;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
import org.elasticsearch.search.aggregations.support.FieldContext;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.sort.SortOrder;
import org.joda.time.DateTimeZone;
import java.io.IOException;
import java.util.Objects;
import static org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder.DATE_FIELD_UNITS;
/**
* A {@link CompositeValuesSourceBuilder} that builds a {@link RoundingValuesSource} from a {@link Script} or
* a field name.
*/
public class DateHistogramValuesSourceBuilder extends CompositeValuesSourceBuilder<DateHistogramValuesSourceBuilder> {
static final String TYPE = "date_histogram";
private static final ObjectParser<DateHistogramValuesSourceBuilder, Void> PARSER;
static {
PARSER = new ObjectParser<>(DateHistogramValuesSourceBuilder.TYPE);
PARSER.declareField((histogram, interval) -> {
if (interval instanceof Long) {
histogram.interval((long) interval);
} else {
histogram.dateHistogramInterval((DateHistogramInterval) interval);
}
}, p -> {
if (p.currentToken() == XContentParser.Token.VALUE_NUMBER) {
return p.longValue();
} else {
return new DateHistogramInterval(p.text());
}
}, Histogram.INTERVAL_FIELD, ObjectParser.ValueType.LONG);
PARSER.declareField(DateHistogramValuesSourceBuilder::timeZone, p -> {
if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
return DateTimeZone.forID(p.text());
} else {
return DateTimeZone.forOffsetHours(p.intValue());
}
}, new ParseField("time_zone"), ObjectParser.ValueType.LONG);
CompositeValuesSourceParserHelper.declareValuesSourceFields(PARSER, ValueType.NUMERIC);
}
static DateHistogramValuesSourceBuilder parse(String name, XContentParser parser) throws IOException {
return PARSER.parse(parser, new DateHistogramValuesSourceBuilder(name), null);
}
private long interval = 0;
private DateTimeZone timeZone = null;
private DateHistogramInterval dateHistogramInterval;
public DateHistogramValuesSourceBuilder(String name) {
super(name, ValueType.DATE);
}
protected DateHistogramValuesSourceBuilder(StreamInput in) throws IOException {
super(in);
this.interval = in.readLong();
this.dateHistogramInterval = in.readOptionalWriteable(DateHistogramInterval::new);
if (in.readBoolean()) {
timeZone = DateTimeZone.forID(in.readString());
}
}
@Override
protected void innerWriteTo(StreamOutput out) throws IOException {
out.writeLong(interval);
out.writeOptionalWriteable(dateHistogramInterval);
boolean hasTimeZone = timeZone != null;
out.writeBoolean(hasTimeZone);
if (hasTimeZone) {
out.writeString(timeZone.getID());
}
}
@Override
protected void doXContentBody(XContentBuilder builder, Params params) throws IOException {
if (dateHistogramInterval == null) {
builder.field(Histogram.INTERVAL_FIELD.getPreferredName(), interval);
} else {
builder.field(Histogram.INTERVAL_FIELD.getPreferredName(), dateHistogramInterval.toString());
}
if (timeZone != null) {
builder.field("time_zone", timeZone);
}
}
@Override
protected int innerHashCode() {
return Objects.hash(interval, dateHistogramInterval, timeZone);
}
@Override
protected boolean innerEquals(DateHistogramValuesSourceBuilder other) {
return Objects.equals(interval, other.interval)
&& Objects.equals(dateHistogramInterval, other.dateHistogramInterval)
&& Objects.equals(timeZone, other.timeZone);
}
@Override
public String type() {
return TYPE;
}
/**
* Returns the interval in milliseconds that is set on this source
**/
public long interval() {
return interval;
}
/**
* Sets the interval on this source.
* If both {@link #interval()} and {@link #dateHistogramInterval()} are set,
* then the {@link #dateHistogramInterval()} wins.
**/
public DateHistogramValuesSourceBuilder interval(long interval) {
if (interval < 1) {
throw new IllegalArgumentException("[interval] must be 1 or greater for [date_histogram] source");
}
this.interval = interval;
return this;
}
/**
* Returns the date interval that is set on this source
**/
public DateHistogramInterval dateHistogramInterval() {
return dateHistogramInterval;
}
public DateHistogramValuesSourceBuilder dateHistogramInterval(DateHistogramInterval dateHistogramInterval) {
if (dateHistogramInterval == null) {
throw new IllegalArgumentException("[dateHistogramInterval] must not be null");
}
this.dateHistogramInterval = dateHistogramInterval;
return this;
}
/**
* Sets the time zone to use for this aggregation
*/
public DateHistogramValuesSourceBuilder timeZone(DateTimeZone timeZone) {
if (timeZone == null) {
throw new IllegalArgumentException("[timeZone] must not be null: [" + name + "]");
}
this.timeZone = timeZone;
return this;
}
/**
* Gets the time zone to use for this aggregation
*/
public DateTimeZone timeZone() {
return timeZone;
}
private Rounding createRounding() {
Rounding.Builder tzRoundingBuilder;
if (dateHistogramInterval != null) {
DateTimeUnit dateTimeUnit = DATE_FIELD_UNITS.get(dateHistogramInterval.toString());
if (dateTimeUnit != null) {
tzRoundingBuilder = Rounding.builder(dateTimeUnit);
} else {
// the interval is not a calendar unit, so parse it as a fixed time value (e.g. "90m")
tzRoundingBuilder = Rounding.builder(
TimeValue.parseTimeValue(dateHistogramInterval.toString(), null, getClass().getSimpleName() + ".interval"));
}
} else {
// no calendar interval was given, so interpret the numeric interval as milliseconds
tzRoundingBuilder = Rounding.builder(TimeValue.timeValueMillis(interval));
}
if (timeZone() != null) {
tzRoundingBuilder.timeZone(timeZone());
}
return tzRoundingBuilder.build();
}
@Override
protected CompositeValuesSourceConfig innerBuild(SearchContext context,
ValuesSourceConfig<?> config,
int pos,
int numPos,
SortField sortField) throws IOException {
Rounding rounding = createRounding();
ValuesSource orig = config.toValuesSource(context.getQueryShardContext());
if (orig == null) {
orig = ValuesSource.Numeric.EMPTY;
}
if (orig instanceof ValuesSource.Numeric) {
ValuesSource.Numeric numeric = (ValuesSource.Numeric) orig;
RoundingValuesSource vs = new RoundingValuesSource(numeric, rounding);
boolean canEarlyTerminate = false;
final FieldContext fieldContext = config.fieldContext();
if (sortField != null &&
pos == numPos - 1 &&
fieldContext != null) {
canEarlyTerminate = checkCanEarlyTerminate(context.searcher().getIndexReader(),
fieldContext.field(), order() == SortOrder.DESC, sortField);
}
return new CompositeValuesSourceConfig(name, vs, order(), canEarlyTerminate);
} else {
throw new IllegalArgumentException("invalid source, expected numeric, got " + orig.getClass().getSimpleName());
}
}
}
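A hedged construction sketch for this source; the field name and time zone are assumptions for illustration, and as documented above a calendar interval takes precedence over a millisecond interval when both are set:

import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
import org.elasticsearch.search.aggregations.composite.DateHistogramValuesSourceBuilder;
import org.joda.time.DateTimeZone;

class DateHistogramSourceSketch {
    static DateHistogramValuesSourceBuilder daily() {
        return new DateHistogramValuesSourceBuilder("by_day")
                .field("timestamp")                                // assumed field name
                .dateHistogramInterval(DateHistogramInterval.DAY)  // one bucket per day
                .timeZone(DateTimeZone.UTC);                       // round in UTC
    }
}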

View File

@ -0,0 +1,78 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.composite;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SortedNumericDocValues;
import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
import org.elasticsearch.index.fielddata.SortedNumericDoubleValues;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import java.io.IOException;
class HistogramValuesSource extends ValuesSource.Numeric {
private final Numeric vs;
private final double interval;
/**
* @param vs The original values source
* @param interval The histogram interval to round values down to
*/
HistogramValuesSource(Numeric vs, double interval) {
this.vs = vs;
this.interval = interval;
}
@Override
public boolean isFloatingPoint() {
return true;
}
@Override
public SortedNumericDoubleValues doubleValues(LeafReaderContext context) throws IOException {
SortedNumericDoubleValues values = vs.doubleValues(context);
return new SortedNumericDoubleValues() {
@Override
public double nextValue() throws IOException {
return Math.floor(values.nextValue() / interval) * interval;
}
@Override
public int docValueCount() {
return values.docValueCount();
}
@Override
public boolean advanceExact(int target) throws IOException {
return values.advanceExact(target);
}
};
}
@Override
public SortedBinaryDocValues bytesValues(LeafReaderContext context) throws IOException {
throw new UnsupportedOperationException("not applicable");
}
@Override
public SortedNumericDocValues longValues(LeafReaderContext context) throws IOException {
throw new UnsupportedOperationException("not applicable");
}
}
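The doubleValues wrapper above implements fixed-interval bucketing as Math.floor(value / interval) * interval, snapping every value down to the lower edge of its bucket. A worked example:

public class HistogramRoundingDemo {
    public static void main(String[] args) {
        double interval = 5.0;
        for (double v : new double[] {0.0, 4.9, 5.0, 12.3, -0.1}) {
            System.out.println(v + " -> " + (Math.floor(v / interval) * interval));
        }
        // 0.0 -> 0.0, 4.9 -> 0.0, 5.0 -> 5.0, 12.3 -> 10.0, -0.1 -> -5.0
    }
}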

View File

@ -0,0 +1,136 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.composite;
import org.apache.lucene.search.SortField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
import org.elasticsearch.search.aggregations.support.FieldContext;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.sort.SortOrder;
import java.io.IOException;
import java.util.Objects;
/**
* A {@link CompositeValuesSourceBuilder} that builds a {@link HistogramValuesSource} from another numeric values source
* using the provided interval.
*/
public class HistogramValuesSourceBuilder extends CompositeValuesSourceBuilder<HistogramValuesSourceBuilder> {
static final String TYPE = "histogram";
private static final ObjectParser<HistogramValuesSourceBuilder, Void> PARSER;
static {
PARSER = new ObjectParser<>(HistogramValuesSourceBuilder.TYPE);
PARSER.declareDouble(HistogramValuesSourceBuilder::interval, Histogram.INTERVAL_FIELD);
CompositeValuesSourceParserHelper.declareValuesSourceFields(PARSER, ValueType.NUMERIC);
}
static HistogramValuesSourceBuilder parse(String name, XContentParser parser) throws IOException {
return PARSER.parse(parser, new HistogramValuesSourceBuilder(name), null);
}
private double interval = 0;
public HistogramValuesSourceBuilder(String name) {
super(name, ValueType.DOUBLE);
}
protected HistogramValuesSourceBuilder(StreamInput in) throws IOException {
super(in);
this.interval = in.readDouble();
}
@Override
protected void innerWriteTo(StreamOutput out) throws IOException {
out.writeDouble(interval);
}
@Override
protected void doXContentBody(XContentBuilder builder, Params params) throws IOException {
builder.field(Histogram.INTERVAL_FIELD.getPreferredName(), interval);
}
@Override
protected int innerHashCode() {
return Objects.hash(interval);
}
@Override
protected boolean innerEquals(HistogramValuesSourceBuilder other) {
return Objects.equals(interval, other.interval);
}
@Override
public String type() {
return TYPE;
}
/**
* Returns the interval that is set on this source
**/
public double interval() {
return interval;
}
/**
* Sets the interval on this source.
**/
public HistogramValuesSourceBuilder interval(double interval) {
if (interval <= 0) {
throw new IllegalArgumentException("[interval] must be greater than 0 for [histogram] source");
}
this.interval = interval;
return this;
}
@Override
protected CompositeValuesSourceConfig innerBuild(SearchContext context,
ValuesSourceConfig<?> config,
int pos,
int numPos,
SortField sortField) throws IOException {
ValuesSource orig = config.toValuesSource(context.getQueryShardContext());
if (orig == null) {
orig = ValuesSource.Numeric.EMPTY;
}
if (orig instanceof ValuesSource.Numeric) {
ValuesSource.Numeric numeric = (ValuesSource.Numeric) orig;
HistogramValuesSource vs = new HistogramValuesSource(numeric, interval);
boolean canEarlyTerminate = false;
final FieldContext fieldContext = config.fieldContext();
if (sortField != null &&
pos == numPos - 1 &&
fieldContext != null) {
canEarlyTerminate = checkCanEarlyTerminate(context.searcher().getIndexReader(),
fieldContext.field(), order() == SortOrder.DESC, sortField);
}
return new CompositeValuesSourceConfig(name, vs, order(), canEarlyTerminate);
} else {
throw new IllegalArgumentException("invalid source, expected numeric, got " + orig.getClass().getSimpleName());
}
}
}
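A hedged construction sketch for this source; the field name is an assumption for illustration, and the interval must be greater than 0 as enforced above:

import org.elasticsearch.search.aggregations.composite.HistogramValuesSourceBuilder;

class HistogramSourceSketch {
    static HistogramValuesSourceBuilder byPrice() {
        return new HistogramValuesSourceBuilder("by_price")
                .interval(5.0)      // bucket width
                .field("price");    // assumed field name
    }
}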

View File

@ -0,0 +1,371 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.composite;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.search.aggregations.Aggregations;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;
import org.elasticsearch.search.aggregations.KeyComparable;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import java.io.IOException;
import java.util.AbstractMap;
import java.util.AbstractSet;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.PriorityQueue;
import java.util.Set;
public class InternalComposite
extends InternalMultiBucketAggregation<InternalComposite, InternalComposite.InternalBucket> implements CompositeAggregation {
private final int size;
private final List<InternalBucket> buckets;
private final int[] reverseMuls;
private final List<String> sourceNames;
InternalComposite(String name, int size, List<String> sourceNames, List<InternalBucket> buckets, int[] reverseMuls,
List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) {
super(name, pipelineAggregators, metaData);
this.sourceNames = sourceNames;
this.buckets = buckets;
this.size = size;
this.reverseMuls = reverseMuls;
}
InternalComposite(StreamInput in) throws IOException {
super(in);
this.size = in.readVInt();
this.sourceNames = in.readList(StreamInput::readString);
this.reverseMuls = in.readIntArray();
this.buckets = in.readList((input) -> new InternalBucket(input, sourceNames, reverseMuls));
}
@Override
protected void doWriteTo(StreamOutput out) throws IOException {
out.writeVInt(size);
out.writeStringList(sourceNames);
out.writeIntArray(reverseMuls);
out.writeList(buckets);
}
@Override
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
return CompositeAggregation.toXContentFragment(this, builder, params);
}
@Override
public String getWriteableName() {
return CompositeAggregationBuilder.NAME;
}
@Override
public InternalComposite create(List<InternalBucket> buckets) {
return new InternalComposite(name, size, sourceNames, buckets, reverseMuls, pipelineAggregators(), getMetaData());
}
@Override
public InternalBucket createBucket(InternalAggregations aggregations, InternalBucket prototype) {
return new InternalBucket(prototype.sourceNames, prototype.key, prototype.reverseMuls, prototype.docCount, aggregations);
}
public int getSize() {
return size;
}
@Override
public List<InternalBucket> getBuckets() {
return buckets;
}
@Override
public Map<String, Object> afterKey() {
return buckets.size() > 0 ? buckets.get(buckets.size()-1).getKey() : null;
}
// Visible for tests
int[] getReverseMuls() {
return reverseMuls;
}
@Override
public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
PriorityQueue<BucketIterator> pq = new PriorityQueue<>(aggregations.size());
for (InternalAggregation agg : aggregations) {
InternalComposite sortedAgg = (InternalComposite) agg;
BucketIterator it = new BucketIterator(sortedAgg.buckets);
if (it.next() != null) {
pq.add(it);
}
}
InternalBucket lastBucket = null;
List<InternalBucket> buckets = new ArrayList<>();
List<InternalBucket> result = new ArrayList<>();
while (pq.size() > 0) {
BucketIterator bucketIt = pq.poll();
if (lastBucket != null && bucketIt.current.compareKey(lastBucket) != 0) {
InternalBucket reduceBucket = buckets.get(0).reduce(buckets, reduceContext);
buckets.clear();
result.add(reduceBucket);
if (result.size() >= size) {
break;
}
}
lastBucket = bucketIt.current;
buckets.add(bucketIt.current);
if (bucketIt.next() != null) {
pq.add(bucketIt);
}
}
if (buckets.size() > 0) {
InternalBucket reduceBucket = buckets.get(0).reduce(buckets, reduceContext);
result.add(reduceBucket);
}
return new InternalComposite(name, size, sourceNames, result, reverseMuls, pipelineAggregators(), metaData);
}
@Override
protected boolean doEquals(Object obj) {
InternalComposite that = (InternalComposite) obj;
return Objects.equals(size, that.size) &&
Objects.equals(buckets, that.buckets) &&
Arrays.equals(reverseMuls, that.reverseMuls);
}
@Override
protected int doHashCode() {
return Objects.hash(size, buckets, Arrays.hashCode(reverseMuls));
}
private static class BucketIterator implements Comparable<BucketIterator> {
final Iterator<InternalBucket> it;
InternalBucket current;
private BucketIterator(List<InternalBucket> buckets) {
this.it = buckets.iterator();
}
@Override
public int compareTo(BucketIterator other) {
return current.compareKey(other.current);
}
InternalBucket next() {
return current = it.hasNext() ? it.next() : null;
}
}
static class InternalBucket extends InternalMultiBucketAggregation.InternalBucket
implements CompositeAggregation.Bucket, KeyComparable<InternalBucket> {
private final CompositeKey key;
private final long docCount;
private final InternalAggregations aggregations;
private final transient int[] reverseMuls;
private final transient List<String> sourceNames;
InternalBucket(List<String> sourceNames, CompositeKey key, int[] reverseMuls, long docCount, InternalAggregations aggregations) {
this.key = key;
this.docCount = docCount;
this.aggregations = aggregations;
this.reverseMuls = reverseMuls;
this.sourceNames = sourceNames;
}
@SuppressWarnings("unchecked")
InternalBucket(StreamInput in, List<String> sourceNames, int[] reverseMuls) throws IOException {
final Comparable<?>[] values = new Comparable<?>[in.readVInt()];
for (int i = 0; i < values.length; i++) {
values[i] = (Comparable<?>) in.readGenericValue();
}
this.key = new CompositeKey(values);
this.docCount = in.readVLong();
this.aggregations = InternalAggregations.readAggregations(in);
this.reverseMuls = reverseMuls;
this.sourceNames = sourceNames;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(key.size());
for (int i = 0; i < key.size(); i++) {
out.writeGenericValue(key.get(i));
}
out.writeVLong(docCount);
aggregations.writeTo(out);
}
@Override
public int hashCode() {
return Objects.hash(getClass(), docCount, key, aggregations);
}
@Override
public boolean equals(Object obj) {
if (obj == null || getClass() != obj.getClass()) {
return false;
}
InternalBucket that = (InternalBucket) obj;
return Objects.equals(docCount, that.docCount)
&& Objects.equals(key, that.key)
&& Objects.equals(aggregations, that.aggregations);
}
@Override
public Map<String, Object> getKey() {
return new ArrayMap(sourceNames, key.values());
}
// visible for testing
CompositeKey getRawKey() {
return key;
}
@Override
public String getKeyAsString() {
StringBuilder builder = new StringBuilder();
builder.append('{');
for (int i = 0; i < key.size(); i++) {
if (i > 0) {
builder.append(", ");
}
builder.append(sourceNames.get(i));
builder.append('=');
builder.append(formatObject(key.get(i)));
}
builder.append('}');
return builder.toString();
}
@Override
public long getDocCount() {
return docCount;
}
@Override
public Aggregations getAggregations() {
return aggregations;
}
InternalBucket reduce(List<InternalBucket> buckets, ReduceContext reduceContext) {
List<InternalAggregations> aggregations = new ArrayList<>(buckets.size());
long docCount = 0;
for (InternalBucket bucket : buckets) {
docCount += bucket.docCount;
aggregations.add(bucket.aggregations);
}
InternalAggregations aggs = InternalAggregations.reduce(aggregations, reduceContext);
return new InternalBucket(sourceNames, key, reverseMuls, docCount, aggs);
}
@Override
public int compareKey(InternalBucket other) {
for (int i = 0; i < key.size(); i++) {
assert key.get(i).getClass() == other.key.get(i).getClass();
@SuppressWarnings("unchecked")
int cmp = ((Comparable) key.get(i)).compareTo(other.key.get(i)) * reverseMuls[i];
if (cmp != 0) {
return cmp;
}
}
return 0;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
// See {@link CompositeAggregation#bucketToXContentFragment}.
throw new UnsupportedOperationException("not implemented");
}
}
static Object formatObject(Object obj) {
if (obj instanceof BytesRef) {
return ((BytesRef) obj).utf8ToString();
}
return obj;
}
private static class ArrayMap extends AbstractMap<String, Object> {
final List<String> keys;
final Object[] values;
ArrayMap(List<String> keys, Object[] values) {
assert keys.size() == values.length;
this.keys = keys;
this.values = values;
}
@Override
public int size() {
return values.length;
}
@Override
public Object get(Object key) {
for (int i = 0; i < keys.size(); i++) {
if (key.equals(keys.get(i))) {
return formatObject(values[i]);
}
}
return null;
}
@Override
public Set<Entry<String, Object>> entrySet() {
return new AbstractSet<Entry<String, Object>>() {
@Override
public Iterator<Entry<String, Object>> iterator() {
return new Iterator<Entry<String, Object>>() {
int pos = 0;
@Override
public boolean hasNext() {
return pos < values.length;
}
@Override
public Entry<String, Object> next() {
SimpleEntry<String, Object> entry =
new SimpleEntry<>(keys.get(pos), formatObject(values[pos]));
pos++;
return entry;
}
};
}
@Override
public int size() {
return keys.size();
}
};
}
}
}
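doReduce above is a size-bounded k-way merge: every shard contributes a key-sorted bucket list, a priority queue repeatedly yields the smallest current key, and equal keys from different shards are reduced into a single bucket. A plain-Java sketch with integer keys standing in for composite keys (assumed data, no ES types):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.PriorityQueue;

public class KWayMergeDemo {
    public static void main(String[] args) {
        int size = 3; // stop once this many reduced buckets exist
        List<Iterator<Integer>> shards = Arrays.asList(
                Arrays.asList(1, 4, 7).iterator(),
                Arrays.asList(1, 2, 7).iterator());
        // queue entries are {current key, shard index}
        PriorityQueue<int[]> pq = new PriorityQueue<>(Comparator.comparingInt((int[] e) -> e[0]));
        for (int i = 0; i < shards.size(); i++) {
            if (shards.get(i).hasNext()) {
                pq.add(new int[] {shards.get(i).next(), i});
            }
        }
        List<Integer> merged = new ArrayList<>();
        Integer last = null;
        while (!pq.isEmpty() && merged.size() < size) {
            int[] top = pq.poll();
            if (last == null || last != top[0]) { // equal keys collapse into one bucket
                merged.add(top[0]);
                last = top[0];
            }
            if (shards.get(top[1]).hasNext()) {
                pq.add(new int[] {shards.get(top[1]).next(), top[1]});
            }
        }
        System.out.println(merged); // [1, 2, 4]
    }
}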

View File

@ -0,0 +1,98 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.composite;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation;
import java.io.IOException;
import java.util.List;
import java.util.Map;
public class ParsedComposite extends ParsedMultiBucketAggregation<ParsedComposite.ParsedBucket> implements CompositeAggregation {
private static final ObjectParser<ParsedComposite, Void> PARSER =
new ObjectParser<>(ParsedComposite.class.getSimpleName(), true, ParsedComposite::new);
static {
declareMultiBucketAggregationFields(PARSER,
parser -> ParsedComposite.ParsedBucket.fromXContent(parser),
parser -> null
);
}
public static ParsedComposite fromXContent(XContentParser parser, String name) throws IOException {
ParsedComposite aggregation = PARSER.parse(parser, null);
aggregation.setName(name);
return aggregation;
}
@Override
public String getType() {
return CompositeAggregationBuilder.NAME;
}
@Override
public List<ParsedBucket> getBuckets() {
return buckets;
}
@Override
public Map<String, Object> afterKey() {
return buckets.size() > 0 ? buckets.get(buckets.size()-1).getKey() : null;
}
@Override
protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
return CompositeAggregation.toXContentFragment(this, builder, params);
}
public static class ParsedBucket extends ParsedMultiBucketAggregation.ParsedBucket implements CompositeAggregation.Bucket {
private Map<String, Object> key;
@Override
public String getKeyAsString() {
return key.toString();
}
@Override
public Map<String, Object> getKey() {
return key;
}
void setKey(Map<String, Object> key) {
this.key = key;
}
@Override
public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
// See {@link CompositeAggregation#bucketToXContent}.
throw new UnsupportedOperationException("not implemented");
}
static ParsedComposite.ParsedBucket fromXContent(XContentParser parser) throws IOException {
return parseXContent(parser, false, ParsedBucket::new,
(p, bucket) -> bucket.setKey(p.mapOrdered()));
}
}
}

View File

@ -0,0 +1,104 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.composite;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SortedNumericDocValues;
import org.elasticsearch.common.rounding.Rounding;
import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
import org.elasticsearch.index.fielddata.SortedNumericDoubleValues;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import java.io.IOException;
/**
* A wrapper for {@link ValuesSource.Numeric} that uses {@link Rounding} to transform the long values
* produced by the underlying source.
*/
class RoundingValuesSource extends ValuesSource.Numeric {
private final ValuesSource.Numeric vs;
private final Rounding rounding;
/**
*
* @param vs The original values source
* @param rounding How to round the values
*/
RoundingValuesSource(Numeric vs, Rounding rounding) {
this.vs = vs;
this.rounding = rounding;
}
@Override
public boolean isFloatingPoint() {
return false;
}
@Override
public SortedNumericDocValues longValues(LeafReaderContext context) throws IOException {
SortedNumericDocValues values = vs.longValues(context);
return new SortedNumericDocValues() {
@Override
public long nextValue() throws IOException {
return rounding.round(values.nextValue());
}
@Override
public int docValueCount() {
return values.docValueCount();
}
@Override
public boolean advanceExact(int target) throws IOException {
return values.advanceExact(target);
}
@Override
public int docID() {
return values.docID();
}
@Override
public int nextDoc() throws IOException {
return values.nextDoc();
}
@Override
public int advance(int target) throws IOException {
return values.advance(target);
}
@Override
public long cost() {
return values.cost();
}
};
}
@Override
public SortedBinaryDocValues bytesValues(LeafReaderContext context) throws IOException {
throw new UnsupportedOperationException("not applicable");
}
@Override
public SortedNumericDoubleValues doubleValues(LeafReaderContext context) throws IOException {
throw new UnsupportedOperationException("not applicable");
}
}
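The longValues wrapper above passes iteration through untouched and only transforms nextValue via the Rounding. A simplified illustration of the effect, assuming UTC and fixed-length days; the real Rounding also handles time zones and calendar units:

public class RoundingEffectDemo {
    public static void main(String[] args) {
        long dayMillis = 24L * 60 * 60 * 1000;
        long ts = 1_500_000_123_456L;                 // an arbitrary epoch-millis timestamp
        long rounded = (ts / dayMillis) * dayMillis;  // snapped to the start of its UTC day
        System.out.println(ts + " -> " + rounded);
    }
}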

View File

@ -0,0 +1,100 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.composite;
import org.apache.lucene.search.SortField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.search.aggregations.support.FieldContext;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.sort.SortOrder;
import java.io.IOException;
/**
* A {@link CompositeValuesSourceBuilder} that builds a {@link ValuesSource} from a {@link Script} or
* a field name.
*/
public class TermsValuesSourceBuilder extends CompositeValuesSourceBuilder<TermsValuesSourceBuilder> {
static final String TYPE = "terms";
private static final ObjectParser<TermsValuesSourceBuilder, Void> PARSER;
static {
PARSER = new ObjectParser<>(TermsValuesSourceBuilder.TYPE);
CompositeValuesSourceParserHelper.declareValuesSourceFields(PARSER, null);
}
static TermsValuesSourceBuilder parse(String name, XContentParser parser) throws IOException {
return PARSER.parse(parser, new TermsValuesSourceBuilder(name), null);
}
public TermsValuesSourceBuilder(String name) {
super(name);
}
protected TermsValuesSourceBuilder(StreamInput in) throws IOException {
super(in);
}
@Override
protected void innerWriteTo(StreamOutput out) throws IOException {}
@Override
protected void doXContentBody(XContentBuilder builder, Params params) throws IOException {}
@Override
protected int innerHashCode() {
return 0;
}
@Override
protected boolean innerEquals(TermsValuesSourceBuilder builder) {
return true;
}
@Override
public String type() {
return TYPE;
}
@Override
protected CompositeValuesSourceConfig innerBuild(SearchContext context,
ValuesSourceConfig<?> config,
int pos,
int numPos,
SortField sortField) throws IOException {
ValuesSource vs = config.toValuesSource(context.getQueryShardContext());
if (vs == null) {
vs = ValuesSource.Numeric.EMPTY;
}
boolean canEarlyTerminate = false;
final FieldContext fieldContext = config.fieldContext();
if (sortField != null && fieldContext != null) {
canEarlyTerminate = checkCanEarlyTerminate(context.searcher().getIndexReader(),
fieldContext.field(), order() == SortOrder.DESC, sortField);
}
return new CompositeValuesSourceConfig(name, vs, order(), canEarlyTerminate);
}
}
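With the terms source in place, a composite key can mix source types, combined in the order the sources are given. A hedged end-to-end construction sketch; field names are assumptions, and CompositeAggregationBuilder accepts the source list as exercised in the tests below:

import java.util.ArrayList;
import java.util.List;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
import org.elasticsearch.search.aggregations.composite.CompositeAggregationBuilder;
import org.elasticsearch.search.aggregations.composite.CompositeValuesSourceBuilder;
import org.elasticsearch.search.aggregations.composite.DateHistogramValuesSourceBuilder;
import org.elasticsearch.search.aggregations.composite.TermsValuesSourceBuilder;

class CompositeUsageSketch {
    static CompositeAggregationBuilder sketch() {
        List<CompositeValuesSourceBuilder<?>> sources = new ArrayList<>();
        sources.add(new DateHistogramValuesSourceBuilder("by_day")
                .field("timestamp").dateHistogramInterval(DateHistogramInterval.DAY));
        sources.add(new TermsValuesSourceBuilder("by_user").field("user.keyword"));
        return new CompositeAggregationBuilder("my_composite", sources);
    }
}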

View File

@ -0,0 +1,42 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.composite.spi;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ContextParser;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.plugins.spi.NamedXContentProvider;
import org.elasticsearch.search.aggregations.Aggregation;
import org.elasticsearch.search.aggregations.composite.CompositeAggregationBuilder;
import org.elasticsearch.search.aggregations.composite.ParsedComposite;
import java.util.List;
import static java.util.Collections.singletonList;
public class CompositeNamedXContentProvider implements NamedXContentProvider {
@Override
public List<NamedXContentRegistry.Entry> getNamedXContentParsers() {
ParseField parseField = new ParseField(CompositeAggregationBuilder.NAME);
ContextParser<Object, Aggregation> contextParser = (p, name) -> ParsedComposite.fromXContent(p, (String) name);
return singletonList(new NamedXContentRegistry.Entry(Aggregation.class, parseField, contextParser));
}
}

View File

@ -0,0 +1 @@
org.elasticsearch.search.aggregations.composite.spi.CompositeNamedXContentProvider

View File

@ -0,0 +1,196 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.composite;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode;
import static org.hamcrest.Matchers.hasSize;
public class CompositeAggregationBuilderTests extends ESTestCase {
static final CompositeAggregationPlugin PLUGIN = new CompositeAggregationPlugin();
@Override
protected NamedXContentRegistry xContentRegistry() {
return new NamedXContentRegistry(
new SearchModule(Settings.EMPTY, false, Collections.singletonList(PLUGIN)).getNamedXContents()
);
}
@Override
protected NamedWriteableRegistry writableRegistry() {
return new NamedWriteableRegistry(
new SearchModule(Settings.EMPTY, false, Collections.singletonList(PLUGIN)).getNamedWriteables()
);
}
private DateHistogramValuesSourceBuilder randomDateHistogramSourceBuilder() {
DateHistogramValuesSourceBuilder histo = new DateHistogramValuesSourceBuilder(randomAlphaOfLengthBetween(5, 10));
if (randomBoolean()) {
histo.field(randomAlphaOfLengthBetween(1, 20));
} else {
histo.script(new Script(randomAlphaOfLengthBetween(10, 20)));
}
if (randomBoolean()) {
histo.dateHistogramInterval(randomFrom(DateHistogramInterval.days(10),
DateHistogramInterval.minutes(1), DateHistogramInterval.weeks(1)));
} else {
histo.interval(randomNonNegativeLong());
}
if (randomBoolean()) {
histo.timeZone(randomDateTimeZone());
}
return histo;
}
private TermsValuesSourceBuilder randomTermsSourceBuilder() {
TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder(randomAlphaOfLengthBetween(5, 10));
if (randomBoolean()) {
terms.field(randomAlphaOfLengthBetween(1, 20));
} else {
terms.script(new Script(randomAlphaOfLengthBetween(10, 20)));
}
terms.order(randomFrom(SortOrder.values()));
return terms;
}
private HistogramValuesSourceBuilder randomHistogramSourceBuilder() {
HistogramValuesSourceBuilder histo = new HistogramValuesSourceBuilder(randomAlphaOfLengthBetween(5, 10));
if (randomBoolean()) {
histo.field(randomAlphaOfLengthBetween(1, 20));
} else {
histo.script(new Script(randomAlphaOfLengthBetween(10, 20)));
}
histo.interval(randomDoubleBetween(Math.nextUp(0), Double.MAX_VALUE, false));
return histo;
}
private CompositeAggregationBuilder randomBuilder() {
int numSources = randomIntBetween(1, 10);
List<CompositeValuesSourceBuilder<?>> sources = new ArrayList<>();
for (int i = 0; i < numSources; i++) {
int type = randomIntBetween(0, 2);
switch (type) {
case 0:
sources.add(randomTermsSourceBuilder());
break;
case 1:
sources.add(randomDateHistogramSourceBuilder());
break;
case 2:
sources.add(randomHistogramSourceBuilder());
break;
default:
throw new AssertionError("wrong branch");
}
}
return new CompositeAggregationBuilder(randomAlphaOfLength(10), sources);
}
public void testFromXContent() throws IOException {
CompositeAggregationBuilder testAgg = randomBuilder();
AggregatorFactories.Builder factoriesBuilder = AggregatorFactories.builder().addAggregator(testAgg);
XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values()));
if (randomBoolean()) {
builder.prettyPrint();
}
factoriesBuilder.toXContent(builder, ToXContent.EMPTY_PARAMS);
XContentBuilder shuffled = shuffleXContent(builder);
XContentParser parser = createParser(shuffled);
AggregationBuilder newAgg = assertParse(parser);
assertNotSame(newAgg, testAgg);
assertEquals(testAgg, newAgg);
assertEquals(testAgg.hashCode(), newAgg.hashCode());
}
public void testToString() throws IOException {
CompositeAggregationBuilder testAgg = randomBuilder();
String toString = randomBoolean() ? Strings.toString(testAgg) : testAgg.toString();
XContentParser parser = createParser(XContentType.JSON.xContent(), toString);
AggregationBuilder newAgg = assertParse(parser);
assertNotSame(newAgg, testAgg);
assertEquals(testAgg, newAgg);
assertEquals(testAgg.hashCode(), newAgg.hashCode());
}
private AggregationBuilder assertParse(XContentParser parser) throws IOException {
assertSame(XContentParser.Token.START_OBJECT, parser.nextToken());
AggregatorFactories.Builder parsed = AggregatorFactories.parseAggregators(parser);
assertThat(parsed.getAggregatorFactories(), hasSize(1));
assertThat(parsed.getPipelineAggregatorFactories(), hasSize(0));
AggregationBuilder newAgg = parsed.getAggregatorFactories().get(0);
assertNull(parser.nextToken());
assertNotNull(newAgg);
return newAgg;
}
/**
* Test serialization and deserialization of the test AggregatorFactory.
*/
public void testSerialization() throws IOException {
CompositeAggregationBuilder testAgg = randomBuilder();
try (BytesStreamOutput output = new BytesStreamOutput()) {
output.writeNamedWriteable(testAgg);
try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), writableRegistry())) {
AggregationBuilder deserialized = in.readNamedWriteable(AggregationBuilder.class);
assertEquals(testAgg, deserialized);
assertEquals(testAgg.hashCode(), deserialized.hashCode());
assertNotSame(testAgg, deserialized);
}
}
}
public void testEqualsAndHashcode() throws IOException {
checkEqualsAndHashCode(randomBuilder(), this::copyAggregation);
}
private CompositeAggregationBuilder copyAggregation(CompositeAggregationBuilder agg) throws IOException {
try (BytesStreamOutput output = new BytesStreamOutput()) {
agg.writeTo(output);
try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), writableRegistry())) {
return (CompositeAggregationBuilder) writableRegistry().getReader(AggregationBuilder.class,
agg.getWriteableName()).read(in);
}
}
}
}
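
For readers following the tests above, a minimal sketch of what they randomize: a composite aggregation is just a named list of value sources. This assumes the fluent setters shown above return the builder, as ES builders conventionally do; the field names and interval are illustrative placeholders, not part of the test fixture.

import java.util.Arrays;
import java.util.List;

import org.elasticsearch.search.aggregations.composite.CompositeAggregationBuilder;
import org.elasticsearch.search.aggregations.composite.CompositeValuesSourceBuilder;
import org.elasticsearch.search.aggregations.composite.HistogramValuesSourceBuilder;
import org.elasticsearch.search.aggregations.composite.TermsValuesSourceBuilder;

public class CompositeAggExample {
    public static CompositeAggregationBuilder buildExample() {
        // One terms source and one histogram source; the aggregation emits one
        // bucket per combination of source values, ordered by the sources.
        TermsValuesSourceBuilder byCategory = new TermsValuesSourceBuilder("cat").field("category");
        HistogramValuesSourceBuilder byPrice = new HistogramValuesSourceBuilder("price").field("price").interval(10.0);
        List<CompositeValuesSourceBuilder<?>> sources = Arrays.<CompositeValuesSourceBuilder<?>>asList(byCategory, byPrice);
        return new CompositeAggregationBuilder("my_composite", sources);
    }
}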

View File

@ -0,0 +1,35 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.composite;
import com.carrotsearch.randomizedtesting.annotations.Name;
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
public class CompositeAggregationsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
public CompositeAggregationsClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
super(testCandidate);
}
@ParametersFactory
public static Iterable<Object[]> parameters() throws Exception {
return ESClientYamlSuiteTestCase.createParameters();
}
}

View File

@ -0,0 +1,256 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.composite;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.xcontent.ContextParser;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.aggregations.Aggregation;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.ParsedAggregation;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.test.InternalMultiBucketAggregationTestCase;
import org.junit.After;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.TreeSet;
import java.util.stream.Collectors;
import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiLettersOfLengthBetween;
import static com.carrotsearch.randomizedtesting.RandomizedTest.randomLongBetween;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
public class InternalCompositeTests extends InternalMultiBucketAggregationTestCase<InternalComposite> {
private List<String> sourceNames;
private int[] reverseMuls;
private int[] formats;
private int size;
@Override
public void setUp() throws Exception {
super.setUp();
int numFields = randomIntBetween(1, 10);
size = randomNumberOfBuckets();
sourceNames = new ArrayList<>();
reverseMuls = new int[numFields];
formats = new int[numFields];
for (int i = 0; i < numFields; i++) {
sourceNames.add("field_" + i);
reverseMuls[i] = randomBoolean() ? 1 : -1;
formats[i] = randomIntBetween(0, 2);
}
}
@Override
@After
public void tearDown() throws Exception {
super.tearDown();
sourceNames = null;
reverseMuls = null;
formats = null;
}
@Override
protected List<NamedXContentRegistry.Entry> getNamedXContents() {
List<NamedXContentRegistry.Entry> namedXContents = new ArrayList<>(getDefaultNamedXContents());
ContextParser<Object, Aggregation> parser = (p, c) -> ParsedComposite.fromXContent(p, (String) c);
namedXContents.add(new NamedXContentRegistry.Entry(Aggregation.class, new ParseField(CompositeAggregationBuilder.NAME), parser));
return namedXContents;
}
@Override
protected NamedWriteableRegistry getNamedWriteableRegistry() {
return new NamedWriteableRegistry(
new SearchModule(
Settings.EMPTY, false, Collections.singletonList(new CompositeAggregationPlugin())
).getNamedWriteables()
);
}
@Override
protected Writeable.Reader<InternalComposite> instanceReader() {
return InternalComposite::new;
}
@Override
protected Class<ParsedComposite> implementationClass() {
return ParsedComposite.class;
}
@Override
protected <P extends ParsedAggregation> P parseAndAssert(final InternalAggregation aggregation,
final boolean shuffled, final boolean addRandomFields) throws IOException {
return super.parseAndAssert(aggregation, false, false);
}
private CompositeKey createCompositeKey() {
Comparable<?>[] keys = new Comparable<?>[sourceNames.size()];
for (int j = 0; j < keys.length; j++) {
switch (formats[j]) {
case 0:
keys[j] = randomLong();
break;
case 1:
keys[j] = randomDouble();
break;
case 2:
keys[j] = new BytesRef(randomAsciiLettersOfLengthBetween(1, 20));
break;
default:
throw new AssertionError("illegal branch");
}
}
return new CompositeKey(keys);
}
@SuppressWarnings("unchecked")
private Comparator<CompositeKey> getKeyComparator() {
return (o1, o2) -> {
for (int i = 0; i < o1.size(); i++) {
int cmp = ((Comparable) o1.get(i)).compareTo(o2.get(i)) * reverseMuls[i];
if (cmp != 0) {
return cmp;
}
}
return 0;
};
}
@SuppressWarnings("unchecked")
private Comparator<InternalComposite.InternalBucket> getBucketComparator() {
return (o1, o2) -> {
for (int i = 0; i < o1.getRawKey().size(); i++) {
int cmp = ((Comparable) o1.getRawKey().get(i)).compareTo(o2.getRawKey().get(i)) * reverseMuls[i];
if (cmp != 0) {
return cmp;
}
}
return 0;
};
}
@Override
protected InternalComposite createTestInstance(String name, List<PipelineAggregator> pipelineAggregators,
Map<String, Object> metaData, InternalAggregations aggregations) {
int numBuckets = randomIntBetween(0, size);
List<InternalComposite.InternalBucket> buckets = new ArrayList<>();
TreeSet<CompositeKey> keys = new TreeSet<>(getKeyComparator());
for (int i = 0; i < numBuckets; i++) {
final CompositeKey key = createCompositeKey();
if (keys.contains(key)) {
continue;
}
keys.add(key);
InternalComposite.InternalBucket bucket =
new InternalComposite.InternalBucket(sourceNames, key, reverseMuls, 1L, aggregations);
buckets.add(bucket);
}
buckets.sort(InternalComposite.InternalBucket::compareKey);
return new InternalComposite(name, size, sourceNames, buckets, reverseMuls, Collections.emptyList(), metaData);
}
@Override
protected InternalComposite mutateInstance(InternalComposite instance) throws IOException {
List<InternalComposite.InternalBucket> buckets = instance.getBuckets();
Map<String, Object> metaData = instance.getMetaData();
int code = randomIntBetween(0, 2);
int[] reverseMuls = instance.getReverseMuls();
switch (code) {
case 0:
int[] newReverseMuls = new int[reverseMuls.length];
for (int i = 0; i < reverseMuls.length; i++) {
newReverseMuls[i] = reverseMuls[i] == 1 ? -1 : 1;
}
reverseMuls = newReverseMuls;
break;
case 1:
buckets = new ArrayList<>(buckets);
buckets.add(new InternalComposite.InternalBucket(sourceNames, createCompositeKey(), reverseMuls,
randomLongBetween(1, 100), InternalAggregations.EMPTY)
);
break;
case 2:
if (metaData == null) {
metaData = new HashMap<>(1);
} else {
metaData = new HashMap<>(instance.getMetaData());
}
metaData.put(randomAlphaOfLength(15), randomInt());
break;
default:
throw new AssertionError("illegal branch");
}
return new InternalComposite(instance.getName(), instance.getSize(), sourceNames, buckets, reverseMuls,
instance.pipelineAggregators(), metaData);
}
@Override
protected void assertReduced(InternalComposite reduced, List<InternalComposite> inputs) {
List<CompositeKey> expectedKeys = inputs.stream()
.flatMap((s) -> s.getBuckets().stream())
.map(InternalComposite.InternalBucket::getRawKey)
.sorted(getKeyComparator())
.distinct()
.limit(reduced.getSize())
.collect(Collectors.toList());
assertThat(reduced.getBuckets().size(), lessThanOrEqualTo(size));
assertThat(reduced.getBuckets().size(), equalTo(expectedKeys.size()));
Iterator<CompositeKey> expectedIt = expectedKeys.iterator();
for (InternalComposite.InternalBucket bucket : reduced.getBuckets()) {
assertTrue(expectedIt.hasNext());
assertThat(bucket.getRawKey(), equalTo(expectedIt.next()));
}
assertFalse(expectedIt.hasNext());
}
public void testReduceSame() throws IOException {
InternalComposite result = createTestInstance(randomAlphaOfLength(10), Collections.emptyList(), Collections.emptyMap(),
InternalAggregations.EMPTY);
List<InternalAggregation> toReduce = new ArrayList<>();
int numSame = randomIntBetween(1, 10);
for (int i = 0; i < numSame; i++) {
toReduce.add(result);
}
InternalComposite finalReduce = (InternalComposite) result.reduce(toReduce,
new InternalAggregation.ReduceContext(BigArrays.NON_RECYCLING_INSTANCE, null, true));
assertThat(finalReduce.getBuckets().size(), equalTo(result.getBuckets().size()));
Iterator<InternalComposite.InternalBucket> expectedIt = result.getBuckets().iterator();
for (InternalComposite.InternalBucket bucket : finalReduce.getBuckets()) {
InternalComposite.InternalBucket expectedBucket = expectedIt.next();
assertThat(bucket.getRawKey(), equalTo(expectedBucket.getRawKey()));
assertThat(bucket.getDocCount(), equalTo(expectedBucket.getDocCount() * numSame));
}
}
}
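
The comparators above encode the ordering contract for composite keys: compare component by component, multiplying by reverseMuls to flip direction per source. Below is a standalone sketch of that contract, with illustrative names and no test-framework dependencies:

import java.util.Comparator;

final class CompositeKeyOrder {
    // 1 = ascending, -1 = descending, one entry per value source.
    @SuppressWarnings({"unchecked", "rawtypes"})
    static Comparator<Comparable[]> of(int[] reverseMuls) {
        return (k1, k2) -> {
            for (int i = 0; i < k1.length; i++) {
                int cmp = ((Comparable) k1[i]).compareTo(k2[i]) * reverseMuls[i];
                if (cmp != 0) {
                    return cmp; // first differing component decides
                }
            }
            return 0; // keys are equal on every source
        };
    }
}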

View File

@ -0,0 +1,13 @@
# Integration tests for Composite aggs plugin
#
"Composite aggs loaded":
- do:
cluster.state: {}
# Get master node id
- set: { master_node: master }
- do:
nodes.info: {}
- match: { nodes.$master.modules.0.name: aggs-composite }

View File

@ -0,0 +1,196 @@
---
setup:
- do:
indices.create:
index: test
body:
mappings:
doc:
properties:
keyword:
type: keyword
long:
type: long
- do:
index:
index: test
type: doc
id: 1
body: { "keyword": "foo", "long": [10, 20] }
- do:
index:
index: test
type: doc
id: 2
body: { "keyword": ["foo", "bar"] }
- do:
index:
index: test
type: doc
id: 3
body: { "keyword": "bar", "long": [100, 0] }
- do:
index:
index: test
type: doc
id: 4
body: { "keyword": "bar", "long": [1000, 0] }
- do:
indices.refresh:
index: [test]
---
"Simple Composite aggregation":
- skip:
version: " - 6.99.99"
reason: this uses a new API that has been added in 7.0
- do:
search:
index: test
body:
aggregations:
test:
composite:
sources: [
"kw": {
"terms": {
"field": "keyword"
}
}
]
- match: {hits.total: 4}
- length: { aggregations.test.buckets: 2 }
- match: { aggregations.test.buckets.0.key.kw: "bar" }
- match: { aggregations.test.buckets.0.doc_count: 3 }
- match: { aggregations.test.buckets.1.key.kw: "foo" }
- match: { aggregations.test.buckets.1.doc_count: 2 }
---
"Nested Composite aggregation":
- skip:
version: " - 6.99.99"
reason: this uses a new API that has been added in 7.0
- do:
search:
index: test
body:
aggregations:
test:
composite:
sources: [
{
"long": {
"terms": {
"field": "long"
}
}
},
{
"kw": {
"terms": {
"field": "keyword"
}
}
}
]
- match: {hits.total: 4}
- length: { aggregations.test.buckets: 5 }
- match: { aggregations.test.buckets.0.key.long: 0 }
- match: { aggregations.test.buckets.0.key.kw: "bar" }
- match: { aggregations.test.buckets.0.doc_count: 2 }
- match: { aggregations.test.buckets.1.key.long: 10 }
- match: { aggregations.test.buckets.1.key.kw: "foo" }
- match: { aggregations.test.buckets.1.doc_count: 1 }
- match: { aggregations.test.buckets.2.key.long: 20 }
- match: { aggregations.test.buckets.2.key.kw: "foo" }
- match: { aggregations.test.buckets.2.doc_count: 1 }
- match: { aggregations.test.buckets.3.key.long: 100 }
- match: { aggregations.test.buckets.3.key.kw: "bar" }
- match: { aggregations.test.buckets.3.doc_count: 1 }
- match: { aggregations.test.buckets.4.key.long: 1000 }
- match: { aggregations.test.buckets.4.key.kw: "bar" }
- match: { aggregations.test.buckets.4.doc_count: 1 }
---
"Aggregate After":
- skip:
version: " - 6.99.99"
reason: this uses a new API that has been added in 7.0
- do:
search:
index: test
body:
aggregations:
test:
composite:
sources: [
{
"long": {
"terms": {
"field": "long"
}
}
},
{
"kw": {
"terms": {
"field": "keyword"
}
}
}
]
after: { "long": 20, "kw": "foo" }
- match: {hits.total: 4}
- length: { aggregations.test.buckets: 2 }
- match: { aggregations.test.buckets.0.key.long: 100 }
- match: { aggregations.test.buckets.0.key.kw: "bar" }
- match: { aggregations.test.buckets.0.doc_count: 1 }
- match: { aggregations.test.buckets.1.key.long: 1000 }
- match: { aggregations.test.buckets.1.key.kw: "bar" }
- match: { aggregations.test.buckets.1.doc_count: 1 }
---
"Invalid Composite aggregation":
- skip:
version: " - 6.99.99"
reason: this uses a new API that has been added in 7.0
- do:
catch: /\[composite\] aggregation cannot be used with a parent aggregation/
search:
index: test
body:
aggregations:
test:
terms:
field: long
aggs:
nested:
composite:
sources: [
{
"kw": {
"terms": {
"field": "keyword"
}
}
}
]
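
The "Aggregate After" test above is the pagination contract: pass the key of the last bucket you received as `after` and the response resumes from the next key. A hedged Java sketch of the same request follows, assuming the builder exposes the after key through an aggregateAfter(Map) setter (the REST tests above only show the `after` parameter); index and field names mirror the fixture above.

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.elasticsearch.search.aggregations.composite.CompositeAggregationBuilder;
import org.elasticsearch.search.aggregations.composite.CompositeValuesSourceBuilder;
import org.elasticsearch.search.aggregations.composite.TermsValuesSourceBuilder;

public class CompositeAfterExample {
    public static CompositeAggregationBuilder nextPage() {
        List<CompositeValuesSourceBuilder<?>> sources = Arrays.<CompositeValuesSourceBuilder<?>>asList(
            new TermsValuesSourceBuilder("long").field("long"),
            new TermsValuesSourceBuilder("kw").field("keyword"));
        Map<String, Object> afterKey = new HashMap<>();
        afterKey.put("long", 20);  // key of the last bucket from the previous page,
        afterKey.put("kw", "foo"); // mirroring the REST test's `after` block
        return new CompositeAggregationBuilder("test", sources).aggregateAfter(afterKey);
    }
}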

View File

@ -20,8 +20,10 @@
package org.elasticsearch.transport.netty4;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
import io.netty.channel.ChannelDuplexHandler;
import io.netty.channel.ChannelHandlerContext;
import io.netty.util.Attribute;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.transport.TcpHeader;
import org.elasticsearch.transport.Transports;
@ -53,11 +55,13 @@ final class Netty4MessageChannelHandler extends ChannelDuplexHandler {
final int remainingMessageSize = buffer.getInt(buffer.readerIndex() - TcpHeader.MESSAGE_LENGTH_SIZE);
final int expectedReaderIndex = buffer.readerIndex() + remainingMessageSize;
try {
InetSocketAddress remoteAddress = (InetSocketAddress) ctx.channel().remoteAddress();
Channel channel = ctx.channel();
InetSocketAddress remoteAddress = (InetSocketAddress) channel.remoteAddress();
// netty always copies a buffer, either in NioWorker in its read handler, where it copies to a fresh
// buffer, or in the cumulative buffer, which is cleaned each time so it could be bigger than the actual size
BytesReference reference = Netty4Utils.toBytesReference(buffer, remainingMessageSize);
transport.messageReceived(reference, ctx.channel(), profileName, remoteAddress, remainingMessageSize);
Attribute<NettyTcpChannel> channelAttribute = channel.attr(Netty4Transport.CHANNEL_KEY);
transport.messageReceived(reference, channelAttribute.get(), profileName, remoteAddress, remainingMessageSize);
} finally {
// Set the expected position of the buffer, no matter what happened
buffer.readerIndex(expectedReaderIndex);

View File

@ -24,7 +24,6 @@ import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.AdaptiveRecvByteBufAllocator;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandler;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInitializer;
@ -34,6 +33,7 @@ import io.netty.channel.RecvByteBufAllocator;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.util.AttributeKey;
import io.netty.util.concurrent.Future;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
@ -55,24 +55,18 @@ import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.ConnectionProfile;
import org.elasticsearch.transport.TcpTransport;
import org.elasticsearch.transport.TransportRequestOptions;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import static org.elasticsearch.common.settings.Setting.byteSizeSetting;
import static org.elasticsearch.common.settings.Setting.intSetting;
@ -85,7 +79,7 @@ import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadF
* longer. Med is for the typical search / single doc index. And High for things like cluster state. Ping is reserved for
* sending out ping requests to other nodes.
*/
public class Netty4Transport extends TcpTransport<Channel> {
public class Netty4Transport extends TcpTransport<NettyTcpChannel> {
static {
Netty4Utils.setup();
@ -97,7 +91,7 @@ public class Netty4Transport extends TcpTransport<Channel> {
(s) -> Setting.parseInt(s, 1, "transport.netty.worker_count"), Property.NodeScope);
public static final Setting<ByteSizeValue> NETTY_RECEIVE_PREDICTOR_SIZE = Setting.byteSizeSetting(
"transport.netty.receive_predictor_size", new ByteSizeValue(64, ByteSizeUnit.KB), Property.NodeScope);
"transport.netty.receive_predictor_size", new ByteSizeValue(64, ByteSizeUnit.KB), Property.NodeScope);
public static final Setting<ByteSizeValue> NETTY_RECEIVE_PREDICTOR_MIN =
byteSizeSetting("transport.netty.receive_predictor_min", NETTY_RECEIVE_PREDICTOR_SIZE, Property.NodeScope);
public static final Setting<ByteSizeValue> NETTY_RECEIVE_PREDICTOR_MAX =
@ -116,7 +110,7 @@ public class Netty4Transport extends TcpTransport<Channel> {
protected final Map<String, ServerBootstrap> serverBootstraps = newConcurrentMap();
public Netty4Transport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays,
NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService) {
super("netty", settings, threadPool, bigArrays, circuitBreakerService, namedWriteableRegistry, networkService);
Netty4Utils.setAvailableProcessors(EsExecutors.PROCESSORS_SETTING.get(settings));
this.workerCount = WORKER_COUNT.get(settings);
@ -239,10 +233,13 @@ public class Netty4Transport extends TcpTransport<Channel> {
return new ClientChannelInitializer();
}
static final AttributeKey<NettyTcpChannel> CHANNEL_KEY = AttributeKey.newInstance("es-channel");
protected final void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
final Throwable unwrapped = ExceptionsHelper.unwrap(cause, ElasticsearchException.class);
final Throwable t = unwrapped != null ? unwrapped : cause;
onException(ctx.channel(), t instanceof Exception ? (Exception) t : new ElasticsearchException(t));
Channel channel = ctx.channel();
onException(channel.attr(CHANNEL_KEY).get(), t instanceof Exception ? (Exception) t : new ElasticsearchException(t));
}
@Override
@ -252,70 +249,39 @@ public class Netty4Transport extends TcpTransport<Channel> {
}
@Override
protected NodeChannels connectToChannels(DiscoveryNode node, ConnectionProfile profile, Consumer<Channel> onChannelClose) {
final Channel[] channels = new Channel[profile.getNumConnections()];
final NodeChannels nodeChannels = new NodeChannels(node, channels, profile);
boolean success = false;
try {
final TimeValue connectTimeout;
final Bootstrap bootstrap;
final TimeValue defaultConnectTimeout = defaultConnectionProfile.getConnectTimeout();
if (profile.getConnectTimeout() != null && profile.getConnectTimeout().equals(defaultConnectTimeout) == false) {
bootstrap = this.bootstrap.clone(this.bootstrap.config().group());
bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, Math.toIntExact(profile.getConnectTimeout().millis()));
connectTimeout = profile.getConnectTimeout();
} else {
connectTimeout = defaultConnectTimeout;
bootstrap = this.bootstrap;
}
final ArrayList<ChannelFuture> connections = new ArrayList<>(channels.length);
final InetSocketAddress address = node.getAddress().address();
for (int i = 0; i < channels.length; i++) {
connections.add(bootstrap.connect(address));
}
final Iterator<ChannelFuture> iterator = connections.iterator();
final ChannelFutureListener closeListener = future -> onChannelClose.accept(future.channel());
try {
for (int i = 0; i < channels.length; i++) {
assert iterator.hasNext();
ChannelFuture future = iterator.next();
future.awaitUninterruptibly((long) (connectTimeout.millis() * 1.5));
if (!future.isSuccess()) {
throw new ConnectTransportException(node, "connect_timeout[" + connectTimeout + "]", future.cause());
}
channels[i] = future.channel();
channels[i].closeFuture().addListener(closeListener);
}
assert iterator.hasNext() == false : "not all created connection have been consumed";
} catch (final RuntimeException e) {
for (final ChannelFuture future : Collections.unmodifiableList(connections)) {
FutureUtils.cancel(future);
if (future.channel() != null && future.channel().isOpen()) {
try {
future.channel().close();
} catch (Exception inner) {
e.addSuppressed(inner);
}
}
}
throw e;
}
success = true;
} finally {
if (success == false) {
try {
nodeChannels.close();
} catch (IOException e) {
logger.trace("exception while closing channels", e);
}
}
protected NettyTcpChannel initiateChannel(DiscoveryNode node, TimeValue connectTimeout, ActionListener<NettyTcpChannel> listener)
throws IOException {
ChannelFuture channelFuture = bootstrap.connect(node.getAddress().address());
Channel channel = channelFuture.channel();
if (channel == null) {
Netty4Utils.maybeDie(channelFuture.cause());
throw new IOException(channelFuture.cause());
}
return nodeChannels;
addClosedExceptionLogger(channel);
NettyTcpChannel nettyChannel = new NettyTcpChannel(channel);
channel.attr(CHANNEL_KEY).set(nettyChannel);
channelFuture.addListener(f -> {
if (f.isSuccess()) {
listener.onResponse(nettyChannel);
} else {
Throwable cause = f.cause();
if (cause instanceof Error) {
Netty4Utils.maybeDie(cause);
listener.onFailure(new Exception(cause));
} else {
listener.onFailure((Exception) cause);
}
}
});
return nettyChannel;
}
@Override
protected void sendMessage(Channel channel, BytesReference reference, ActionListener<Channel> listener) {
final ChannelFuture future = channel.writeAndFlush(Netty4Utils.toByteBuf(reference));
protected void sendMessage(NettyTcpChannel channel, BytesReference reference, ActionListener<NettyTcpChannel> listener) {
final ChannelFuture future = channel.getLowLevelChannel().writeAndFlush(Netty4Utils.toByteBuf(reference));
future.addListener(f -> {
if (f.isSuccess()) {
listener.onResponse(channel);
@ -331,54 +297,22 @@ public class Netty4Transport extends TcpTransport<Channel> {
}
@Override
protected void closeChannels(final List<Channel> channels, boolean blocking, boolean doNotLinger) throws IOException {
if (doNotLinger) {
for (Channel channel : channels) {
/* We set SO_LINGER timeout to 0 to ensure that when we shutdown the node we don't have a gazillion connections sitting
* in TIME_WAIT to free up resources quickly. This is really the only part where we close the connection from the server
* side otherwise the client (node) initiates the TCP closing sequence which doesn't cause these issues. Setting this
* by default from the beginning can have unexpected side-effects and should be avoided; our protocol is designed
* in a way that clients close the connection, which is how it should be. */
if (channel.isOpen()) {
channel.config().setOption(ChannelOption.SO_LINGER, 0);
}
}
}
if (blocking) {
Netty4Utils.closeChannels(channels);
} else {
for (Channel channel : channels) {
if (channel != null && channel.isOpen()) {
ChannelFuture closeFuture = channel.close();
closeFuture.addListener((f) -> {
if (f.isSuccess() == false) {
logger.warn("failed to close channel", f.cause());
}
});
}
}
}
protected InetSocketAddress getLocalAddress(NettyTcpChannel channel) {
return (InetSocketAddress) channel.getLowLevelChannel().localAddress();
}
@Override
protected InetSocketAddress getLocalAddress(Channel channel) {
return (InetSocketAddress) channel.localAddress();
}
@Override
protected Channel bind(String name, InetSocketAddress address) {
return serverBootstraps.get(name).bind(address).syncUninterruptibly().channel();
protected NettyTcpChannel bind(String name, InetSocketAddress address) {
Channel channel = serverBootstraps.get(name).bind(address).syncUninterruptibly().channel();
NettyTcpChannel esChannel = new NettyTcpChannel(channel);
channel.attr(CHANNEL_KEY).set(esChannel);
return esChannel;
}
ScheduledPing getPing() {
return scheduledPing;
}
@Override
protected boolean isOpen(Channel channel) {
return channel.isOpen();
}
@Override
@SuppressForbidden(reason = "debug")
protected void stopInternal() {
@ -420,7 +354,6 @@ public class Netty4Transport extends TcpTransport<Channel> {
Netty4Utils.maybeDie(cause);
super.exceptionCaught(ctx, cause);
}
}
protected class ServerChannelInitializer extends ChannelInitializer<Channel> {
@ -433,6 +366,10 @@ public class Netty4Transport extends TcpTransport<Channel> {
@Override
protected void initChannel(Channel ch) throws Exception {
addClosedExceptionLogger(ch);
NettyTcpChannel nettyTcpChannel = new NettyTcpChannel(ch);
ch.attr(CHANNEL_KEY).set(nettyTcpChannel);
serverAcceptedChannel(nettyTcpChannel);
ch.pipeline().addLast("logging", new ESLoggingHandler());
ch.pipeline().addLast("open_channels", Netty4Transport.this.serverOpenChannels);
ch.pipeline().addLast("size", new Netty4SizeHeaderFrameDecoder());
@ -444,7 +381,13 @@ public class Netty4Transport extends TcpTransport<Channel> {
Netty4Utils.maybeDie(cause);
super.exceptionCaught(ctx, cause);
}
}
private void addClosedExceptionLogger(Channel channel) {
channel.closeFuture().addListener(f -> {
if (f.isSuccess() == false) {
logger.debug(() -> new ParameterizedMessage("exception while closing channel: {}", channel), f.cause());
}
});
}
}

View File

@ -0,0 +1,74 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.transport.netty4;
import io.netty.channel.Channel;
import io.netty.channel.ChannelOption;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.transport.TcpChannel;
import java.util.concurrent.CompletableFuture;
public class NettyTcpChannel implements TcpChannel {
private final Channel channel;
private final CompletableFuture<TcpChannel> closeContext = new CompletableFuture<>();
NettyTcpChannel(Channel channel) {
this.channel = channel;
this.channel.closeFuture().addListener(f -> {
if (f.isSuccess()) {
closeContext.complete(this);
} else {
Throwable cause = f.cause();
if (cause instanceof Error) {
Netty4Utils.maybeDie(cause);
closeContext.completeExceptionally(cause);
} else {
closeContext.completeExceptionally(cause);
}
}
});
}
public Channel getLowLevelChannel() {
return channel;
}
@Override
public void close() {
channel.close();
}
@Override
public void addCloseListener(ActionListener<TcpChannel> listener) {
closeContext.whenComplete(ActionListener.toBiConsumer(listener));
}
@Override
public void setSoLinger(int value) {
channel.config().setOption(ChannelOption.SO_LINGER, value);
}
@Override
public boolean isOpen() {
return channel.isOpen();
}
}
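
NettyTcpChannel above bridges Netty's one-shot closeFuture into a CompletableFuture, so close listeners can be registered before or after the channel actually closes and still fire exactly once. The same pattern in plain JDK terms, with illustrative names:

import java.util.concurrent.CompletableFuture;
import java.util.function.BiConsumer;

final class CloseNotifier<T> {
    private final CompletableFuture<T> closeContext = new CompletableFuture<>();

    void onClosed(T channel) {
        closeContext.complete(channel); // idempotent: later completions are no-ops
    }

    void onClosedExceptionally(Throwable cause) {
        closeContext.completeExceptionally(cause);
    }

    void addCloseListener(BiConsumer<T, Throwable> listener) {
        closeContext.whenComplete(listener); // fires immediately if already completed
    }
}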

View File

@ -25,7 +25,9 @@ import org.elasticsearch.common.bytes.AbstractBytesReferenceTestCase;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput;
import javax.net.ssl.SSLEngine;
import java.io.IOException;
import java.nio.ByteBuffer;
public class ByteBufBytesReferenceTests extends AbstractBytesReferenceTestCase {
@ -81,5 +83,4 @@ public class ByteBufBytesReferenceTests extends AbstractBytesReferenceTestCase {
channelBuffer.readInt(); // this advances the index of the channel buffer
assertEquals(utf8ToString, byteBufBytesReference.utf8ToString());
}
}

View File

@ -18,7 +18,6 @@
*/
package org.elasticsearch.transport.netty4;
import io.netty.channel.Channel;
import org.elasticsearch.ESNetty4IntegTestCase;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
@ -109,7 +108,7 @@ public class Netty4TransportIT extends ESNetty4IntegTestCase {
super(settings, threadPool, networkService, bigArrays, namedWriteableRegistry, circuitBreakerService);
}
protected String handleRequest(Channel channel, String profileName,
protected String handleRequest(NettyTcpChannel channel, String profileName,
StreamInput stream, long requestId, int messageLengthBytes, Version version,
InetSocketAddress remoteAddress, byte status) throws IOException {
String action = super.handleRequest(channel, profileName, stream, requestId, messageLengthBytes, version,

View File

@ -19,7 +19,6 @@
package org.elasticsearch.transport.netty4;
import io.netty.channel.Channel;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
@ -36,6 +35,7 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.AbstractSimpleTransportTestCase;
import org.elasticsearch.transport.BindTransportException;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.TcpChannel;
import org.elasticsearch.transport.TcpTransport;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportService;
@ -58,7 +58,7 @@ public class SimpleNetty4TransportTests extends AbstractSimpleTransportTestCase
BigArrays.NON_RECYCLING_INSTANCE, namedWriteableRegistry, new NoneCircuitBreakerService()) {
@Override
protected Version executeHandshake(DiscoveryNode node, Channel channel, TimeValue timeout) throws IOException,
protected Version executeHandshake(DiscoveryNode node, NettyTcpChannel channel, TimeValue timeout) throws IOException,
InterruptedException {
if (doHandshake) {
return super.executeHandshake(node, channel, timeout);
@ -89,8 +89,9 @@ public class SimpleNetty4TransportTests extends AbstractSimpleTransportTestCase
@Override
protected void closeConnectionChannel(Transport transport, Transport.Connection connection) throws IOException {
final Netty4Transport t = (Netty4Transport) transport;
@SuppressWarnings("unchecked") final TcpTransport<Channel>.NodeChannels channels = (TcpTransport<Channel>.NodeChannels) connection;
t.closeChannels(channels.getChannels().subList(0, randomIntBetween(1, channels.getChannels().size())), true, false);
@SuppressWarnings("unchecked")
final TcpTransport<NettyTcpChannel>.NodeChannels channels = (TcpTransport<NettyTcpChannel>.NodeChannels) connection;
TcpChannel.closeChannels(channels.getChannels().subList(0, randomIntBetween(1, channels.getChannels().size())), true);
}
public void testConnectException() throws UnknownHostException {
@ -99,7 +100,7 @@ public class SimpleNetty4TransportTests extends AbstractSimpleTransportTestCase
emptyMap(), emptySet(), Version.CURRENT));
fail("Expected ConnectTransportException");
} catch (ConnectTransportException e) {
assertThat(e.getMessage(), containsString("connect_timeout"));
assertThat(e.getMessage(), containsString("connect_exception"));
assertThat(e.getMessage(), containsString("[127.0.0.1:9876]"));
}
}

View File

@ -6,11 +6,11 @@ esplugin {
}
versions << [
'google': '1.20.0'
'google': '1.23.0'
]
dependencies {
compile "com.google.apis:google-api-services-compute:v1-rev71-${versions.google}"
compile "com.google.apis:google-api-services-compute:v1-rev160-${versions.google}"
compile "com.google.api-client:google-api-client:${versions.google}"
compile "com.google.oauth-client:google-oauth-client:${versions.google}"
compile "com.google.http-client:google-http-client:${versions.google}"

View File

@ -1 +0,0 @@
d3e66209ae9e749b2d6833761e7885f60f285564

View File

@ -0,0 +1 @@
522ea860eb48dee71dfe2c61a1fd09663539f556

View File

@ -0,0 +1 @@
004169bfe1cf0e8b2013c9c479e43b731958bc64

View File

@ -1 +0,0 @@
2fa36fff3b5bf59a63c4f2bbfac1f88251cd7986

View File

@ -1 +0,0 @@
93d82db2bca534960253f43424b2ba9d7638b4d2

View File

@ -0,0 +1 @@
8e86c84ff3c98eca6423e97780325b299133d858

Some files were not shown because too many files have changed in this diff